/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "gdbsupport/rsp-low.h"
#include "gdbsupport/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "gdbsupport/filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "gdbsupport/common-inferior.h"
#include "nat/fork-inferior.h"
#include "gdbsupport/environ.h"
#include "gdbsupport/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
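
/* These constants index the inferior's USER area.  On such MMU-less
   targets the text/data segment bounds are typically read with
   PTRACE_PEEKUSER; a minimal sketch (illustrative only, using the
   usual errno protocol since any return value is valid data):

     errno = 0;
     CORE_ADDR text_start
       = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
                 (PTRACE_TYPE_ARG4) 0);
     if (errno != 0)
       ...handle error...  */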

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "gdbsupport/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
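
/* These two helpers close a kernel-side race: the initial SIGSTOP of
   a fork/clone child can be reported by waitpid before the parent's
   corresponding PTRACE_EVENT_* stop is seen.  When that happens, the
   child's status is stashed on STOPPED_PIDS with add_to_pid_list and
   later claimed by handle_extended_wait via pull_pid_from_list.  */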

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal,
                                    siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};
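
/* Signals that arrive while an LWP must remain stopped (for example,
   while another thread is being stepped over a breakpoint) are parked
   on a per-LWP list of these nodes (see enqueue_pending_signal above)
   and re-delivered when the LWP is next resumed.  */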

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
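
/* A write to linux_event_pipe[1] (see async_file_mark, declared below
   and defined later in this file) is how gdbserver wakes up the event
   loop: the read end is registered as a waitable file, so marking the
   pipe forces the event loop to call back into linux_wait.  */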

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if the ELF magic is not present.  In the first two
   cases, store the header's machine field in *MACHINE; otherwise
   store EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, zero if the file is not a
   64-bit ELF file, and -1 if the file is not accessible or doesn't
   exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}
/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
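
/* For example (illustrative only), a caller could use the above to
   pick a register layout:

     unsigned int machine;

     if (linux_pid_exe_is_elf_64_file (pid, &machine) > 0
         && machine == EM_X86_64)
       ...use the 64-bit register offsets...  */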

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_t (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, child will
                 hit it, so uninsert single-step breakpoints from parent
                 (and child).  Once vfork child is done, reinsert
                 them back to parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints are cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
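
/* To summarize handle_extended_wait's contract: it returns 0 when the
   event should be reported to the client (fork, vfork, vfork-done and
   exec events) and 1 when the event was consumed internally (a plain
   clone, which just adds a new LWP).  */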

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

static int
linux_kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw;
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

static int
linux_detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */
1702
1703 static int
1704 thread_still_has_status_pending_p (struct thread_info *thread)
1705 {
1706 struct lwp_info *lp = get_thread_lwp (thread);
1707
1708 if (!lp->status_pending_p)
1709 return 0;
1710
1711 if (thread->last_resume_kind != resume_stop
1712 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1713 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1714 {
1715 struct thread_info *saved_thread;
1716 CORE_ADDR pc;
1717 int discard = 0;
1718
1719 gdb_assert (lp->last_status != 0);
1720
1721 pc = get_pc (lp);
1722
1723 saved_thread = current_thread;
1724 current_thread = thread;
1725
1726 if (pc != lp->stop_pc)
1727 {
1728 if (debug_threads)
1729 debug_printf ("PC of %ld changed\n",
1730 lwpid_of (thread));
1731 discard = 1;
1732 }
1733
1734 #if !USE_SIGTRAP_SIGINFO
1735 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1736 && !(*the_low_target.breakpoint_at) (pc))
1737 {
1738 if (debug_threads)
1739 debug_printf ("previous SW breakpoint of %ld gone\n",
1740 lwpid_of (thread));
1741 discard = 1;
1742 }
1743 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1744 && !hardware_breakpoint_inserted_here (pc))
1745 {
1746 if (debug_threads)
1747 debug_printf ("previous HW breakpoint of %ld gone\n",
1748 lwpid_of (thread));
1749 discard = 1;
1750 }
1751 #endif
1752
1753 current_thread = saved_thread;
1754
1755 if (discard)
1756 {
1757 if (debug_threads)
1758 debug_printf ("discarding pending breakpoint status\n");
1759 lp->status_pending_p = 0;
1760 return 0;
1761 }
1762 }
1763
1764 return 1;
1765 }
1766
1767 /* Returns true if LWP is resumed from the client's perspective. */
1768
1769 static int
1770 lwp_resumed (struct lwp_info *lwp)
1771 {
1772 struct thread_info *thread = get_lwp_thread (lwp);
1773
1774 if (thread->last_resume_kind != resume_stop)
1775 return 1;
1776
1777 /* Did gdb send us a `vCont;t', but we haven't reported the
1778 corresponding stop to gdb yet? If so, the thread is still
1779 resumed/running from gdb's perspective. */
1780 if (thread->last_resume_kind == resume_stop
1781 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1782 return 1;
1783
1784 return 0;
1785 }
1786
1787 /* Return true if this lwp has an interesting status pending. */
1788 static bool
1789 status_pending_p_callback (thread_info *thread, ptid_t ptid)
1790 {
1791 struct lwp_info *lp = get_thread_lwp (thread);
1792
1793 /* Check if we're only interested in events from a specific process
1794 or a specific LWP. */
1795 if (!thread->id.matches (ptid))
1796 return 0;
1797
1798 if (!lwp_resumed (lp))
1799 return 0;
1800
1801 if (lp->status_pending_p
1802 && !thread_still_has_status_pending_p (thread))
1803 {
1804 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1805 return 0;
1806 }
1807
1808 return lp->status_pending_p;
1809 }
1810
1811 struct lwp_info *
1812 find_lwp_pid (ptid_t ptid)
1813 {
1814 thread_info *thread = find_thread ([&] (thread_info *thr_arg)
1815 {
1816 int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
1817 return thr_arg->id.lwp () == lwp;
1818 });
1819
1820 if (thread == NULL)
1821 return NULL;
1822
1823 return get_thread_lwp (thread);
1824 }
1825
1826 /* Return the number of known LWPs in the tgid given by PID. */
1827
1828 static int
1829 num_lwps (int pid)
1830 {
1831 int count = 0;
1832
1833 for_each_thread (pid, [&] (thread_info *thread)
1834 {
1835 count++;
1836 });
1837
1838 return count;
1839 }
1840
1841 /* See nat/linux-nat.h. */
1842
1843 struct lwp_info *
1844 iterate_over_lwps (ptid_t filter,
1845 gdb::function_view<iterate_over_lwps_ftype> callback)
1846 {
1847 thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
1848 {
1849 lwp_info *lwp = get_thread_lwp (thr_arg);
1850
1851 return callback (lwp);
1852 });
1853
1854 if (thread == NULL)
1855 return NULL;
1856
1857 return get_thread_lwp (thread);
1858 }
1859
1860 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1861 their exits until all other threads in the group have exited. */
1862
1863 static void
1864 check_zombie_leaders (void)
1865 {
1866 for_each_process ([] (process_info *proc) {
1867 pid_t leader_pid = pid_of (proc);
1868 struct lwp_info *leader_lp;
1869
1870 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1871
1872 if (debug_threads)
1873 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1874 "num_lwps=%d, zombie=%d\n",
1875 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1876 linux_proc_pid_is_zombie (leader_pid));
1877
1878 if (leader_lp != NULL && !leader_lp->stopped
1879 /* Check if there are other threads in the group, as we may
1880 have raced with the inferior simply exiting. */
1881 && !last_thread_of_process_p (leader_pid)
1882 && linux_proc_pid_is_zombie (leader_pid))
1883 {
1884 /* A leader zombie can mean one of two things:
1885
1886 - It exited, and its exit status is pending, or only the
1887 leader exited (not the whole program). In the latter case,
1888 we can't waitpid for the leader's exit status until all
1889 other threads are gone.
1890
1891 - There are 3 or more threads in the group, and a thread
1892 other than the leader exec'd. On an exec, the Linux
1893 kernel destroys all other threads (except the execing
1894 one) in the thread group, and resets the execing thread's
1895 tid to the tgid. No exit notification is sent for the
1896 execing thread -- from the ptracer's perspective, it
1897 appears as though the execing thread just vanishes.
1898 Until we reap all other threads except the leader and the
1899 execing thread, the leader will be zombie, and the
1900 execing thread will be in `D (disc sleep)'. As soon as
1901 all other threads are reaped, the execing thread changes
1902 its tid to the tgid, and the previous (zombie) leader
1903 vanishes, giving way to the "new" leader. We could try
1904 distinguishing the exit and exec cases, by waiting once
1905 more, and seeing if something comes out, but it doesn't
1906 sound useful. The previous leader _does_ go away, and
1907 we'll re-add the new one once we see the exec event
1908 (which is just the same as what would happen if the
1909 previous leader did exit voluntarily before some other
1910 thread execs). */
1911
1912 if (debug_threads)
1913 debug_printf ("CZL: Thread group leader %d zombie "
1914 "(it exited, or another thread execd).\n",
1915 leader_pid);
1916
1917 delete_lwp (leader_lp);
1918 }
1919 });
1920 }
1921
1922 /* Callback for `find_thread'. Returns the first LWP that is not
1923 stopped. */
1924
1925 static bool
1926 not_stopped_callback (thread_info *thread, ptid_t filter)
1927 {
1928 if (!thread->id.matches (filter))
1929 return false;
1930
1931 lwp_info *lwp = get_thread_lwp (thread);
1932
1933 return !lwp->stopped;
1934 }
1935
1936 /* Increment LWP's suspend count. */
1937
1938 static void
1939 lwp_suspended_inc (struct lwp_info *lwp)
1940 {
1941 lwp->suspended++;
1942
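/* The threshold of 4 is arbitrary; a count this high usually
   indicates unbalanced suspend/unsuspend calls.  */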
1943 if (debug_threads && lwp->suspended > 4)
1944 {
1945 struct thread_info *thread = get_lwp_thread (lwp);
1946
1947 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1948 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1949 }
1950 }
1951
1952 /* Decrement LWP's suspend count. */
1953
1954 static void
1955 lwp_suspended_decr (struct lwp_info *lwp)
1956 {
1957 lwp->suspended--;
1958
1959 if (lwp->suspended < 0)
1960 {
1961 struct thread_info *thread = get_lwp_thread (lwp);
1962
1963 internal_error (__FILE__, __LINE__,
1964 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1965 lwp->suspended);
1966 }
1967 }
1968
1969 /* This function should only be called if the LWP got a SIGTRAP.
1970
1971 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1972 event was handled, 0 otherwise. */
1973
1974 static int
1975 handle_tracepoints (struct lwp_info *lwp)
1976 {
1977 struct thread_info *tinfo = get_lwp_thread (lwp);
1978 int tpoint_related_event = 0;
1979
1980 gdb_assert (lwp->suspended == 0);
1981
1982 /* If this tracepoint hit causes a tracing stop, we'll immediately
1983 uninsert tracepoints. To do this, we temporarily pause all
1984 threads, unpatch away, and then unpause threads. We need to make
1985 sure the unpausing doesn't resume LWP too. */
1986 lwp_suspended_inc (lwp);
1987
1988 /* And we need to be sure that any all-threads-stopping doesn't try
1989 to move threads out of the jump pads, as it could deadlock the
1990 inferior (LWP could be in the jump pad, maybe even holding the
1991 lock.) */
1992
1993 /* Do any necessary step collect actions. */
1994 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1995
1996 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1997
1998 /* See if we just hit a tracepoint and do its main collect
1999 actions. */
2000 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2001
2002 lwp_suspended_decr (lwp);
2003
2004 gdb_assert (lwp->suspended == 0);
2005 gdb_assert (!stabilizing_threads
2006 || (lwp->collecting_fast_tracepoint
2007 != fast_tpoint_collect_result::not_collecting));
2008
2009 if (tpoint_related_event)
2010 {
2011 if (debug_threads)
2012 debug_printf ("got a tracepoint event\n");
2013 return 1;
2014 }
2015
2016 return 0;
2017 }
2018
2019 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2020 collection status. */
2021
2022 static fast_tpoint_collect_result
2023 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2024 struct fast_tpoint_collect_status *status)
2025 {
2026 CORE_ADDR thread_area;
2027 struct thread_info *thread = get_lwp_thread (lwp);
2028
2029 if (the_low_target.get_thread_area == NULL)
2030 return fast_tpoint_collect_result::not_collecting;
2031
2032 /* Get the thread area address. This is used to recognize which
2033 thread is which when tracing with the in-process agent library.
2034 We don't read anything from the address, and treat it as opaque;
2035 it's the address itself that we assume is unique per-thread. */
2036 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2037 return fast_tpoint_collect_result::not_collecting;
2038
2039 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2040 }
2041
2042 /* The reason we resume in the caller is that we want to be able
2043 to pass lwp->status_pending as WSTAT, and we need to clear
2044 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2045 refuses to resume. */
2046
2047 static int
2048 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2049 {
2050 struct thread_info *saved_thread;
2051
2052 saved_thread = current_thread;
2053 current_thread = get_lwp_thread (lwp);
2054
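/* A stop by a signal other than SIGTRAP (or no stop status at
   all) may have interrupted a fast tracepoint collection; check
   whether LWP is inside a jump pad.  SIGTRAP stops are left to
   the regular breakpoint machinery.  */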
2055 if ((wstat == NULL
2056 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2057 && supports_fast_tracepoints ()
2058 && agent_loaded_p ())
2059 {
2060 struct fast_tpoint_collect_status status;
2061
2062 if (debug_threads)
2063 debug_printf ("Checking whether LWP %ld needs to move out of the "
2064 "jump pad.\n",
2065 lwpid_of (current_thread));
2066
2067 fast_tpoint_collect_result r
2068 = linux_fast_tracepoint_collecting (lwp, &status);
2069
2070 if (wstat == NULL
2071 || (WSTOPSIG (*wstat) != SIGILL
2072 && WSTOPSIG (*wstat) != SIGFPE
2073 && WSTOPSIG (*wstat) != SIGSEGV
2074 && WSTOPSIG (*wstat) != SIGBUS))
2075 {
2076 lwp->collecting_fast_tracepoint = r;
2077
2078 if (r != fast_tpoint_collect_result::not_collecting)
2079 {
2080 if (r == fast_tpoint_collect_result::before_insn
2081 && lwp->exit_jump_pad_bkpt == NULL)
2082 {
2083 /* Haven't executed the original instruction yet.
2084 Set breakpoint there, and wait till it's hit,
2085 then single-step until exiting the jump pad. */
2086 lwp->exit_jump_pad_bkpt
2087 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2088 }
2089
2090 if (debug_threads)
2091 debug_printf ("Checking whether LWP %ld needs to move out of "
2092 "the jump pad...it does\n",
2093 lwpid_of (current_thread));
2094 current_thread = saved_thread;
2095
2096 return 1;
2097 }
2098 }
2099 else
2100 {
2101 /* If we get a synchronous signal while collecting, *and*
2102 while executing the (relocated) original instruction,
2103 reset the PC to point at the tpoint address, before
2104 reporting to GDB. Otherwise, it's an IPA lib bug: just
2105 report the signal to GDB, and pray for the best. */
2106
2107 lwp->collecting_fast_tracepoint
2108 = fast_tpoint_collect_result::not_collecting;
2109
2110 if (r != fast_tpoint_collect_result::not_collecting
2111 && (status.adjusted_insn_addr <= lwp->stop_pc
2112 && lwp->stop_pc < status.adjusted_insn_addr_end))
2113 {
2114 siginfo_t info;
2115 struct regcache *regcache;
2116
2117 /* The si_addr on a few signals references the address
2118 of the faulting instruction. Adjust that as
2119 well. */
2120 if ((WSTOPSIG (*wstat) == SIGILL
2121 || WSTOPSIG (*wstat) == SIGFPE
2122 || WSTOPSIG (*wstat) == SIGBUS
2123 || WSTOPSIG (*wstat) == SIGSEGV)
2124 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2125 (PTRACE_TYPE_ARG3) 0, &info) == 0
2126 /* Final check just to make sure we don't clobber
2127 the siginfo of non-kernel-sent signals. */
2128 && (uintptr_t) info.si_addr == lwp->stop_pc)
2129 {
2130 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2131 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2132 (PTRACE_TYPE_ARG3) 0, &info);
2133 }
2134
2135 regcache = get_thread_regcache (current_thread, 1);
2136 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2137 lwp->stop_pc = status.tpoint_addr;
2138
2139 /* Cancel any fast tracepoint lock this thread was
2140 holding. */
2141 force_unlock_trace_buffer ();
2142 }
2143
2144 if (lwp->exit_jump_pad_bkpt != NULL)
2145 {
2146 if (debug_threads)
2147 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2148 "stopping all threads momentarily.\n");
2149
2150 stop_all_lwps (1, lwp);
2151
2152 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2153 lwp->exit_jump_pad_bkpt = NULL;
2154
2155 unstop_all_lwps (1, lwp);
2156
2157 gdb_assert (lwp->suspended >= 0);
2158 }
2159 }
2160 }
2161
2162 if (debug_threads)
2163 debug_printf ("Checking whether LWP %ld needs to move out of the "
2164 "jump pad...no\n",
2165 lwpid_of (current_thread));
2166
2167 current_thread = saved_thread;
2168 return 0;
2169 }
2170
2171 /* Enqueue one signal in the "signals to report later when out of the
2172 jump pad" list. */
2173
2174 static void
2175 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2176 {
2177 struct pending_signals *p_sig;
2178 struct thread_info *thread = get_lwp_thread (lwp);
2179
2180 if (debug_threads)
2181 debug_printf ("Deferring signal %d for LWP %ld.\n",
2182 WSTOPSIG (*wstat), lwpid_of (thread));
2183
2184 if (debug_threads)
2185 {
2186 struct pending_signals *sig;
2187
2188 for (sig = lwp->pending_signals_to_report;
2189 sig != NULL;
2190 sig = sig->prev)
2191 debug_printf (" Already queued %d\n",
2192 sig->signal);
2193
2194 debug_printf (" (no more currently queued signals)\n");
2195 }
2196
2197 /* Don't enqueue non-RT signals if they are already in the deferred
2198 queue. (SIGSTOP being the easiest signal to see ending up here
2199 twice) */
2200 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2201 {
2202 struct pending_signals *sig;
2203
2204 for (sig = lwp->pending_signals_to_report;
2205 sig != NULL;
2206 sig = sig->prev)
2207 {
2208 if (sig->signal == WSTOPSIG (*wstat))
2209 {
2210 if (debug_threads)
2211 debug_printf ("Not requeuing already queued non-RT signal %d"
2212 " for LWP %ld\n",
2213 sig->signal,
2214 lwpid_of (thread));
2215 return;
2216 }
2217 }
2218 }
2219
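/* Link the new entry at the head of the list; the oldest
   deferred signal sits at the tail of the `prev' chain.  */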
2220 p_sig = XCNEW (struct pending_signals);
2221 p_sig->prev = lwp->pending_signals_to_report;
2222 p_sig->signal = WSTOPSIG (*wstat);
2223
2224 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2225 &p_sig->info);
2226
2227 lwp->pending_signals_to_report = p_sig;
2228 }
2229
2230 /* Dequeue one signal from the "signals to report later when out of
2231 the jump pad" list. */
2232
2233 static int
2234 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2235 {
2236 struct thread_info *thread = get_lwp_thread (lwp);
2237
2238 if (lwp->pending_signals_to_report != NULL)
2239 {
2240 struct pending_signals **p_sig;
2241
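/* Walk to the tail of the `prev' chain, so that signals are
   reported in the same order they were deferred.  */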
2242 p_sig = &lwp->pending_signals_to_report;
2243 while ((*p_sig)->prev != NULL)
2244 p_sig = &(*p_sig)->prev;
2245
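/* Synthesize a wait status just as waitpid would have reported
   it for this signal.  */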
2246 *wstat = W_STOPCODE ((*p_sig)->signal);
2247 if ((*p_sig)->info.si_signo != 0)
2248 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2249 &(*p_sig)->info);
2250 free (*p_sig);
2251 *p_sig = NULL;
2252
2253 if (debug_threads)
2254 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2255 WSTOPSIG (*wstat), lwpid_of (thread));
2256
2257 if (debug_threads)
2258 {
2259 struct pending_signals *sig;
2260
2261 for (sig = lwp->pending_signals_to_report;
2262 sig != NULL;
2263 sig = sig->prev)
2264 debug_printf (" Still queued %d\n",
2265 sig->signal);
2266
2267 debug_printf (" (no more queued signals)\n");
2268 }
2269
2270 return 1;
2271 }
2272
2273 return 0;
2274 }
2275
2276 /* Fetch the possibly triggered data watchpoint info and store it in
2277 CHILD.
2278
2279 On some archs, like x86, that use debug registers to set
2280 watchpoints, the way to know which watched address trapped is to
2281 check the register that is used to select which address to
2282 watch. The problem is that, between setting the watchpoint
2283 and reading back which data address trapped, the user may change
2284 the set of watchpoints, and, as a consequence, GDB changes the
2285 debug registers in the inferior. To avoid reading back a stale
2286 stopped-data-address when that happens, we cache in CHILD the fact
2287 that a watchpoint trapped, and the corresponding data address, as
2288 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2289 registers meanwhile, we have the cached data we can rely on. */
2290
2291 static int
2292 check_stopped_by_watchpoint (struct lwp_info *child)
2293 {
2294 if (the_low_target.stopped_by_watchpoint != NULL)
2295 {
2296 struct thread_info *saved_thread;
2297
2298 saved_thread = current_thread;
2299 current_thread = get_lwp_thread (child);
2300
2301 if (the_low_target.stopped_by_watchpoint ())
2302 {
2303 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2304
2305 if (the_low_target.stopped_data_address != NULL)
2306 child->stopped_data_address
2307 = the_low_target.stopped_data_address ();
2308 else
2309 child->stopped_data_address = 0;
2310 }
2311
2312 current_thread = saved_thread;
2313 }
2314
2315 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2316 }
2317
2318 /* Return the ptrace options that we want to try to enable. */
2319
2320 static int
2321 linux_low_ptrace_options (int attached)
2322 {
2323 client_state &cs = get_client_state ();
2324 int options = 0;
2325
2326 if (!attached)
2327 options |= PTRACE_O_EXITKILL;
2328
2329 if (cs.report_fork_events)
2330 options |= PTRACE_O_TRACEFORK;
2331
2332 if (cs.report_vfork_events)
2333 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2334
2335 if (cs.report_exec_events)
2336 options |= PTRACE_O_TRACEEXEC;
2337
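/* Always set TRACESYSGOOD, so that syscall stops are reported as
   (SIGTRAP | 0x80) and can't be confused with ordinary SIGTRAPs.  */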
2338 options |= PTRACE_O_TRACESYSGOOD;
2339
2340 return options;
2341 }
2342
2343 /* Do low-level handling of the event, and check if we should go on
2344 and pass it to caller code. Return the affected lwp if we are, or
2345 NULL otherwise. */
2346
2347 static struct lwp_info *
2348 linux_low_filter_event (int lwpid, int wstat)
2349 {
2350 client_state &cs = get_client_state ();
2351 struct lwp_info *child;
2352 struct thread_info *thread;
2353 int have_stop_pc = 0;
2354
2355 child = find_lwp_pid (ptid_t (lwpid));
2356
2357 /* Check for stop events reported by a process we didn't already
2358 know about - anything not already in our LWP list.
2359
2360 If we're expecting to receive stopped processes after
2361 fork, vfork, and clone events, then we'll just add the
2362 new one to our list and go back to waiting for the event
2363 to be reported - the stopped process might be returned
2364 from waitpid before or after the event is.
2365
2366 But note the case of a non-leader thread exec'ing after the
2367 leader having exited, and gone from our lists (because
2368 check_zombie_leaders deleted it). The non-leader thread
2369 changes its tid to the tgid. */
2370
2371 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2372 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2373 {
2374 ptid_t child_ptid;
2375
2376 /* A multi-thread exec after we had seen the leader exiting. */
2377 if (debug_threads)
2378 {
2379 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2380 " after exec.\n", lwpid);
2381 }
2382
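/* After the exec the remaining thread's tid equals the tgid, so
   use LWPID for both the pid and lwp fields.  */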
2383 child_ptid = ptid_t (lwpid, lwpid, 0);
2384 child = add_lwp (child_ptid);
2385 child->stopped = 1;
2386 current_thread = child->thread;
2387 }
2388
2389 /* If we didn't find a process, one of two things presumably happened:
2390 - A process we started and then detached from has exited. Ignore it.
2391 - A process we are controlling has forked and the new child's stop
2392 was reported to us by the kernel. Save its PID. */
2393 if (child == NULL && WIFSTOPPED (wstat))
2394 {
2395 add_to_pid_list (&stopped_pids, lwpid, wstat);
2396 return NULL;
2397 }
2398 else if (child == NULL)
2399 return NULL;
2400
2401 thread = get_lwp_thread (child);
2402
2403 child->stopped = 1;
2404
2405 child->last_status = wstat;
2406
2407 /* Check if the thread has exited. */
2408 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2409 {
2410 if (debug_threads)
2411 debug_printf ("LLFE: %d exited.\n", lwpid);
2412
2413 if (finish_step_over (child))
2414 {
2415 /* Unsuspend all other LWPs, and set them back running again. */
2416 unsuspend_all_lwps (child);
2417 }
2418
2419 /* If there is at least one more LWP, then the exit signal was
2420 not the end of the debugged application and should be
2421 ignored, unless GDB wants to hear about thread exits. */
2422 if (cs.report_thread_events
2423 || last_thread_of_process_p (pid_of (thread)))
2424 {
2425 /* Events are serialized to the GDB core, and we can't
2426 report this one right now; leave the status pending for
2427 the next time we're able to report it. */
2428 mark_lwp_dead (child, wstat);
2429 return child;
2430 }
2431 else
2432 {
2433 delete_lwp (child);
2434 return NULL;
2435 }
2436 }
2437
2438 gdb_assert (WIFSTOPPED (wstat));
2439
2440 if (WIFSTOPPED (wstat))
2441 {
2442 struct process_info *proc;
2443
2444 /* Architecture-specific setup after inferior is running. */
2445 proc = find_process_pid (pid_of (thread));
2446 if (proc->tdesc == NULL)
2447 {
2448 if (proc->attached)
2449 {
2450 /* This needs to happen after we have attached to the
2451 inferior and it is stopped for the first time, but
2452 before we access any inferior registers. */
2453 linux_arch_setup_thread (thread);
2454 }
2455 else
2456 {
2457 /* The process is started, but GDBserver will do
2458 architecture-specific setup after the program stops at
2459 the first instruction. */
2460 child->status_pending_p = 1;
2461 child->status_pending = wstat;
2462 return child;
2463 }
2464 }
2465 }
2466
2467 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2468 {
2469 struct process_info *proc = find_process_pid (pid_of (thread));
2470 int options = linux_low_ptrace_options (proc->attached);
2471
2472 linux_enable_event_reporting (lwpid, options);
2473 child->must_set_ptrace_flags = 0;
2474 }
2475
2476 /* Always update syscall_state, even if it will be filtered later. */
2477 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2478 {
2479 child->syscall_state
2480 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2481 ? TARGET_WAITKIND_SYSCALL_RETURN
2482 : TARGET_WAITKIND_SYSCALL_ENTRY);
2483 }
2484 else
2485 {
2486 /* Almost all other ptrace-stops are known to be outside of system
2487 calls, with further exceptions in handle_extended_wait. */
2488 child->syscall_state = TARGET_WAITKIND_IGNORE;
2489 }
2490
2491 /* Be careful to not overwrite stop_pc until save_stop_reason is
2492 called. */
2493 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2494 && linux_is_extended_waitstatus (wstat))
2495 {
2496 child->stop_pc = get_pc (child);
2497 if (handle_extended_wait (&child, wstat))
2498 {
2499 /* The event has been handled, so just return without
2500 reporting it. */
2501 return NULL;
2502 }
2503 }
2504
2505 if (linux_wstatus_maybe_breakpoint (wstat))
2506 {
2507 if (save_stop_reason (child))
2508 have_stop_pc = 1;
2509 }
2510
2511 if (!have_stop_pc)
2512 child->stop_pc = get_pc (child);
2513
2514 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2515 && child->stop_expected)
2516 {
2517 if (debug_threads)
2518 debug_printf ("Expected stop.\n");
2519 child->stop_expected = 0;
2520
2521 if (thread->last_resume_kind == resume_stop)
2522 {
2523 /* We want to report the stop to the core. Treat the
2524 SIGSTOP as a normal event. */
2525 if (debug_threads)
2526 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2527 target_pid_to_str (ptid_of (thread)));
2528 }
2529 else if (stopping_threads != NOT_STOPPING_THREADS)
2530 {
2531 /* Stopping threads. We don't want this SIGSTOP to end up
2532 pending. */
2533 if (debug_threads)
2534 debug_printf ("LLW: SIGSTOP caught for %s "
2535 "while stopping threads.\n",
2536 target_pid_to_str (ptid_of (thread)));
2537 return NULL;
2538 }
2539 else
2540 {
2541 /* This is a delayed SIGSTOP. Filter out the event. */
2542 if (debug_threads)
2543 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2544 child->stepping ? "step" : "continue",
2545 target_pid_to_str (ptid_of (thread)));
2546
2547 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2548 return NULL;
2549 }
2550 }
2551
2552 child->status_pending_p = 1;
2553 child->status_pending = wstat;
2554 return child;
2555 }
2556
2557 /* Return true if THREAD is doing hardware single step. */
2558
2559 static int
2560 maybe_hw_step (struct thread_info *thread)
2561 {
2562 if (can_hardware_single_step ())
2563 return 1;
2564 else
2565 {
2566 /* GDBserver must insert single-step breakpoint for software
2567 single step. */
2568 gdb_assert (has_single_step_breakpoints (thread));
2569 return 0;
2570 }
2571 }
2572
2573 /* Resume LWPs that are currently stopped without any pending status
2574 to report, but are resumed from the core's perspective. */
2575
2576 static void
2577 resume_stopped_resumed_lwps (thread_info *thread)
2578 {
2579 struct lwp_info *lp = get_thread_lwp (thread);
2580
2581 if (lp->stopped
2582 && !lp->suspended
2583 && !lp->status_pending_p
2584 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2585 {
2586 int step = 0;
2587
2588 if (thread->last_resume_kind == resume_step)
2589 step = maybe_hw_step (thread);
2590
2591 if (debug_threads)
2592 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2593 target_pid_to_str (ptid_of (thread)),
2594 paddress (lp->stop_pc),
2595 step);
2596
2597 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2598 }
2599 }
2600
2601 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2602 match FILTER_PTID (leaving others pending). The PTIDs can be:
2603 minus_one_ptid, to specify any child; a pid PTID, specifying all
2604 lwps of a thread group; or a PTID representing a single lwp. Store
2605 the stop status through the status pointer WSTAT. OPTIONS is
2606 passed to the waitpid call. Return 0 if no event was found and
2607 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2608 were found. Return the PID of the stopped child otherwise. */
2609
2610 static int
2611 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2612 int *wstatp, int options)
2613 {
2614 struct thread_info *event_thread;
2615 struct lwp_info *event_child, *requested_child;
2616 sigset_t block_mask, prev_mask;
2617
2618 retry:
2619 /* N.B. event_thread points to the thread_info struct that contains
2620 event_child. Keep them in sync. */
2621 event_thread = NULL;
2622 event_child = NULL;
2623 requested_child = NULL;
2624
2625 /* Check for a lwp with a pending status. */
2626
2627 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2628 {
2629 event_thread = find_thread_in_random ([&] (thread_info *thread)
2630 {
2631 return status_pending_p_callback (thread, filter_ptid);
2632 });
2633
2634 if (event_thread != NULL)
2635 event_child = get_thread_lwp (event_thread);
2636 if (debug_threads && event_thread)
2637 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2638 }
2639 else if (filter_ptid != null_ptid)
2640 {
2641 requested_child = find_lwp_pid (filter_ptid);
2642
2643 if (stopping_threads == NOT_STOPPING_THREADS
2644 && requested_child->status_pending_p
2645 && (requested_child->collecting_fast_tracepoint
2646 != fast_tpoint_collect_result::not_collecting))
2647 {
2648 enqueue_one_deferred_signal (requested_child,
2649 &requested_child->status_pending);
2650 requested_child->status_pending_p = 0;
2651 requested_child->status_pending = 0;
2652 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2653 }
2654
2655 if (requested_child->suspended
2656 && requested_child->status_pending_p)
2657 {
2658 internal_error (__FILE__, __LINE__,
2659 "requesting an event out of a"
2660 " suspended child?");
2661 }
2662
2663 if (requested_child->status_pending_p)
2664 {
2665 event_child = requested_child;
2666 event_thread = get_lwp_thread (event_child);
2667 }
2668 }
2669
2670 if (event_child != NULL)
2671 {
2672 if (debug_threads)
2673 debug_printf ("Got an event from pending child %ld (%04x)\n",
2674 lwpid_of (event_thread), event_child->status_pending);
2675 *wstatp = event_child->status_pending;
2676 event_child->status_pending_p = 0;
2677 event_child->status_pending = 0;
2678 current_thread = event_thread;
2679 return lwpid_of (event_thread);
2680 }
2681
2682 /* But if we don't find a pending event, we'll have to wait.
2683
2684 We only enter this loop if no process has a pending wait status.
2685 Thus any action taken in response to a wait status inside this
2686 loop is responding as soon as we detect the status, not after any
2687 pending events. */
2688
2689 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2690 all signals while here. */
2691 sigfillset (&block_mask);
2692 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2693
2694 /* Always pull all events out of the kernel. We'll randomly select
2695 an event LWP out of all that have events, to prevent
2696 starvation. */
2697 while (event_child == NULL)
2698 {
2699 pid_t ret = 0;
2700
2701 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2702 quirks:
2703
2704 - If the thread group leader exits while other threads in the
2705 thread group still exist, waitpid(TGID, ...) hangs. That
2706 waitpid won't return an exit status until the other threads
2707 in the group are reaped.
2708
2709 - When a non-leader thread execs, that thread just vanishes
2710 without reporting an exit (so we'd hang if we waited for it
2711 explicitly in that case). The exec event is reported to
2712 the TGID pid. */
2713 errno = 0;
2714 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2715
2716 if (debug_threads)
2717 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2718 ret, errno ? strerror (errno) : "ERRNO-OK");
2719
2720 if (ret > 0)
2721 {
2722 if (debug_threads)
2723 {
2724 debug_printf ("LLW: waitpid %ld received %s\n",
2725 (long) ret, status_to_str (*wstatp));
2726 }
2727
2728 /* Filter all events. IOW, leave all events pending. We'll
2729 randomly select an event LWP out of all that have events
2730 below. */
2731 linux_low_filter_event (ret, *wstatp);
2732 /* Retry until nothing comes out of waitpid. A single
2733 SIGCHLD can indicate more than one child stopped. */
2734 continue;
2735 }
2736
2737 /* Now that we've pulled all events out of the kernel, resume
2738 LWPs that don't have an interesting event to report. */
2739 if (stopping_threads == NOT_STOPPING_THREADS)
2740 for_each_thread (resume_stopped_resumed_lwps);
2741
2742 /* ... and find an LWP with a status to report to the core, if
2743 any. */
2744 event_thread = find_thread_in_random ([&] (thread_info *thread)
2745 {
2746 return status_pending_p_callback (thread, filter_ptid);
2747 });
2748
2749 if (event_thread != NULL)
2750 {
2751 event_child = get_thread_lwp (event_thread);
2752 *wstatp = event_child->status_pending;
2753 event_child->status_pending_p = 0;
2754 event_child->status_pending = 0;
2755 break;
2756 }
2757
2758 /* Check for zombie thread group leaders. Those can't be reaped
2759 until all other threads in the thread group are. */
2760 check_zombie_leaders ();
2761
2762 auto not_stopped = [&] (thread_info *thread)
2763 {
2764 return not_stopped_callback (thread, wait_ptid);
2765 };
2766
2767 /* If there are no resumed children left in the set of LWPs we
2768 want to wait for, bail. We can't just block in
2769 waitpid/sigsuspend, because lwps might have been left stopped
2770 in trace-stop state, and we'd be stuck forever waiting for
2771 their status to change (which would only happen if we resumed
2772 them). Even if WNOHANG is set, this return code is preferred
2773 over 0 (below), as it is more detailed. */
2774 if (find_thread (not_stopped) == NULL)
2775 {
2776 if (debug_threads)
2777 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2778 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2779 return -1;
2780 }
2781
2782 /* No interesting event to report to the caller. */
2783 if ((options & WNOHANG))
2784 {
2785 if (debug_threads)
2786 debug_printf ("WNOHANG set, no event found\n");
2787
2788 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2789 return 0;
2790 }
2791
2792 /* Block until we get an event reported with SIGCHLD. */
2793 if (debug_threads)
2794 debug_printf ("sigsuspend'ing\n");
2795
2796 sigsuspend (&prev_mask);
2797 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2798 goto retry;
2799 }
2800
2801 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2802
2803 current_thread = event_thread;
2804
2805 return lwpid_of (event_thread);
2806 }
2807
2808 /* Wait for an event from child(ren) PTID. PTIDs can be:
2809 minus_one_ptid, to specify any child; a pid PTID, specifying all
2810 lwps of a thread group; or a PTID representing a single lwp. Store
2811 the stop status through the status pointer WSTAT. OPTIONS is
2812 passed to the waitpid call. Return 0 if no event was found and
2813 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2814 were found. Return the PID of the stopped child otherwise. */
2815
2816 static int
2817 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2818 {
2819 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2820 }
2821
2822 /* Select one LWP out of those that have events pending. */
2823
2824 static void
2825 select_event_lwp (struct lwp_info **orig_lp)
2826 {
2827 struct thread_info *event_thread = NULL;
2828
2829 /* In all-stop, give preference to the LWP that is being
2830 single-stepped. There will be at most one, and it's the LWP that
2831 the core is most interested in. If we didn't do this, then we'd
2832 have to handle pending step SIGTRAPs somehow in case the core
2833 later continues the previously-stepped thread, otherwise we'd
2834 report the pending SIGTRAP, and the core, not having stepped the
2835 thread, wouldn't understand what the trap was for, and therefore
2836 would report it to the user as a random signal. */
2837 if (!non_stop)
2838 {
2839 event_thread = find_thread ([] (thread_info *thread)
2840 {
2841 lwp_info *lp = get_thread_lwp (thread);
2842
2843 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2844 && thread->last_resume_kind == resume_step
2845 && lp->status_pending_p);
2846 });
2847
2848 if (event_thread != NULL)
2849 {
2850 if (debug_threads)
2851 debug_printf ("SEL: Select single-step %s\n",
2852 target_pid_to_str (ptid_of (event_thread)));
2853 }
2854 }
2855 if (event_thread == NULL)
2856 {
2857 /* No single-stepping LWP. Select one at random, out of those
2858 which have had events. */
2859
2860 event_thread = find_thread_in_random ([&] (thread_info *thread)
2861 {
2862 lwp_info *lp = get_thread_lwp (thread);
2863
2864 /* Only resumed LWPs that have an event pending. */
2865 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2866 && lp->status_pending_p);
2867 });
2868 }
2869
2870 if (event_thread != NULL)
2871 {
2872 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2873
2874 /* Switch the event LWP. */
2875 *orig_lp = event_lp;
2876 }
2877 }
2878
2879 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2880 non-NULL. */
2881
2882 static void
2883 unsuspend_all_lwps (struct lwp_info *except)
2884 {
2885 for_each_thread ([&] (thread_info *thread)
2886 {
2887 lwp_info *lwp = get_thread_lwp (thread);
2888
2889 if (lwp != except)
2890 lwp_suspended_decr (lwp);
2891 });
2892 }
2893
2894 static void move_out_of_jump_pad_callback (thread_info *thread);
2895 static bool stuck_in_jump_pad_callback (thread_info *thread);
2896 static bool lwp_running (thread_info *thread);
2897 static ptid_t linux_wait_1 (ptid_t ptid,
2898 struct target_waitstatus *ourstatus,
2899 int target_options);
2900
2901 /* Stabilize threads (move out of jump pads).
2902
2903 If a thread is midway collecting a fast tracepoint, we need to
2904 finish the collection and move it out of the jump pad before
2905 reporting the signal.
2906
2907 This avoids recursion while collecting (when a signal arrives
2908 midway, and the signal handler itself collects), which would trash
2909 the trace buffer. In case the user set a breakpoint in a signal
2910 handler, this avoids the backtrace showing the jump pad, etc..
2911 Most importantly, there are certain things we can't do safely if
2912 threads are stopped in a jump pad (or in its callee's). For
2913 example:
2914
2915 - starting a new trace run. A thread still collecting the
2916 previous run, could trash the trace buffer when resumed. The trace
2917 buffer control structures would have been reset but the thread had
2918 no way to tell. The thread could even midway memcpy'ing to the
2919 buffer, which would mean that when resumed, it would clobber the
2920 trace buffer that had been set for a new run.
2921
2922 - we can't rewrite/reuse the jump pads for new tracepoints
2923 safely. Say you do tstart while a thread is stopped midway while
2924 collecting. When the thread is later resumed, it finishes the
2925 collection, and returns to the jump pad, to execute the original
2926 instruction that was under the tracepoint jump at the time the
2927 older run had been started. If the jump pad had been rewritten
2928 since for something else in the new run, the thread would now
2929 execute the wrong / random instructions. */
2930
2931 static void
2932 linux_stabilize_threads (void)
2933 {
2934 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2935
2936 if (thread_stuck != NULL)
2937 {
2938 if (debug_threads)
2939 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2940 lwpid_of (thread_stuck));
2941 return;
2942 }
2943
2944 thread_info *saved_thread = current_thread;
2945
2946 stabilizing_threads = 1;
2947
2948 /* Kick 'em all. */
2949 for_each_thread (move_out_of_jump_pad_callback);
2950
2951 /* Loop until all are stopped out of the jump pads. */
2952 while (find_thread (lwp_running) != NULL)
2953 {
2954 struct target_waitstatus ourstatus;
2955 struct lwp_info *lwp;
2956 int wstat;
2957
2958 /* Note that we go through the full wait event loop. While
2959 moving threads out of jump pad, we need to be able to step
2960 over internal breakpoints and such. */
2961 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2962
2963 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2964 {
2965 lwp = get_thread_lwp (current_thread);
2966
2967 /* Lock it: bump the suspend count so later unpauses won't resume it. */
2968 lwp_suspended_inc (lwp);
2969
2970 if (ourstatus.value.sig != GDB_SIGNAL_0
2971 || current_thread->last_resume_kind == resume_stop)
2972 {
2973 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2974 enqueue_one_deferred_signal (lwp, &wstat);
2975 }
2976 }
2977 }
2978
2979 unsuspend_all_lwps (NULL);
2980
2981 stabilizing_threads = 0;
2982
2983 current_thread = saved_thread;
2984
2985 if (debug_threads)
2986 {
2987 thread_stuck = find_thread (stuck_in_jump_pad_callback);
2988
2989 if (thread_stuck != NULL)
2990 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2991 lwpid_of (thread_stuck));
2992 }
2993 }
2994
2995 /* Convenience function that is called when the kernel reports an
2996 event that is not passed out to GDB. */
2997
2998 static ptid_t
2999 ignore_event (struct target_waitstatus *ourstatus)
3000 {
3001 /* If we got an event, there may still be others, as a single
3002 SIGCHLD can indicate more than one child stopped. This forces
3003 another target_wait call. */
3004 async_file_mark ();
3005
3006 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3007 return null_ptid;
3008 }
3009
3010 /* Convenience function that is called when the kernel reports an exit
3011 event. This decides whether to report the event to GDB as a
3012 process exit event, a thread exit event, or to suppress the
3013 event. */
3014
3015 static ptid_t
3016 filter_exit_event (struct lwp_info *event_child,
3017 struct target_waitstatus *ourstatus)
3018 {
3019 client_state &cs = get_client_state ();
3020 struct thread_info *thread = get_lwp_thread (event_child);
3021 ptid_t ptid = ptid_of (thread);
3022
3023 if (!last_thread_of_process_p (pid_of (thread)))
3024 {
3025 if (cs.report_thread_events)
3026 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3027 else
3028 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3029
3030 delete_lwp (event_child);
3031 }
3032 return ptid;
3033 }
3034
3035 /* Returns 1 if GDB is interested in any event_child syscalls. */
3036
3037 static int
3038 gdb_catching_syscalls_p (struct lwp_info *event_child)
3039 {
3040 struct thread_info *thread = get_lwp_thread (event_child);
3041 struct process_info *proc = get_thread_process (thread);
3042
3043 return !proc->syscalls_to_catch.empty ();
3044 }
3045
3046 /* Returns 1 if GDB is interested in the event_child syscall.
3047 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3048
3049 static int
3050 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3051 {
3052 int sysno;
3053 struct thread_info *thread = get_lwp_thread (event_child);
3054 struct process_info *proc = get_thread_process (thread);
3055
3056 if (proc->syscalls_to_catch.empty ())
3057 return 0;
3058
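/* A leading ANY_SYSCALL entry means GDB wants to catch all
   syscalls.  */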
3059 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3060 return 1;
3061
3062 get_syscall_trapinfo (event_child, &sysno);
3063
3064 for (int iter : proc->syscalls_to_catch)
3065 if (iter == sysno)
3066 return 1;
3067
3068 return 0;
3069 }
3070
3071 /* Wait for an event from any child process; fill in *OURSTATUS and return the ptid of the thread the event is for. */
3072
3073 static ptid_t
3074 linux_wait_1 (ptid_t ptid,
3075 struct target_waitstatus *ourstatus, int target_options)
3076 {
3077 client_state &cs = get_client_state ();
3078 int w;
3079 struct lwp_info *event_child;
3080 int options;
3081 int pid;
3082 int step_over_finished;
3083 int bp_explains_trap;
3084 int maybe_internal_trap;
3085 int report_to_gdb;
3086 int trace_event;
3087 int in_step_range;
3088 int any_resumed;
3089
3090 if (debug_threads)
3091 {
3092 debug_enter ();
3093 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3094 }
3095
3096 /* Translate generic target options into linux options. */
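/* __WALL waits for all children, whether they are clones
   (threads) or regular fork children.  */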
3097 options = __WALL;
3098 if (target_options & TARGET_WNOHANG)
3099 options |= WNOHANG;
3100
3101 bp_explains_trap = 0;
3102 trace_event = 0;
3103 in_step_range = 0;
3104 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3105
3106 auto status_pending_p_any = [&] (thread_info *thread)
3107 {
3108 return status_pending_p_callback (thread, minus_one_ptid);
3109 };
3110
3111 auto not_stopped = [&] (thread_info *thread)
3112 {
3113 return not_stopped_callback (thread, minus_one_ptid);
3114 };
3115
3116 /* Find a resumed LWP, if any. */
3117 if (find_thread (status_pending_p_any) != NULL)
3118 any_resumed = 1;
3119 else if (find_thread (not_stopped) != NULL)
3120 any_resumed = 1;
3121 else
3122 any_resumed = 0;
3123
3124 if (step_over_bkpt == null_ptid)
3125 pid = linux_wait_for_event (ptid, &w, options);
3126 else
3127 {
3128 if (debug_threads)
3129 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3130 target_pid_to_str (step_over_bkpt));
3131 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3132 }
3133
3134 if (pid == 0 || (pid == -1 && !any_resumed))
3135 {
3136 gdb_assert (target_options & TARGET_WNOHANG);
3137
3138 if (debug_threads)
3139 {
3140 debug_printf ("linux_wait_1 ret = null_ptid, "
3141 "TARGET_WAITKIND_IGNORE\n");
3142 debug_exit ();
3143 }
3144
3145 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3146 return null_ptid;
3147 }
3148 else if (pid == -1)
3149 {
3150 if (debug_threads)
3151 {
3152 debug_printf ("linux_wait_1 ret = null_ptid, "
3153 "TARGET_WAITKIND_NO_RESUMED\n");
3154 debug_exit ();
3155 }
3156
3157 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3158 return null_ptid;
3159 }
3160
3161 event_child = get_thread_lwp (current_thread);
3162
3163 /* linux_wait_for_event only returns an exit status for the last
3164 child of a process. Report it. */
3165 if (WIFEXITED (w) || WIFSIGNALED (w))
3166 {
3167 if (WIFEXITED (w))
3168 {
3169 ourstatus->kind = TARGET_WAITKIND_EXITED;
3170 ourstatus->value.integer = WEXITSTATUS (w);
3171
3172 if (debug_threads)
3173 {
3174 debug_printf ("linux_wait_1 ret = %s, exited with "
3175 "retcode %d\n",
3176 target_pid_to_str (ptid_of (current_thread)),
3177 WEXITSTATUS (w));
3178 debug_exit ();
3179 }
3180 }
3181 else
3182 {
3183 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3184 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3185
3186 if (debug_threads)
3187 {
3188 debug_printf ("linux_wait_1 ret = %s, terminated with "
3189 "signal %d\n",
3190 target_pid_to_str (ptid_of (current_thread)),
3191 WTERMSIG (w));
3192 debug_exit ();
3193 }
3194 }
3195
3196 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3197 return filter_exit_event (event_child, ourstatus);
3198
3199 return ptid_of (current_thread);
3200 }
3201
3202 /* If step-over executes a breakpoint instruction, in the case of a
3203 hardware single step it means a gdb/gdbserver breakpoint had been
3204 planted on top of a permanent breakpoint, in the case of a software
3205 single step it may just mean that gdbserver hit the reinsert breakpoint.
3206 The PC has been adjusted by save_stop_reason to point at
3207 the breakpoint address.
3208 So in the case of hardware single step, advance the PC manually
3209 past the breakpoint; in the case of software single step, advance
3210 only if it's not the single_step_breakpoint we are hitting.
3211 This prevents a program from trapping a permanent breakpoint
3212 forever. */
3213 if (step_over_bkpt != null_ptid
3214 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3215 && (event_child->stepping
3216 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3217 {
3218 int increment_pc = 0;
3219 int breakpoint_kind = 0;
3220 CORE_ADDR stop_pc = event_child->stop_pc;
3221
3222 breakpoint_kind =
3223 the_target->breakpoint_kind_from_current_state (&stop_pc);
3224 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3225
3226 if (debug_threads)
3227 {
3228 debug_printf ("step-over for %s executed software breakpoint\n",
3229 target_pid_to_str (ptid_of (current_thread)));
3230 }
3231
3232 if (increment_pc != 0)
3233 {
3234 struct regcache *regcache
3235 = get_thread_regcache (current_thread, 1);
3236
3237 event_child->stop_pc += increment_pc;
3238 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3239
3240 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3241 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3242 }
3243 }
3244
3245 /* If this event was not handled before, and is not a SIGTRAP, we
3246 report it. SIGILL and SIGSEGV are also treated as traps in case
3247 a breakpoint is inserted at the current PC. If this target does
3248 not support internal breakpoints at all, we also report the
3249 SIGTRAP without further processing; it's of no concern to us. */
3250 maybe_internal_trap
3251 = (supports_breakpoints ()
3252 && (WSTOPSIG (w) == SIGTRAP
3253 || ((WSTOPSIG (w) == SIGILL
3254 || WSTOPSIG (w) == SIGSEGV)
3255 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3256
3257 if (maybe_internal_trap)
3258 {
3259 /* Handle anything that requires bookkeeping before deciding to
3260 report the event or continue waiting. */
3261
3262 /* First check if we can explain the SIGTRAP with an internal
3263 breakpoint, or if we should possibly report the event to GDB.
3264 Do this before anything that may remove or insert a
3265 breakpoint. */
3266 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3267
3268 /* We have a SIGTRAP, possibly a step-over dance has just
3269 finished. If so, tweak the state machine accordingly,
3270 reinsert breakpoints and delete any single-step
3271 breakpoints. */
3272 step_over_finished = finish_step_over (event_child);
3273
3274 /* Now invoke the callbacks of any internal breakpoints there. */
3275 check_breakpoints (event_child->stop_pc);
3276
3277 /* Handle tracepoint data collecting. This may overflow the
3278 trace buffer, and cause a tracing stop, removing
3279 breakpoints. */
3280 trace_event = handle_tracepoints (event_child);
3281
3282 if (bp_explains_trap)
3283 {
3284 if (debug_threads)
3285 debug_printf ("Hit a gdbserver breakpoint.\n");
3286 }
3287 }
3288 else
3289 {
3290 /* We have some other signal, possibly a step-over dance was in
3291 progress, and it should be cancelled too. */
3292 step_over_finished = finish_step_over (event_child);
3293 }
3294
3295 /* We have all the data we need. Either report the event to GDB, or
3296 resume threads and keep waiting for more. */
3297
3298 /* If we're collecting a fast tracepoint, finish the collection and
3299 move out of the jump pad before delivering a signal. See
3300 linux_stabilize_threads. */
3301
3302 if (WIFSTOPPED (w)
3303 && WSTOPSIG (w) != SIGTRAP
3304 && supports_fast_tracepoints ()
3305 && agent_loaded_p ())
3306 {
3307 if (debug_threads)
3308 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3309 "to defer or adjust it.\n",
3310 WSTOPSIG (w), lwpid_of (current_thread));
3311
3312 /* Allow debugging the jump pad itself. */
3313 if (current_thread->last_resume_kind != resume_step
3314 && maybe_move_out_of_jump_pad (event_child, &w))
3315 {
3316 enqueue_one_deferred_signal (event_child, &w);
3317
3318 if (debug_threads)
3319 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3320 WSTOPSIG (w), lwpid_of (current_thread));
3321
3322 linux_resume_one_lwp (event_child, 0, 0, NULL);
3323
3324 if (debug_threads)
3325 debug_exit ();
3326 return ignore_event (ourstatus);
3327 }
3328 }
3329
3330 if (event_child->collecting_fast_tracepoint
3331 != fast_tpoint_collect_result::not_collecting)
3332 {
3333 if (debug_threads)
3334 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3335 "Check if we're already there.\n",
3336 lwpid_of (current_thread),
3337 (int) event_child->collecting_fast_tracepoint);
3338
3339 trace_event = 1;
3340
3341 event_child->collecting_fast_tracepoint
3342 = linux_fast_tracepoint_collecting (event_child, NULL);
3343
3344 if (event_child->collecting_fast_tracepoint
3345 != fast_tpoint_collect_result::before_insn)
3346 {
3347 /* No longer need this breakpoint. */
3348 if (event_child->exit_jump_pad_bkpt != NULL)
3349 {
3350 if (debug_threads)
3351 debug_printf ("No longer need exit-jump-pad bkpt; removing it and "
3352 "stopping all threads momentarily.\n");
3353
3354 /* Other running threads could hit this breakpoint.
3355 We don't handle moribund locations like GDB does,
3356 instead we always pause all threads when removing
3357 breakpoints, so that any step-over or
3358 decr_pc_after_break adjustment is always taken
3359 care of while the breakpoint is still
3360 inserted. */
3361 stop_all_lwps (1, event_child);
3362
3363 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3364 event_child->exit_jump_pad_bkpt = NULL;
3365
3366 unstop_all_lwps (1, event_child);
3367
3368 gdb_assert (event_child->suspended >= 0);
3369 }
3370 }
3371
3372 if (event_child->collecting_fast_tracepoint
3373 == fast_tpoint_collect_result::not_collecting)
3374 {
3375 if (debug_threads)
3376 debug_printf ("fast tracepoint finished "
3377 "collecting successfully.\n");
3378
3379 /* We may have a deferred signal to report. */
3380 if (dequeue_one_deferred_signal (event_child, &w))
3381 {
3382 if (debug_threads)
3383 debug_printf ("dequeued one signal.\n");
3384 }
3385 else
3386 {
3387 if (debug_threads)
3388 debug_printf ("no deferred signals.\n");
3389
3390 if (stabilizing_threads)
3391 {
3392 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3393 ourstatus->value.sig = GDB_SIGNAL_0;
3394
3395 if (debug_threads)
3396 {
3397 debug_printf ("linux_wait_1 ret = %s, stopped "
3398 "while stabilizing threads\n",
3399 target_pid_to_str (ptid_of (current_thread)));
3400 debug_exit ();
3401 }
3402
3403 return ptid_of (current_thread);
3404 }
3405 }
3406 }
3407 }
3408
3409 /* Check whether GDB would be interested in this event. */
3410
3411 /* Check if GDB is interested in this syscall. */
3412 if (WIFSTOPPED (w)
3413 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3414 && !gdb_catch_this_syscall_p (event_child))
3415 {
3416 if (debug_threads)
3417 {
3418 debug_printf ("Ignored syscall for LWP %ld.\n",
3419 lwpid_of (current_thread));
3420 }
3421
3422 linux_resume_one_lwp (event_child, event_child->stepping,
3423 0, NULL);
3424
3425 if (debug_threads)
3426 debug_exit ();
3427 return ignore_event (ourstatus);
3428 }
3429
3430 /* If GDB is not interested in this signal, don't stop other
3431 threads, and don't report it to GDB. Just resume the inferior
3432 right away. We do this for threading-related signals as well as
3433 any that GDB specifically requested we ignore. But never ignore
3434 SIGSTOP if we sent it ourselves, and do not ignore signals when
3435 stepping - they may require special handling to skip the signal
3436 handler. Also never ignore signals that could be caused by a
3437 breakpoint. */
3438 if (WIFSTOPPED (w)
3439 && current_thread->last_resume_kind != resume_step
3440 && (
3441 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3442 (current_process ()->priv->thread_db != NULL
3443 && (WSTOPSIG (w) == __SIGRTMIN
3444 || WSTOPSIG (w) == __SIGRTMIN + 1))
3445 ||
3446 #endif
3447 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3448 && !(WSTOPSIG (w) == SIGSTOP
3449 && current_thread->last_resume_kind == resume_stop)
3450 && !linux_wstatus_maybe_breakpoint (w))))
3451 {
3452 siginfo_t info, *info_p;
3453
3454 if (debug_threads)
3455 debug_printf ("Ignored signal %d for LWP %ld.\n",
3456 WSTOPSIG (w), lwpid_of (current_thread));
3457
3458 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3459 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3460 info_p = &info;
3461 else
3462 info_p = NULL;
3463
3464 if (step_over_finished)
3465 {
3466 /* We cancelled this thread's step-over above. We still
3467 need to unsuspend all other LWPs, and set them back
3468 running again while the signal handler runs. */
3469 unsuspend_all_lwps (event_child);
3470
3471 /* Enqueue the pending signal info so that proceed_all_lwps
3472 doesn't lose it. */
3473 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3474
3475 proceed_all_lwps ();
3476 }
3477 else
3478 {
3479 linux_resume_one_lwp (event_child, event_child->stepping,
3480 WSTOPSIG (w), info_p);
3481 }
3482
3483 if (debug_threads)
3484 debug_exit ();
3485
3486 return ignore_event (ourstatus);
3487 }
3488
3489 /* Note that all addresses are always "out of the step range" when
3490 there's no range to begin with. */
3491 in_step_range = lwp_in_step_range (event_child);
3492
3493 /* If GDB wanted this thread to single step, and the thread is out
3494 of the step range, we always want to report the SIGTRAP, and let
3495 GDB handle it. Watchpoints should always be reported. So should
3496 signals we can't explain. A SIGTRAP we can't explain could be a
3497 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3498 we do, we'll be able to handle GDB breakpoints on top of internal
3499 breakpoints, by handling the internal breakpoint and still
3500 reporting the event to GDB. If we don't, we're out of luck, GDB
3501 won't see the breakpoint hit. If we see a single-step event but
3502 the thread should be continuing, don't pass the trap to gdb.
3503 That indicates that we had previously finished a single-step but
3504 left the single-step pending -- see
3505 complete_ongoing_step_over. */
3506 report_to_gdb = (!maybe_internal_trap
3507 || (current_thread->last_resume_kind == resume_step
3508 && !in_step_range)
3509 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3510 || (!in_step_range
3511 && !bp_explains_trap
3512 && !trace_event
3513 && !step_over_finished
3514 && !(current_thread->last_resume_kind == resume_continue
3515 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3516 || (gdb_breakpoint_here (event_child->stop_pc)
3517 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3518 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3519 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3520
3521 run_breakpoint_commands (event_child->stop_pc);
3522
3523 /* We found no reason GDB would want us to stop. We either hit one
3524 of our own breakpoints, or finished an internal step GDB
3525 shouldn't know about. */
3526 if (!report_to_gdb)
3527 {
3528 if (debug_threads)
3529 {
3530 if (bp_explains_trap)
3531 debug_printf ("Hit a gdbserver breakpoint.\n");
3532 if (step_over_finished)
3533 debug_printf ("Step-over finished.\n");
3534 if (trace_event)
3535 debug_printf ("Tracepoint event.\n");
3536 if (lwp_in_step_range (event_child))
3537 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3538 paddress (event_child->stop_pc),
3539 paddress (event_child->step_range_start),
3540 paddress (event_child->step_range_end));
3541 }
3542
3543 /* We're not reporting this breakpoint to GDB, so apply the
3544 decr_pc_after_break adjustment to the inferior's regcache
3545 ourselves. */
3546
3547 if (the_low_target.set_pc != NULL)
3548 {
3549 struct regcache *regcache
3550 = get_thread_regcache (current_thread, 1);
3551 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3552 }
3553
3554 if (step_over_finished)
3555 {
3556 /* If we have finished stepping over a breakpoint, we've
3557 stopped and suspended all LWPs momentarily except the
3558 stepping one. This is where we resume them all again.
3559 We're going to keep waiting, so use proceed, which
3560 handles stepping over the next breakpoint. */
3561 unsuspend_all_lwps (event_child);
3562 }
3563 else
3564 {
3565 /* Remove the single-step breakpoints if any. Note that
3566 there isn't single-step breakpoint if we finished stepping
3567 over. */
3568 if (can_software_single_step ()
3569 && has_single_step_breakpoints (current_thread))
3570 {
3571 stop_all_lwps (0, event_child);
3572 delete_single_step_breakpoints (current_thread);
3573 unstop_all_lwps (0, event_child);
3574 }
3575 }
3576
3577 if (debug_threads)
3578 debug_printf ("proceeding all threads.\n");
3579 proceed_all_lwps ();
3580
3581 if (debug_threads)
3582 debug_exit ();
3583
3584 return ignore_event (ourstatus);
3585 }
3586
3587 if (debug_threads)
3588 {
3589 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3590 {
3591 std::string str
3592 = target_waitstatus_to_string (&event_child->waitstatus);
3593
3594 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3595 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3596 }
3597 if (current_thread->last_resume_kind == resume_step)
3598 {
3599 if (event_child->step_range_start == event_child->step_range_end)
3600 debug_printf ("GDB wanted to single-step, reporting event.\n");
3601 else if (!lwp_in_step_range (event_child))
3602 debug_printf ("Out of step range, reporting event.\n");
3603 }
3604 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3605 debug_printf ("Stopped by watchpoint.\n");
3606 else if (gdb_breakpoint_here (event_child->stop_pc))
3607 debug_printf ("Stopped by GDB breakpoint.\n");
3608 debug_printf ("Hit a non-gdbserver trap event.\n");
3610 }
3611
3612 /* Alright, we're going to report a stop. */
3613
3614 /* Remove single-step breakpoints. */
3615 if (can_software_single_step ())
3616 {
3617 /* Decide whether to remove single-step breakpoints. If we do
3618 remove them, stop all lwps first, so that other threads won't
3619 hit a breakpoint left in stale memory. */
3620 int remove_single_step_breakpoints_p = 0;
3621
3622 if (non_stop)
3623 {
3624 remove_single_step_breakpoints_p
3625 = has_single_step_breakpoints (current_thread);
3626 }
3627 else
3628 {
3629 /* In all-stop, a stop reply cancels all previous resume
3630 requests. Delete all single-step breakpoints. */
3631
3632 find_thread ([&] (thread_info *thread) {
3633 if (has_single_step_breakpoints (thread))
3634 {
3635 remove_single_step_breakpoints_p = 1;
3636 return true;
3637 }
3638
3639 return false;
3640 });
3641 }
3642
3643 if (remove_single_step_breakpoints_p)
3644 {
3645 /* If we remove single-step breakpoints from memory, stop all lwps,
3646 so that other threads won't hit the breakpoint in the stale
3647 memory. */
3648 stop_all_lwps (0, event_child);
3649
3650 if (non_stop)
3651 {
3652 gdb_assert (has_single_step_breakpoints (current_thread));
3653 delete_single_step_breakpoints (current_thread);
3654 }
3655 else
3656 {
3657 for_each_thread ([] (thread_info *thread){
3658 if (has_single_step_breakpoints (thread))
3659 delete_single_step_breakpoints (thread);
3660 });
3661 }
3662
3663 unstop_all_lwps (0, event_child);
3664 }
3665 }
3666
3667 if (!stabilizing_threads)
3668 {
3669 /* In all-stop, stop all threads. */
3670 if (!non_stop)
3671 stop_all_lwps (0, NULL);
3672
3673 if (step_over_finished)
3674 {
3675 if (!non_stop)
3676 {
3677 /* If we were doing a step-over, all other threads but
3678 the stepping one had been paused in start_step_over,
3679 with their suspend counts incremented. We don't want
3680 to do a full unstop/unpause, because we're in
3681 all-stop mode (so we want threads stopped), but we
3682 still need to unsuspend the other threads, to
3683 decrement their `suspended' count back. */
3684 unsuspend_all_lwps (event_child);
3685 }
3686 else
3687 {
3688 /* If we just finished a step-over, then all threads had
3689 been momentarily paused. In all-stop, that's fine,
3690 we want threads stopped by now anyway. In non-stop,
3691 we need to re-resume threads that GDB wanted to be
3692 running. */
3693 unstop_all_lwps (1, event_child);
3694 }
3695 }
3696
3697 /* If we're not waiting for a specific LWP, choose an event LWP
3698 from among those that have had events. Giving equal priority
3699 to all LWPs that have had events helps prevent
3700 starvation. */
3701 if (ptid == minus_one_ptid)
3702 {
3703 event_child->status_pending_p = 1;
3704 event_child->status_pending = w;
3705
3706 select_event_lwp (&event_child);
3707
3708 /* current_thread and event_child must stay in sync. */
3709 current_thread = get_lwp_thread (event_child);
3710
3711 event_child->status_pending_p = 0;
3712 w = event_child->status_pending;
3713 }
3714
3715
3716 /* Stabilize threads (move out of jump pads). */
3717 if (!non_stop)
3718 stabilize_threads ();
3719 }
3720 else
3721 {
3722 /* If we just finished a step-over, then all threads had been
3723 momentarily paused. In all-stop, that's fine, we want
3724 threads stopped by now anyway. In non-stop, we need to
3725 re-resume threads that GDB wanted to be running. */
3726 if (step_over_finished)
3727 unstop_all_lwps (1, event_child);
3728 }
3729
3730 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3731 {
3732 /* If the reported event is an exit, fork, vfork or exec, let
3733 GDB know. */
3734
3735 /* Break the unreported fork relationship chain. */
3736 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3737 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3738 {
3739 event_child->fork_relative->fork_relative = NULL;
3740 event_child->fork_relative = NULL;
3741 }
3742
3743 *ourstatus = event_child->waitstatus;
3744 /* Clear the event lwp's waitstatus since we handled it already. */
3745 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3746 }
3747 else
3748 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3749
3750 /* Now that we've selected our final event LWP, un-adjust its PC if
3751 it was a software breakpoint, and the client doesn't know we can
3752 adjust the PC ourselves. */
3753 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3754 && !cs.swbreak_feature)
3755 {
3756 int decr_pc = the_low_target.decr_pc_after_break;
3757
3758 if (decr_pc != 0)
3759 {
3760 struct regcache *regcache
3761 = get_thread_regcache (current_thread, 1);
3762 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3763 }
3764 }
3765
3766 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3767 {
3768 get_syscall_trapinfo (event_child,
3769 &ourstatus->value.syscall_number);
3770 ourstatus->kind = event_child->syscall_state;
3771 }
3772 else if (current_thread->last_resume_kind == resume_stop
3773 && WSTOPSIG (w) == SIGSTOP)
3774 {
3775 /* A thread that GDB requested to stop with vCont;t stopped
3776 cleanly, so report it as SIG0. The use of SIGSTOP is an
3777 implementation detail. */
3778 ourstatus->value.sig = GDB_SIGNAL_0;
3779 }
3780 else if (current_thread->last_resume_kind == resume_stop
3781 && WSTOPSIG (w) != SIGSTOP)
3782 {
3783 /* A thread that GDB requested to stop with vCont;t, but which
3784 stopped for some other reason. */
3785 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3786 }
3787 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3788 {
3789 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3790 }
3791
3792 gdb_assert (step_over_bkpt == null_ptid);
3793
3794 if (debug_threads)
3795 {
3796 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3797 target_pid_to_str (ptid_of (current_thread)),
3798 ourstatus->kind, ourstatus->value.sig);
3799 debug_exit ();
3800 }
3801
3802 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3803 return filter_exit_event (event_child, ourstatus);
3804
3805 return ptid_of (current_thread);
3806 }
3807
3808 /* Get rid of any pending event in the pipe. */
3809 static void
3810 async_file_flush (void)
3811 {
3812 int ret;
3813 char buf;
3814
3815 do
3816 ret = read (linux_event_pipe[0], &buf, 1);
3817 while (ret >= 0 || (ret == -1 && errno == EINTR));
3818 }
3819
3820 /* Put something in the pipe, so the event loop wakes up. */
3821 static void
3822 async_file_mark (void)
3823 {
3824 int ret;
3825
3826 async_file_flush ();
3827
3828 do
3829 ret = write (linux_event_pipe[1], "+", 1);
3830 while (ret == 0 || (ret == -1 && errno == EINTR));
3831
3832 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3833 be awakened anyway. */
3834 }
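
/* The pair of helpers above implements the classic self-pipe pattern:
   a writer posts a byte into a non-blocking pipe, and the event loop,
   blocked on the read end, wakes up.  The following sketch is purely
   illustrative and not part of gdbserver ("event_pipe" and both
   function names are hypothetical); it shows the same pattern in
   isolation.  */

#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int event_pipe[2];

/* Create the pipe and make both ends non-blocking, so that marking a
   full pipe never blocks and flushing an empty one never hangs.  */

static int
event_pipe_init (void)
{
  if (pipe (event_pipe) != 0)
    return -1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

/* Block until some writer has posted at least one byte.  */

static void
event_pipe_wait (void)
{
  struct pollfd pfd = { event_pipe[0], POLLIN, 0 };

  poll (&pfd, 1, -1);
}
#endif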
3835
3836 static ptid_t
3837 linux_wait (ptid_t ptid,
3838 struct target_waitstatus *ourstatus, int target_options)
3839 {
3840 ptid_t event_ptid;
3841
3842 /* Flush the async file first. */
3843 if (target_is_async_p ())
3844 async_file_flush ();
3845
3846 do
3847 {
3848 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3849 }
3850 while ((target_options & TARGET_WNOHANG) == 0
3851 && event_ptid == null_ptid
3852 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3853
3854 /* If at least one stop was reported, there may be more. A single
3855 SIGCHLD can signal more than one child stop. */
3856 if (target_is_async_p ()
3857 && (target_options & TARGET_WNOHANG) != 0
3858 && event_ptid != null_ptid)
3859 async_file_mark ();
3860
3861 return event_ptid;
3862 }
3863
3864 /* Send a signal to an LWP. */
3865
3866 static int
3867 kill_lwp (unsigned long lwpid, int signo)
3868 {
3869 int ret;
3870
3871 errno = 0;
3872 ret = syscall (__NR_tkill, lwpid, signo);
3873 if (errno == ENOSYS)
3874 {
3875 /* If tkill fails, then we are not using nptl threads, a
3876 configuration we no longer support. */
3877 perror_with_name (("tkill"));
3878 }
3879 return ret;
3880 }
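
/* Unlike kill (), which addresses the whole thread group and lets the
   kernel deliver the signal to any eligible thread, tkill directs the
   signal at one specific LWP.  The newer tgkill syscall additionally
   takes the thread-group id, guarding against LWP-id reuse.  A
   hypothetical tgkill-based variant of the helper above (a sketch,
   not gdbserver code) could look like this.  */

#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
kill_one_lwp (pid_t tgid, pid_t lwpid, int signo)
{
  /* tgkill (Linux 2.5.75 and later) delivers SIGNO only if LWPID is
     still a member of thread group TGID.  */
  return syscall (SYS_tgkill, tgid, lwpid, signo);
}
#endif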
3881
3882 void
3883 linux_stop_lwp (struct lwp_info *lwp)
3884 {
3885 send_sigstop (lwp);
3886 }
3887
3888 static void
3889 send_sigstop (struct lwp_info *lwp)
3890 {
3891 int pid;
3892
3893 pid = lwpid_of (get_lwp_thread (lwp));
3894
3895 /* If we already have a pending stop signal for this LWP, don't
3896 send another. */
3897 if (lwp->stop_expected)
3898 {
3899 if (debug_threads)
3900 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3901
3902 return;
3903 }
3904
3905 if (debug_threads)
3906 debug_printf ("Sending sigstop to lwp %d\n", pid);
3907
3908 lwp->stop_expected = 1;
3909 kill_lwp (pid, SIGSTOP);
3910 }
3911
3912 static void
3913 send_sigstop (thread_info *thread, lwp_info *except)
3914 {
3915 struct lwp_info *lwp = get_thread_lwp (thread);
3916
3917 /* Ignore EXCEPT. */
3918 if (lwp == except)
3919 return;
3920
3921 if (lwp->stopped)
3922 return;
3923
3924 send_sigstop (lwp);
3925 }
3926
3927 /* Increment the suspend count of an LWP, and stop it, if not stopped
3928 yet. */
3929 static void
3930 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3931 {
3932 struct lwp_info *lwp = get_thread_lwp (thread);
3933
3934 /* Ignore EXCEPT. */
3935 if (lwp == except)
3936 return;
3937
3938 lwp_suspended_inc (lwp);
3939
3940 send_sigstop (thread, except);
3941 }
3942
3943 static void
3944 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3945 {
3946 /* Store the exit status for later. */
3947 lwp->status_pending_p = 1;
3948 lwp->status_pending = wstat;
3949
3950 /* Store in waitstatus as well, as there's nothing else to process
3951 for this event. */
3952 if (WIFEXITED (wstat))
3953 {
3954 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3955 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3956 }
3957 else if (WIFSIGNALED (wstat))
3958 {
3959 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3960 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3961 }
3962
3963 /* Prevent trying to stop it. */
3964 lwp->stopped = 1;
3965
3966 /* No further stops are expected from a dead lwp. */
3967 lwp->stop_expected = 0;
3968 }
3969
3970 /* Return true if LWP has exited already, and has a pending exit event
3971 to report to GDB. */
3972
3973 static int
3974 lwp_is_marked_dead (struct lwp_info *lwp)
3975 {
3976 return (lwp->status_pending_p
3977 && (WIFEXITED (lwp->status_pending)
3978 || WIFSIGNALED (lwp->status_pending)));
3979 }
3980
3981 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3982
3983 static void
3984 wait_for_sigstop (void)
3985 {
3986 struct thread_info *saved_thread;
3987 ptid_t saved_tid;
3988 int wstat;
3989 int ret;
3990
3991 saved_thread = current_thread;
3992 if (saved_thread != NULL)
3993 saved_tid = saved_thread->id;
3994 else
3995 saved_tid = null_ptid; /* avoid bogus unused warning */
3996
3997 if (debug_threads)
3998 debug_printf ("wait_for_sigstop: pulling events\n");
3999
4000 /* Passing NULL_PTID as filter indicates we want all events to be
4001 left pending. Eventually this returns when there are no
4002 unwaited-for children left. */
4003 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4004 &wstat, __WALL);
4005 gdb_assert (ret == -1);
4006
4007 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4008 current_thread = saved_thread;
4009 else
4010 {
4011 if (debug_threads)
4012 debug_printf ("Previously current thread died.\n");
4013
4014 /* We can't change the current inferior behind GDB's back,
4015 otherwise, a subsequent command may apply to the wrong
4016 process. */
4017 current_thread = NULL;
4018 }
4019 }
4020
4021 /* Returns true if THREAD is stopped in a jump pad, and we can't
4022 move it out, because we need to report the stop event to GDB. For
4023 example, if the user puts a breakpoint in the jump pad, it's
4024 because she wants to debug it. */
4025
4026 static bool
4027 stuck_in_jump_pad_callback (thread_info *thread)
4028 {
4029 struct lwp_info *lwp = get_thread_lwp (thread);
4030
4031 if (lwp->suspended != 0)
4032 {
4033 internal_error (__FILE__, __LINE__,
4034 "LWP %ld is suspended, suspended=%d\n",
4035 lwpid_of (thread), lwp->suspended);
4036 }
4037 gdb_assert (lwp->stopped);
4038
4039 /* Allow debugging the jump pad, gdb_collect, etc. */
4040 return (supports_fast_tracepoints ()
4041 && agent_loaded_p ()
4042 && (gdb_breakpoint_here (lwp->stop_pc)
4043 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4044 || thread->last_resume_kind == resume_step)
4045 && (linux_fast_tracepoint_collecting (lwp, NULL)
4046 != fast_tpoint_collect_result::not_collecting));
4047 }
4048
4049 static void
4050 move_out_of_jump_pad_callback (thread_info *thread)
4051 {
4052 struct thread_info *saved_thread;
4053 struct lwp_info *lwp = get_thread_lwp (thread);
4054 int *wstat;
4055
4056 if (lwp->suspended != 0)
4057 {
4058 internal_error (__FILE__, __LINE__,
4059 "LWP %ld is suspended, suspended=%d\n",
4060 lwpid_of (thread), lwp->suspended);
4061 }
4062 gdb_assert (lwp->stopped);
4063
4064 /* For gdb_breakpoint_here. */
4065 saved_thread = current_thread;
4066 current_thread = thread;
4067
4068 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4069
4070 /* Allow debugging the jump pad, gdb_collect, etc. */
4071 if (!gdb_breakpoint_here (lwp->stop_pc)
4072 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4073 && thread->last_resume_kind != resume_step
4074 && maybe_move_out_of_jump_pad (lwp, wstat))
4075 {
4076 if (debug_threads)
4077 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4078 lwpid_of (thread));
4079
4080 if (wstat)
4081 {
4082 lwp->status_pending_p = 0;
4083 enqueue_one_deferred_signal (lwp, wstat);
4084
4085 if (debug_threads)
4086 debug_printf ("Signal %d for LWP %ld deferred "
4087 "(in jump pad)\n",
4088 WSTOPSIG (*wstat), lwpid_of (thread));
4089 }
4090
4091 linux_resume_one_lwp (lwp, 0, 0, NULL);
4092 }
4093 else
4094 lwp_suspended_inc (lwp);
4095
4096 current_thread = saved_thread;
4097 }
4098
4099 static bool
4100 lwp_running (thread_info *thread)
4101 {
4102 struct lwp_info *lwp = get_thread_lwp (thread);
4103
4104 if (lwp_is_marked_dead (lwp))
4105 return false;
4106
4107 return !lwp->stopped;
4108 }
4109
4110 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4111 If SUSPEND, then also increase the suspend count of every LWP,
4112 except EXCEPT. */
4113
4114 static void
4115 stop_all_lwps (int suspend, struct lwp_info *except)
4116 {
4117 /* Should not be called recursively. */
4118 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4119
4120 if (debug_threads)
4121 {
4122 debug_enter ();
4123 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4124 suspend ? "stop-and-suspend" : "stop",
4125 except != NULL
4126 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4127 : "none");
4128 }
4129
4130 stopping_threads = (suspend
4131 ? STOPPING_AND_SUSPENDING_THREADS
4132 : STOPPING_THREADS);
4133
4134 if (suspend)
4135 for_each_thread ([&] (thread_info *thread)
4136 {
4137 suspend_and_send_sigstop (thread, except);
4138 });
4139 else
4140 for_each_thread ([&] (thread_info *thread)
4141 {
4142 send_sigstop (thread, except);
4143 });
4144
4145 wait_for_sigstop ();
4146 stopping_threads = NOT_STOPPING_THREADS;
4147
4148 if (debug_threads)
4149 {
4150 debug_printf ("stop_all_lwps done, setting stopping_threads "
4151 "back to !stopping\n");
4152 debug_exit ();
4153 }
4154 }
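
/* A stop_all_lwps call must be undone by a matching unstop_all_lwps
   (or proceed_all_lwps) with the same suspend/unsuspend intent, so
   that every LWP's `suspended' count returns to its prior value.  A
   minimal sketch of the discipline, as used throughout this file
   (do_work is a hypothetical placeholder):  */

#if 0
extern void do_work (void);

static void
with_all_lwps_stopped (struct lwp_info *event_lwp)
{
  /* Plain stop, no suspend counting...  */
  stop_all_lwps (0, event_lwp);
  do_work ();
  /* ... so unstop without unsuspending.  */
  unstop_all_lwps (0, event_lwp);
}
#endif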
4155
4156 /* Enqueue one signal in the chain of signals which need to be
4157 delivered to this process on next resume. */
4158
4159 static void
4160 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4161 {
4162 struct pending_signals *p_sig = XNEW (struct pending_signals);
4163
4164 p_sig->prev = lwp->pending_signals;
4165 p_sig->signal = signal;
4166 if (info == NULL)
4167 memset (&p_sig->info, 0, sizeof (siginfo_t));
4168 else
4169 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4170 lwp->pending_signals = p_sig;
4171 }
4172
4173 /* Install breakpoints for software single stepping. */
4174
4175 static void
4176 install_software_single_step_breakpoints (struct lwp_info *lwp)
4177 {
4178 struct thread_info *thread = get_lwp_thread (lwp);
4179 struct regcache *regcache = get_thread_regcache (thread, 1);
4180
4181 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4182
4183 current_thread = thread;
4184 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4185
4186 for (CORE_ADDR pc : next_pcs)
4187 set_single_step_breakpoint (pc, current_ptid);
4188 }
4189
4190 /* Single step via hardware or software single step.
4191 Return 1 if hardware single stepping is used, 0 if software single
4192 stepping is used or single stepping is not supported. */
4193
4194 static int
4195 single_step (struct lwp_info* lwp)
4196 {
4197 int step = 0;
4198
4199 if (can_hardware_single_step ())
4200 {
4201 step = 1;
4202 }
4203 else if (can_software_single_step ())
4204 {
4205 install_software_single_step_breakpoints (lwp);
4206 step = 0;
4207 }
4208 else
4209 {
4210 if (debug_threads)
4211 debug_printf ("stepping is not implemented on this target\n");
4212 }
4213
4214 return step;
4215 }
4216
4217 /* The signal can be delivered to the inferior if we are not trying to
4218 finish a fast tracepoint collect. Since a signal can be delivered
4219 during the step-over, the program may enter the signal handler and
4220 trap again after returning from it. We can live with the spurious
4221 double traps. */
4222
4223 static int
4224 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4225 {
4226 return (lwp->collecting_fast_tracepoint
4227 == fast_tpoint_collect_result::not_collecting);
4228 }
4229
4230 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4231 SIGNAL is nonzero, give it that signal. */
4232
4233 static void
4234 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4235 int step, int signal, siginfo_t *info)
4236 {
4237 struct thread_info *thread = get_lwp_thread (lwp);
4238 struct thread_info *saved_thread;
4239 int ptrace_request;
4240 struct process_info *proc = get_thread_process (thread);
4241
4242 /* Note that the target description may not be initialised
4243 (proc->tdesc == NULL) at this point, because the program hasn't
4244 stopped at its first instruction yet; GDBserver is still skipping
4245 the extra traps from the wrapper program (see option --wrapper).
4246 Code in this function that requires register access should be
4247 guarded by a check on proc->tdesc. */
4248
4249 if (lwp->stopped == 0)
4250 return;
4251
4252 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4253
4254 fast_tpoint_collect_result fast_tp_collecting
4255 = lwp->collecting_fast_tracepoint;
4256
4257 gdb_assert (!stabilizing_threads
4258 || (fast_tp_collecting
4259 != fast_tpoint_collect_result::not_collecting));
4260
4261 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4262 user used the "jump" command, or "set $pc = foo"). */
4263 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4264 {
4265 /* Collecting 'while-stepping' actions doesn't make sense
4266 anymore. */
4267 release_while_stepping_state_list (thread);
4268 }
4269
4270 /* If we have pending signals or status, and a new signal, enqueue the
4271 signal. Also enqueue the signal if it can't be delivered to the
4272 inferior right now. */
4273 if (signal != 0
4274 && (lwp->status_pending_p
4275 || lwp->pending_signals != NULL
4276 || !lwp_signal_can_be_delivered (lwp)))
4277 {
4278 enqueue_pending_signal (lwp, signal, info);
4279
4280 /* Postpone any pending signal. It was enqueued above. */
4281 signal = 0;
4282 }
4283
4284 if (lwp->status_pending_p)
4285 {
4286 if (debug_threads)
4287 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4288 " has pending status\n",
4289 lwpid_of (thread), step ? "step" : "continue",
4290 lwp->stop_expected ? "expected" : "not expected");
4291 return;
4292 }
4293
4294 saved_thread = current_thread;
4295 current_thread = thread;
4296
4297 /* This bit needs some thinking about. If we get a signal that
4298 we must report while a single-step reinsert is still pending,
4299 we often end up resuming the thread. It might be better to
4300 (ew) allow a stack of pending events; then we could be sure that
4301 the reinsert happened right away and not lose any signals.
4302
4303 Making this stack would also shrink the window in which breakpoints are
4304 uninserted (see comment in linux_wait_for_lwp) but not enough for
4305 complete correctness, so it won't solve that problem. It may be
4306 worthwhile just to solve this one, however. */
4307 if (lwp->bp_reinsert != 0)
4308 {
4309 if (debug_threads)
4310 debug_printf (" pending reinsert at 0x%s\n",
4311 paddress (lwp->bp_reinsert));
4312
4313 if (can_hardware_single_step ())
4314 {
4315 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4316 {
4317 if (step == 0)
4318 warning ("BAD - reinserting but not stepping.");
4319 if (lwp->suspended)
4320 warning ("BAD - reinserting and suspended(%d).",
4321 lwp->suspended);
4322 }
4323 }
4324
4325 step = maybe_hw_step (thread);
4326 }
4327
4328 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4329 {
4330 if (debug_threads)
4331 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4332 " (exit-jump-pad-bkpt)\n",
4333 lwpid_of (thread));
4334 }
4335 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4336 {
4337 if (debug_threads)
4338 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4339 " single-stepping\n",
4340 lwpid_of (thread));
4341
4342 if (can_hardware_single_step ())
4343 step = 1;
4344 else
4345 {
4346 internal_error (__FILE__, __LINE__,
4347 "moving out of jump pad single-stepping"
4348 " not implemented on this target");
4349 }
4350 }
4351
4352 /* If we have while-stepping actions in this thread, set it stepping.
4353 If we have a signal to deliver, its disposition may or may not be
4354 set to SIG_IGN; we don't know. Assume it is not ignored, and allow
4355 collecting while-stepping into a signal handler. A possible smart
4356 thing to do would be to set an internal breakpoint at the signal
4357 return address, continue, and carry on catching this while-stepping
4358 action only when that breakpoint is hit. A future
4359 enhancement. */
4360 if (thread->while_stepping != NULL)
4361 {
4362 if (debug_threads)
4363 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4364 lwpid_of (thread));
4365
4366 step = single_step (lwp);
4367 }
4368
4369 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4370 {
4371 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4372
4373 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4374
4375 if (debug_threads)
4376 {
4377 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4378 (long) lwp->stop_pc);
4379 }
4380 }
4381
4382 /* If we have pending signals, consume one if it can be delivered to
4383 the inferior. */
4384 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4385 {
4386 struct pending_signals **p_sig;
4387
4388 p_sig = &lwp->pending_signals;
4389 while ((*p_sig)->prev != NULL)
4390 p_sig = &(*p_sig)->prev;
4391
4392 signal = (*p_sig)->signal;
4393 if ((*p_sig)->info.si_signo != 0)
4394 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4395 &(*p_sig)->info);
4396
4397 free (*p_sig);
4398 *p_sig = NULL;
4399 }
4400
4401 if (debug_threads)
4402 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4403 lwpid_of (thread), step ? "step" : "continue", signal,
4404 lwp->stop_expected ? "expected" : "not expected");
4405
4406 if (the_low_target.prepare_to_resume != NULL)
4407 the_low_target.prepare_to_resume (lwp);
4408
4409 regcache_invalidate_thread (thread);
4410 errno = 0;
4411 lwp->stepping = step;
4412 if (step)
4413 ptrace_request = PTRACE_SINGLESTEP;
4414 else if (gdb_catching_syscalls_p (lwp))
4415 ptrace_request = PTRACE_SYSCALL;
4416 else
4417 ptrace_request = PTRACE_CONT;
4418 ptrace (ptrace_request,
4419 lwpid_of (thread),
4420 (PTRACE_TYPE_ARG3) 0,
4421 /* Coerce to a uintptr_t first to avoid potential gcc warning
4422 of coercing an 8 byte integer to a 4 byte pointer. */
4423 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4424
4425 current_thread = saved_thread;
4426 if (errno)
4427 perror_with_name ("resuming thread");
4428
4429 /* Successfully resumed. Clear state that no longer makes sense,
4430 and mark the LWP as running. Must not do this before resuming
4431 otherwise if that fails other code will be confused. E.g., we'd
4432 later try to stop the LWP and hang forever waiting for a stop
4433 status. Note that we must not throw after this is cleared,
4434 otherwise handle_zombie_lwp_error would get confused. */
4435 lwp->stopped = 0;
4436 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4437 }
4438
4439 /* Called when we try to resume a stopped LWP and that errors out. If
4440 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4441 or about to become one), discard the error, clear any pending status
4442 the LWP may have, and return true (we'll collect the exit status
4443 soon enough). Otherwise, return false. */
4444
4445 static int
4446 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4447 {
4448 struct thread_info *thread = get_lwp_thread (lp);
4449
4450 /* If we get an error after resuming the LWP successfully, we'd
4451 confuse !T state for the LWP being gone. */
4452 gdb_assert (lp->stopped);
4453
4454 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4455 because even if ptrace failed with ESRCH, the tracee may be "not
4456 yet fully dead", but already refusing ptrace requests. In that
4457 case the tracee has 'R (Running)' state for a little bit
4458 (observed in Linux 3.18). See also the note on ESRCH in the
4459 ptrace(2) man page. Instead, check whether the LWP has any state
4460 other than ptrace-stopped. */
4461
4462 /* Don't assume anything if /proc/PID/status can't be read. */
4463 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4464 {
4465 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4466 lp->status_pending_p = 0;
4467 return 1;
4468 }
4469 return 0;
4470 }
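
/* The check above ultimately inspects the "State:" line of
   /proc/PID/status, looking for ptrace-stop.  A simplified standalone
   version of that test (a sketch with minimal error handling,
   assuming a kernel that reports ptrace-stop as 't') might be:  */

#if 0
#include <stdio.h>

static int
lwp_is_trace_stopped (long lwpid)
{
  char path[64], line[128], state = '?';
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%ld/status", lwpid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "State:\t%c", &state) == 1)
      break;
  fclose (f);
  return state == 't';
}
#endif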
4471
4472 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4473 disappears while we try to resume it. */
4474
4475 static void
4476 linux_resume_one_lwp (struct lwp_info *lwp,
4477 int step, int signal, siginfo_t *info)
4478 {
4479 try
4480 {
4481 linux_resume_one_lwp_throw (lwp, step, signal, info);
4482 }
4483 catch (const gdb_exception_error &ex)
4484 {
4485 if (!check_ptrace_stopped_lwp_gone (lwp))
4486 throw;
4487 }
4488 }
4489
4490 /* This function is called once per thread via for_each_thread.
4491 We look up which resume request applies to THREAD and mark it with a
4492 pointer to the appropriate resume request.
4493
4494 This algorithm is O(threads * resume elements), but the number of
4495 resume elements is small (and will remain small at least until GDB
4496 supports thread suspension). */
4497
4498 static void
4499 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4500 {
4501 struct lwp_info *lwp = get_thread_lwp (thread);
4502
4503 for (int ndx = 0; ndx < n; ndx++)
4504 {
4505 ptid_t ptid = resume[ndx].thread;
4506 if (ptid == minus_one_ptid
4507 || ptid == thread->id
4508 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4509 of PID'. */
4510 || (ptid.pid () == pid_of (thread)
4511 && (ptid.is_pid ()
4512 || ptid.lwp () == -1)))
4513 {
4514 if (resume[ndx].kind == resume_stop
4515 && thread->last_resume_kind == resume_stop)
4516 {
4517 if (debug_threads)
4518 debug_printf ("already %s LWP %ld at GDB's request\n",
4519 (thread->last_status.kind
4520 == TARGET_WAITKIND_STOPPED)
4521 ? "stopped"
4522 : "stopping",
4523 lwpid_of (thread));
4524
4525 continue;
4526 }
4527
4528 /* Ignore (wildcard) resume requests for already-resumed
4529 threads. */
4530 if (resume[ndx].kind != resume_stop
4531 && thread->last_resume_kind != resume_stop)
4532 {
4533 if (debug_threads)
4534 debug_printf ("already %s LWP %ld at GDB's request\n",
4535 (thread->last_resume_kind
4536 == resume_step)
4537 ? "stepping"
4538 : "continuing",
4539 lwpid_of (thread));
4540 continue;
4541 }
4542
4543 /* Don't let wildcard resumes resume fork children that GDB
4544 does not yet know are new fork children. */
4545 if (lwp->fork_relative != NULL)
4546 {
4547 struct lwp_info *rel = lwp->fork_relative;
4548
4549 if (rel->status_pending_p
4550 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4551 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4552 {
4553 if (debug_threads)
4554 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4555 lwpid_of (thread));
4556 continue;
4557 }
4558 }
4559
4560 /* If the thread has a pending event that has already been
4561 reported to GDBserver core, but GDB has not pulled the
4562 event out of the vStopped queue yet, likewise, ignore the
4563 (wildcard) resume request. */
4564 if (in_queued_stop_replies (thread->id))
4565 {
4566 if (debug_threads)
4567 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4568 lwpid_of (thread));
4569 continue;
4570 }
4571
4572 lwp->resume = &resume[ndx];
4573 thread->last_resume_kind = lwp->resume->kind;
4574
4575 lwp->step_range_start = lwp->resume->step_range_start;
4576 lwp->step_range_end = lwp->resume->step_range_end;
4577
4578 /* If we had a deferred signal to report, dequeue one now.
4579 This can happen if LWP gets more than one signal while
4580 trying to get out of a jump pad. */
4581 if (lwp->stopped
4582 && !lwp->status_pending_p
4583 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4584 {
4585 lwp->status_pending_p = 1;
4586
4587 if (debug_threads)
4588 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4589 "leaving status pending.\n",
4590 WSTOPSIG (lwp->status_pending),
4591 lwpid_of (thread));
4592 }
4593
4594 return;
4595 }
4596 }
4597
4598 /* No resume action for this thread. */
4599 lwp->resume = NULL;
4600 }
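
/* The RESUME array handed to this function is gdbserver's decoding of
   GDB's vCont packet.  As an illustration (all values hypothetical),
   a "vCont;s:p1234.5;c" request -- step thread 5 of process 0x1234,
   continue everything else -- would decode to roughly:  */

#if 0
#include <string.h>

static void
example_resume_request (void)
{
  struct thread_resume resume[2];

  memset (resume, 0, sizeof (resume));

  /* "s:p1234.5" -- step only that one thread.  */
  resume[0].thread = ptid_t (0x1234, 5);
  resume[0].kind = resume_step;

  /* "c" -- all other threads continue, no signal.  */
  resume[1].thread = minus_one_ptid;
  resume[1].kind = resume_continue;

  linux_resume (resume, 2);
}
#endif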
4601
4602 /* find_thread callback for linux_resume. Return true if this lwp has an
4603 interesting status pending. */
4604
4605 static bool
4606 resume_status_pending_p (thread_info *thread)
4607 {
4608 struct lwp_info *lwp = get_thread_lwp (thread);
4609
4610 /* LWPs which will not be resumed are not interesting, because
4611 we might not wait for them next time through linux_wait. */
4612 if (lwp->resume == NULL)
4613 return false;
4614
4615 return thread_still_has_status_pending_p (thread);
4616 }
4617
4618 /* Return true if this lwp that GDB wants running is stopped at an
4619 internal breakpoint that we need to step over. This assumes that any
4620 required STOP_PC adjustment has already been propagated to the
4621 inferior's regcache. */
4622
4623 static bool
4624 need_step_over_p (thread_info *thread)
4625 {
4626 struct lwp_info *lwp = get_thread_lwp (thread);
4627 struct thread_info *saved_thread;
4628 CORE_ADDR pc;
4629 struct process_info *proc = get_thread_process (thread);
4630
4631 /* GDBserver is skipping the extra traps from the wrapper program;
4632 no step-over is needed. */
4633 if (proc->tdesc == NULL)
4634 return false;
4635
4636 /* LWPs which will not be resumed are not interesting, because we
4637 might not wait for them next time through linux_wait. */
4638
4639 if (!lwp->stopped)
4640 {
4641 if (debug_threads)
4642 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4643 lwpid_of (thread));
4644 return false;
4645 }
4646
4647 if (thread->last_resume_kind == resume_stop)
4648 {
4649 if (debug_threads)
4650 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4651 " stopped\n",
4652 lwpid_of (thread));
4653 return false;
4654 }
4655
4656 gdb_assert (lwp->suspended >= 0);
4657
4658 if (lwp->suspended)
4659 {
4660 if (debug_threads)
4661 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4662 lwpid_of (thread));
4663 return false;
4664 }
4665
4666 if (lwp->status_pending_p)
4667 {
4668 if (debug_threads)
4669 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4670 " status.\n",
4671 lwpid_of (thread));
4672 return false;
4673 }
4674
4675 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4676 or we have. */
4677 pc = get_pc (lwp);
4678
4679 /* If the PC has changed since we stopped, then don't do anything,
4680 and let the breakpoint/tracepoint be hit. This happens if, for
4681 instance, GDB handled the decr_pc_after_break subtraction itself,
4682 GDB is OOL stepping this thread, or the user has issued a "jump"
4683 command, or poked the thread's registers herself. */
4684 if (pc != lwp->stop_pc)
4685 {
4686 if (debug_threads)
4687 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4688 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4689 lwpid_of (thread),
4690 paddress (lwp->stop_pc), paddress (pc));
4691 return false;
4692 }
4693
4694 /* On a software single-step target, resume the inferior with the
4695 signal rather than stepping over. */
4696 if (can_software_single_step ()
4697 && lwp->pending_signals != NULL
4698 && lwp_signal_can_be_delivered (lwp))
4699 {
4700 if (debug_threads)
4701 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4702 " signals.\n",
4703 lwpid_of (thread));
4704
4705 return false;
4706 }
4707
4708 saved_thread = current_thread;
4709 current_thread = thread;
4710
4711 /* We can only step over breakpoints we know about. */
4712 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4713 {
4714 /* Don't step over a breakpoint that GDB expects to hit,
4715 though. If the condition is being evaluated on the target's side
4716 and it evaluates to false, step over this breakpoint as well. */
4717 if (gdb_breakpoint_here (pc)
4718 && gdb_condition_true_at_breakpoint (pc)
4719 && gdb_no_commands_at_breakpoint (pc))
4720 {
4721 if (debug_threads)
4722 debug_printf ("Need step over [LWP %ld]? yes, but found"
4723 " GDB breakpoint at 0x%s; skipping step over\n",
4724 lwpid_of (thread), paddress (pc));
4725
4726 current_thread = saved_thread;
4727 return false;
4728 }
4729 else
4730 {
4731 if (debug_threads)
4732 debug_printf ("Need step over [LWP %ld]? yes, "
4733 "found breakpoint at 0x%s\n",
4734 lwpid_of (thread), paddress (pc));
4735
4736 /* We've found an lwp that needs stepping over --- return true so
4737 that find_thread stops looking. */
4738 current_thread = saved_thread;
4739
4740 return true;
4741 }
4742 }
4743
4744 current_thread = saved_thread;
4745
4746 if (debug_threads)
4747 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4748 " at 0x%s\n",
4749 lwpid_of (thread), paddress (pc));
4750
4751 return false;
4752 }
4753
4754 /* Start a step-over operation on LWP. When LWP is stopped at a
4755 breakpoint, we need to move the breakpoint out of the way to make
4756 progress. If we let other threads run while we do that, they may
4757 pass by the breakpoint location and miss hitting it. To avoid
4758 that, a step-over momentarily stops all threads while LWP is
4759 single-stepped, by either hardware or software, while the
4760 breakpoint is temporarily uninserted from the inferior. When the
4761 single-step finishes, we reinsert the breakpoint, and let all
4762 threads that are supposed to be running, run again. */
4763
4764 static int
4765 start_step_over (struct lwp_info *lwp)
4766 {
4767 struct thread_info *thread = get_lwp_thread (lwp);
4768 struct thread_info *saved_thread;
4769 CORE_ADDR pc;
4770 int step;
4771
4772 if (debug_threads)
4773 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4774 lwpid_of (thread));
4775
4776 stop_all_lwps (1, lwp);
4777
4778 if (lwp->suspended != 0)
4779 {
4780 internal_error (__FILE__, __LINE__,
4781 "LWP %ld suspended=%d\n", lwpid_of (thread),
4782 lwp->suspended);
4783 }
4784
4785 if (debug_threads)
4786 debug_printf ("Done stopping all threads for step-over.\n");
4787
4788 /* Note, we should always reach here with an already adjusted PC,
4789 either by GDB (if we're resuming due to GDB's request), or by our
4790 caller, if we just finished handling an internal breakpoint GDB
4791 shouldn't care about. */
4792 pc = get_pc (lwp);
4793
4794 saved_thread = current_thread;
4795 current_thread = thread;
4796
4797 lwp->bp_reinsert = pc;
4798 uninsert_breakpoints_at (pc);
4799 uninsert_fast_tracepoint_jumps_at (pc);
4800
4801 step = single_step (lwp);
4802
4803 current_thread = saved_thread;
4804
4805 linux_resume_one_lwp (lwp, step, 0, NULL);
4806
4807 /* Require next event from this LWP. */
4808 step_over_bkpt = thread->id;
4809 return 1;
4810 }
4811
4812 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4813 start_step_over, if still there, and delete any single-step
4814 breakpoints we've set, on non-hardware single-step targets. */
4815
4816 static int
4817 finish_step_over (struct lwp_info *lwp)
4818 {
4819 if (lwp->bp_reinsert != 0)
4820 {
4821 struct thread_info *saved_thread = current_thread;
4822
4823 if (debug_threads)
4824 debug_printf ("Finished step over.\n");
4825
4826 current_thread = get_lwp_thread (lwp);
4827
4828 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4829 may be no breakpoint to reinsert there by now. */
4830 reinsert_breakpoints_at (lwp->bp_reinsert);
4831 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4832
4833 lwp->bp_reinsert = 0;
4834
4835 /* Delete any single-step breakpoints. No longer needed. We
4836 don't have to worry about other threads hitting this trap,
4837 and later not being able to explain it, because we were
4838 stepping over a breakpoint, and we hold all threads but
4839 LWP stopped while doing that. */
4840 if (!can_hardware_single_step ())
4841 {
4842 gdb_assert (has_single_step_breakpoints (current_thread));
4843 delete_single_step_breakpoints (current_thread);
4844 }
4845
4846 step_over_bkpt = null_ptid;
4847 current_thread = saved_thread;
4848 return 1;
4849 }
4850 else
4851 return 0;
4852 }
4853
4854 /* If there's a step over in progress, wait until all threads stop
4855 (that is, until the stepping thread finishes its step), and
4856 unsuspend all lwps. The stepping thread ends with its status
4857 pending, which is processed later when we get back to processing
4858 events. */
4859
4860 static void
4861 complete_ongoing_step_over (void)
4862 {
4863 if (step_over_bkpt != null_ptid)
4864 {
4865 struct lwp_info *lwp;
4866 int wstat;
4867 int ret;
4868
4869 if (debug_threads)
4870 debug_printf ("detach: step over in progress, finish it first\n");
4871
4872 /* Passing NULL_PTID as filter indicates we want all events to
4873 be left pending. Eventually this returns when there are no
4874 unwaited-for children left. */
4875 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4876 &wstat, __WALL);
4877 gdb_assert (ret == -1);
4878
4879 lwp = find_lwp_pid (step_over_bkpt);
4880 if (lwp != NULL)
4881 finish_step_over (lwp);
4882 step_over_bkpt = null_ptid;
4883 unsuspend_all_lwps (lwp);
4884 }
4885 }
4886
4887 /* This function is called once per thread. We check the thread's resume
4888 request, which will tell us whether to resume, step, or leave the thread
4889 stopped; and what signal, if any, it should be sent.
4890
4891 For threads which we aren't explicitly told otherwise, we preserve
4892 the stepping flag; this is used for stepping over gdbserver-placed
4893 breakpoints.
4894
4895 If a pending status was set in any thread, we queue any needed
4896 signals, since we won't actually resume. We already have a pending
4897 event to report, so we don't need to preserve any step requests;
4898 they should be re-issued if necessary. */
4899
4900 static void
4901 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4902 {
4903 struct lwp_info *lwp = get_thread_lwp (thread);
4904 int leave_pending;
4905
4906 if (lwp->resume == NULL)
4907 return;
4908
4909 if (lwp->resume->kind == resume_stop)
4910 {
4911 if (debug_threads)
4912 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4913
4914 if (!lwp->stopped)
4915 {
4916 if (debug_threads)
4917 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4918
4919 /* Stop the thread, and wait for the event asynchronously,
4920 through the event loop. */
4921 send_sigstop (lwp);
4922 }
4923 else
4924 {
4925 if (debug_threads)
4926 debug_printf ("already stopped LWP %ld\n",
4927 lwpid_of (thread));
4928
4929 /* The LWP may have been stopped in an internal event that
4930 was not meant to be notified back to GDB (e.g., gdbserver
4931 breakpoint), so we should be reporting a stop event in
4932 this case too. */
4933
4934 /* If the thread already has a pending SIGSTOP, this is a
4935 no-op. Otherwise, something later will presumably resume
4936 the thread and this will cause it to cancel any pending
4937 operation, due to last_resume_kind == resume_stop. If
4938 the thread already has a pending status to report, we
4939 will still report it the next time we wait - see
4940 status_pending_p_callback. */
4941
4942 /* If we already have a pending signal to report, then
4943 there's no need to queue a SIGSTOP, as this means we're
4944 midway through moving the LWP out of the jumppad, and we
4945 will report the pending signal as soon as that is
4946 finished. */
4947 if (lwp->pending_signals_to_report == NULL)
4948 send_sigstop (lwp);
4949 }
4950
4951 /* For stop requests, we're done. */
4952 lwp->resume = NULL;
4953 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4954 return;
4955 }
4956
4957 /* If this thread which is about to be resumed has a pending status,
4958 then don't resume it - we can just report the pending status.
4959 Likewise if it is suspended, because e.g., another thread is
4960 stepping past a breakpoint. Make sure to queue any signals that
4961 would otherwise be sent. In all-stop mode, we base this decision
4962 on whether *any* thread has a pending status. If there's a
4963 thread that needs the step-over-breakpoint dance, then don't
4964 resume any other thread but that particular one. */
4965 leave_pending = (lwp->suspended
4966 || lwp->status_pending_p
4967 || leave_all_stopped);
4968
4969 /* If we have a new signal, enqueue the signal. */
4970 if (lwp->resume->sig != 0)
4971 {
4972 siginfo_t info, *info_p;
4973
4974 /* If this is the same signal we were previously stopped by,
4975 make sure to queue its siginfo. */
4976 if (WIFSTOPPED (lwp->last_status)
4977 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
4978 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
4979 (PTRACE_TYPE_ARG3) 0, &info) == 0)
4980 info_p = &info;
4981 else
4982 info_p = NULL;
4983
4984 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
4985 }
4986
4987 if (!leave_pending)
4988 {
4989 if (debug_threads)
4990 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4991
4992 proceed_one_lwp (thread, NULL);
4993 }
4994 else
4995 {
4996 if (debug_threads)
4997 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4998 }
4999
5000 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5001 lwp->resume = NULL;
5002 }
5003
5004 static void
5005 linux_resume (struct thread_resume *resume_info, size_t n)
5006 {
5007 struct thread_info *need_step_over = NULL;
5008
5009 if (debug_threads)
5010 {
5011 debug_enter ();
5012 debug_printf ("linux_resume:\n");
5013 }
5014
5015 for_each_thread ([&] (thread_info *thread)
5016 {
5017 linux_set_resume_request (thread, resume_info, n);
5018 });
5019
5020 /* If there is a thread which would otherwise be resumed, which has
5021 a pending status, then don't resume any threads - we can just
5022 report the pending status. Make sure to queue any signals that
5023 would otherwise be sent. In non-stop mode, we'll apply this
5024 logic to each thread individually. We consume all pending events
5025 before considering whether to start a step-over (in all-stop). */
5026 bool any_pending = false;
5027 if (!non_stop)
5028 any_pending = find_thread (resume_status_pending_p) != NULL;
5029
5030 /* If there is a thread which would otherwise be resumed, which is
5031 stopped at a breakpoint that needs stepping over, then don't
5032 resume any threads - have it step over the breakpoint with all
5033 other threads stopped, then resume all threads again. Make sure
5034 to queue any signals that would otherwise be delivered or
5035 queued. */
5036 if (!any_pending && supports_breakpoints ())
5037 need_step_over = find_thread (need_step_over_p);
5038
5039 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5040
5041 if (debug_threads)
5042 {
5043 if (need_step_over != NULL)
5044 debug_printf ("Not resuming all, need step over\n");
5045 else if (any_pending)
5046 debug_printf ("Not resuming, all-stop and found "
5047 "an LWP with pending status\n");
5048 else
5049 debug_printf ("Resuming, no pending status or step over needed\n");
5050 }
5051
5052 /* Even if we're leaving threads stopped, queue all signals we'd
5053 otherwise deliver. */
5054 for_each_thread ([&] (thread_info *thread)
5055 {
5056 linux_resume_one_thread (thread, leave_all_stopped);
5057 });
5058
5059 if (need_step_over)
5060 start_step_over (get_thread_lwp (need_step_over));
5061
5062 if (debug_threads)
5063 {
5064 debug_printf ("linux_resume done\n");
5065 debug_exit ();
5066 }
5067
5068 /* We may have events that were pending that can/should be sent to
5069 the client now. Trigger a linux_wait call. */
5070 if (target_is_async_p ())
5071 async_file_mark ();
5072 }
5073
5074 /* This function is called once per thread. We check the thread's
5075 last resume request, which will tell us whether to resume, step, or
5076 leave the thread stopped. Any signal the client requested to be
5077 delivered has already been enqueued at this point.
5078
5079 If any thread that GDB wants running is stopped at an internal
5080 breakpoint that needs stepping over, we start a step-over operation
5081 on that particular thread, and leave all others stopped. */
5082
5083 static void
5084 proceed_one_lwp (thread_info *thread, lwp_info *except)
5085 {
5086 struct lwp_info *lwp = get_thread_lwp (thread);
5087 int step;
5088
5089 if (lwp == except)
5090 return;
5091
5092 if (debug_threads)
5093 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5094
5095 if (!lwp->stopped)
5096 {
5097 if (debug_threads)
5098 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5099 return;
5100 }
5101
5102 if (thread->last_resume_kind == resume_stop
5103 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5104 {
5105 if (debug_threads)
5106 debug_printf (" client wants LWP %ld to remain stopped\n",
5107 lwpid_of (thread));
5108 return;
5109 }
5110
5111 if (lwp->status_pending_p)
5112 {
5113 if (debug_threads)
5114 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5115 lwpid_of (thread));
5116 return;
5117 }
5118
5119 gdb_assert (lwp->suspended >= 0);
5120
5121 if (lwp->suspended)
5122 {
5123 if (debug_threads)
5124 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5125 return;
5126 }
5127
5128 if (thread->last_resume_kind == resume_stop
5129 && lwp->pending_signals_to_report == NULL
5130 && (lwp->collecting_fast_tracepoint
5131 == fast_tpoint_collect_result::not_collecting))
5132 {
5133 /* We haven't reported this LWP as stopped yet (otherwise, the
5134 last_status.kind check above would catch it, and we wouldn't
5135 reach here). This LWP may have been momentarily paused by a
5136 stop_all_lwps call while handling for example, another LWP's
5137 step-over. In that case, the pending expected SIGSTOP signal
5138 that was queued at vCont;t handling time will have already
5139 been consumed by wait_for_sigstop, and so we need to requeue
5140 another one here. Note that if the LWP already has a SIGSTOP
5141 pending, this is a no-op. */
5142
5143 if (debug_threads)
5144 debug_printf ("Client wants LWP %ld to stop. "
5145 "Making sure it has a SIGSTOP pending\n",
5146 lwpid_of (thread));
5147
5148 send_sigstop (lwp);
5149 }
5150
5151 if (thread->last_resume_kind == resume_step)
5152 {
5153 if (debug_threads)
5154 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5155 lwpid_of (thread));
5156
5157 /* If resume_step is requested by GDB, install single-step
5158 breakpoints when the thread is about to be actually resumed, if
5159 they aren't already installed. */
5160 if (can_software_single_step ()
5161 && !has_single_step_breakpoints (thread))
5162 install_software_single_step_breakpoints (lwp);
5163
5164 step = maybe_hw_step (thread);
5165 }
5166 else if (lwp->bp_reinsert != 0)
5167 {
5168 if (debug_threads)
5169 debug_printf (" stepping LWP %ld, reinsert set\n",
5170 lwpid_of (thread));
5171
5172 step = maybe_hw_step (thread);
5173 }
5174 else
5175 step = 0;
5176
5177 linux_resume_one_lwp (lwp, step, 0, NULL);
5178 }
5179
5180 static void
5181 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5182 {
5183 struct lwp_info *lwp = get_thread_lwp (thread);
5184
5185 if (lwp == except)
5186 return;
5187
5188 lwp_suspended_decr (lwp);
5189
5190 proceed_one_lwp (thread, except);
5191 }
5192
5193 /* When we finish a step-over, set threads running again. If there's
5194 another thread that may need a step-over, now's the time to start
5195 it. Eventually, we'll move all threads past their breakpoints. */
5196
5197 static void
5198 proceed_all_lwps (void)
5199 {
5200 struct thread_info *need_step_over;
5201
5202 /* If there is a thread which would otherwise be resumed, which is
5203 stopped at a breakpoint that needs stepping over, then don't
5204 resume any threads - have it step over the breakpoint with all
5205 other threads stopped, then resume all threads again. */
5206
5207 if (supports_breakpoints ())
5208 {
5209 need_step_over = find_thread (need_step_over_p);
5210
5211 if (need_step_over != NULL)
5212 {
5213 if (debug_threads)
5214 debug_printf ("proceed_all_lwps: found "
5215 "thread %ld needing a step-over\n",
5216 lwpid_of (need_step_over));
5217
5218 start_step_over (get_thread_lwp (need_step_over));
5219 return;
5220 }
5221 }
5222
5223 if (debug_threads)
5224 debug_printf ("Proceeding, no step-over needed\n");
5225
5226 for_each_thread ([] (thread_info *thread)
5227 {
5228 proceed_one_lwp (thread, NULL);
5229 });
5230 }
5231
5232 /* Stopped LWPs that the client wanted to be running, that don't have
5233 pending statuses, are set to run again, except for EXCEPT, if not
5234 NULL. This undoes a stop_all_lwps call. */
5235
5236 static void
5237 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5238 {
5239 if (debug_threads)
5240 {
5241 debug_enter ();
5242 if (except)
5243 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5244 lwpid_of (get_lwp_thread (except)));
5245 else
5246 debug_printf ("unstopping all lwps\n");
5247 }
5248
5249 if (unsuspend)
5250 for_each_thread ([&] (thread_info *thread)
5251 {
5252 unsuspend_and_proceed_one_lwp (thread, except);
5253 });
5254 else
5255 for_each_thread ([&] (thread_info *thread)
5256 {
5257 proceed_one_lwp (thread, except);
5258 });
5259
5260 if (debug_threads)
5261 {
5262 debug_printf ("unstop_all_lwps done\n");
5263 debug_exit ();
5264 }
5265 }
5266
5267
5268 #ifdef HAVE_LINUX_REGSETS
5269
5270 #define use_linux_regsets 1
5271
5272 /* Returns true if REGSET has been disabled. */
5273
5274 static int
5275 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5276 {
5277 return (info->disabled_regsets != NULL
5278 && info->disabled_regsets[regset - info->regsets]);
5279 }
5280
5281 /* Disable REGSET. */
5282
5283 static void
5284 disable_regset (struct regsets_info *info, struct regset_info *regset)
5285 {
5286 int dr_offset;
5287
5288 dr_offset = regset - info->regsets;
5289 if (info->disabled_regsets == NULL)
5290 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5291 info->disabled_regsets[dr_offset] = 1;
5292 }
5293
5294 static int
5295 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5296 struct regcache *regcache)
5297 {
5298 struct regset_info *regset;
5299 int saw_general_regs = 0;
5300 int pid;
5301 struct iovec iov;
5302
5303 pid = lwpid_of (current_thread);
5304 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5305 {
5306 void *buf, *data;
5307 int nt_type, res;
5308
5309 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5310 continue;
5311
5312 buf = xmalloc (regset->size);
5313
5314 nt_type = regset->nt_type;
5315 if (nt_type)
5316 {
5317 iov.iov_base = buf;
5318 iov.iov_len = regset->size;
5319 data = (void *) &iov;
5320 }
5321 else
5322 data = buf;
5323
5324 #ifndef __sparc__
5325 res = ptrace (regset->get_request, pid,
5326 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5327 #else
5328 res = ptrace (regset->get_request, pid, data, nt_type);
5329 #endif
5330 if (res < 0)
5331 {
5332 if (errno == EIO
5333 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5334 {
5335 /* If we get EIO on a regset, or an EINVAL and the regset is
5336 optional, do not try it again for this process mode. */
5337 disable_regset (regsets_info, regset);
5338 }
5339 else if (errno == ENODATA)
5340 {
5341 /* ENODATA may be returned if the regset is currently
5342 not "active". This can happen in normal operation,
5343 so suppress the warning in this case. */
5344 }
5345 else if (errno == ESRCH)
5346 {
5347 /* At this point, ESRCH should mean the process is
5348 already gone, in which case we simply ignore attempts
5349 to read its registers. */
5350 }
5351 else
5352 {
5353 char s[256];
5354 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5355 pid);
5356 perror (s);
5357 }
5358 }
5359 else
5360 {
5361 if (regset->type == GENERAL_REGS)
5362 saw_general_regs = 1;
5363 regset->store_function (regcache, buf);
5364 }
5365 free (buf);
5366 }
5367 if (saw_general_regs)
5368 return 0;
5369 else
5370 return 1;
5371 }
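
/* When a regset has an NT_* note type, the transfer above goes through
   PTRACE_GETREGSET with a struct iovec, letting the kernel report how
   much data it actually filled in.  A standalone sketch of that call
   for the general-purpose registers (PID hypothetical):  */

#if 0
#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/user.h>

static long
fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);

  /* On success the kernel trims iov.iov_len down to the number of
     bytes it actually wrote.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif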
5372
5373 static int
5374 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5375 struct regcache *regcache)
5376 {
5377 struct regset_info *regset;
5378 int saw_general_regs = 0;
5379 int pid;
5380 struct iovec iov;
5381
5382 pid = lwpid_of (current_thread);
5383 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5384 {
5385 void *buf, *data;
5386 int nt_type, res;
5387
5388 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5389 || regset->fill_function == NULL)
5390 continue;
5391
5392 buf = xmalloc (regset->size);
5393
5394 /* First fill the buffer with the current register set contents,
5395 in case there are any items in the kernel's regset that are
5396 not in gdbserver's regcache. */
5397
5398 nt_type = regset->nt_type;
5399 if (nt_type)
5400 {
5401 iov.iov_base = buf;
5402 iov.iov_len = regset->size;
5403 data = (void *) &iov;
5404 }
5405 else
5406 data = buf;
5407
5408 #ifndef __sparc__
5409 res = ptrace (regset->get_request, pid,
5410 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5411 #else
5412 res = ptrace (regset->get_request, pid, data, nt_type);
5413 #endif
5414
5415 if (res == 0)
5416 {
5417 /* Then overlay our cached registers on that. */
5418 regset->fill_function (regcache, buf);
5419
5420 /* Only now do we write the register set. */
5421 #ifndef __sparc__
5422 res = ptrace (regset->set_request, pid,
5423 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5424 #else
5425 res = ptrace (regset->set_request, pid, data, nt_type);
5426 #endif
5427 }
5428
5429 if (res < 0)
5430 {
5431 if (errno == EIO
5432 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5433 {
5434 /* If we get EIO on a regset, or an EINVAL and the regset is
5435 optional, do not try it again for this process mode. */
5436 disable_regset (regsets_info, regset);
5437 }
5438 else if (errno == ESRCH)
5439 {
5440 /* At this point, ESRCH should mean the process is
5441 already gone, in which case we simply ignore attempts
5442 to change its registers. See also the related
5443 comment in linux_resume_one_lwp. */
5444 free (buf);
5445 return 0;
5446 }
5447 else
5448 {
5449 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5450 }
5451 }
5452 else if (regset->type == GENERAL_REGS)
5453 saw_general_regs = 1;
5454 free (buf);
5455 }
5456 if (saw_general_regs)
5457 return 0;
5458 else
5459 return 1;
5460 }
5461
5462 #else /* !HAVE_LINUX_REGSETS */
5463
5464 #define use_linux_regsets 0
5465 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5466 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5467
5468 #endif
5469
5470 /* Return 1 if register REGNO is supported by one of the regset ptrace
5471 calls or 0 if it has to be transferred individually. */
5472
5473 static int
5474 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5475 {
5476 unsigned char mask = 1 << (regno % 8);
5477 size_t index = regno / 8;
5478
5479 return (use_linux_regsets
5480 && (regs_info->regset_bitmap == NULL
5481 || (regs_info->regset_bitmap[index] & mask) != 0));
5482 }
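
/* For example, with REGNO == 10: index == 10 / 8 == 1 and
   mask == 1 << (10 % 8) == 0x04, so the function tests bit 2 of
   regset_bitmap[1].  A NULL bitmap means every register is covered
   by some regset.  */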
5483
5484 #ifdef HAVE_LINUX_USRREGS
5485
5486 static int
5487 register_addr (const struct usrregs_info *usrregs, int regnum)
5488 {
5489 int addr;
5490
5491 if (regnum < 0 || regnum >= usrregs->num_regs)
5492 error ("Invalid register number %d.", regnum);
5493
5494 addr = usrregs->regmap[regnum];
5495
5496 return addr;
5497 }
5498
5499 /* Fetch one register. */
5500 static void
5501 fetch_register (const struct usrregs_info *usrregs,
5502 struct regcache *regcache, int regno)
5503 {
5504 CORE_ADDR regaddr;
5505 int i, size;
5506 char *buf;
5507 int pid;
5508
5509 if (regno >= usrregs->num_regs)
5510 return;
5511 if ((*the_low_target.cannot_fetch_register) (regno))
5512 return;
5513
5514 regaddr = register_addr (usrregs, regno);
5515 if (regaddr == -1)
5516 return;
5517
5518 size = ((register_size (regcache->tdesc, regno)
5519 + sizeof (PTRACE_XFER_TYPE) - 1)
5520 & -sizeof (PTRACE_XFER_TYPE));
5521 buf = (char *) alloca (size);
5522
5523 pid = lwpid_of (current_thread);
5524 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5525 {
5526 errno = 0;
5527 *(PTRACE_XFER_TYPE *) (buf + i) =
5528 ptrace (PTRACE_PEEKUSER, pid,
5529 /* Coerce to a uintptr_t first to avoid potential gcc warning
5530 about coercing an 8 byte integer to a 4 byte pointer. */
5531 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5532 regaddr += sizeof (PTRACE_XFER_TYPE);
5533 if (errno != 0)
5534 {
5535 /* Mark register REGNO unavailable. */
5536 supply_register (regcache, regno, NULL);
5537 return;
5538 }
5539 }
5540
5541 if (the_low_target.supply_ptrace_register)
5542 the_low_target.supply_ptrace_register (regcache, regno, buf);
5543 else
5544 supply_register (regcache, regno, buf);
5545 }
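
/* Worked example for the size rounding in fetch_register (and in
   store_register below), assuming an 8-byte PTRACE_XFER_TYPE: a
   10-byte register rounds up to (10 + 8 - 1) & -8 == 16 bytes,
   i.e. two whole PTRACE_PEEKUSER transfers.  */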
5546
5547 /* Store one register. */
5548 static void
5549 store_register (const struct usrregs_info *usrregs,
5550 struct regcache *regcache, int regno)
5551 {
5552 CORE_ADDR regaddr;
5553 int i, size;
5554 char *buf;
5555 int pid;
5556
5557 if (regno >= usrregs->num_regs)
5558 return;
5559 if ((*the_low_target.cannot_store_register) (regno))
5560 return;
5561
5562 regaddr = register_addr (usrregs, regno);
5563 if (regaddr == -1)
5564 return;
5565
5566 size = ((register_size (regcache->tdesc, regno)
5567 + sizeof (PTRACE_XFER_TYPE) - 1)
5568 & -sizeof (PTRACE_XFER_TYPE));
5569 buf = (char *) alloca (size);
5570 memset (buf, 0, size);
5571
5572 if (the_low_target.collect_ptrace_register)
5573 the_low_target.collect_ptrace_register (regcache, regno, buf);
5574 else
5575 collect_register (regcache, regno, buf);
5576
5577 pid = lwpid_of (current_thread);
5578 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5579 {
5580 errno = 0;
5581 ptrace (PTRACE_POKEUSER, pid,
5582 /* Coerce to a uintptr_t first to avoid potential gcc warning
5583 about coercing an 8 byte integer to a 4 byte pointer. */
5584 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5585 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5586 if (errno != 0)
5587 {
5588 /* At this point, ESRCH should mean the process is
5589 already gone, in which case we simply ignore attempts
5590 to change its registers. See also the related
5591 comment in linux_resume_one_lwp. */
5592 if (errno == ESRCH)
5593 return;
5594
5595 if ((*the_low_target.cannot_store_register) (regno) == 0)
5596 error ("writing register %d: %s", regno, strerror (errno));
5597 }
5598 regaddr += sizeof (PTRACE_XFER_TYPE);
5599 }
5600 }
5601
5602 /* Fetch all registers, or just one, from the child process.
5603 If REGNO is -1, do this for all registers, skipping any that are
5604 assumed to have been retrieved by regsets_fetch_inferior_registers,
5605 unless ALL is non-zero.
5606 Otherwise, REGNO specifies which register (so we can save time). */
5607 static void
5608 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5609 struct regcache *regcache, int regno, int all)
5610 {
5611 struct usrregs_info *usr = regs_info->usrregs;
5612
5613 if (regno == -1)
5614 {
5615 for (regno = 0; regno < usr->num_regs; regno++)
5616 if (all || !linux_register_in_regsets (regs_info, regno))
5617 fetch_register (usr, regcache, regno);
5618 }
5619 else
5620 fetch_register (usr, regcache, regno);
5621 }
5622
5623 /* Store our register values back into the inferior.
5624 If REGNO is -1, do this for all registers, skipping any that are
5625 assumed to have been saved by regsets_store_inferior_registers,
5626 unless ALL is non-zero.
5627 Otherwise, REGNO specifies which register (so we can save time). */
5628 static void
5629 usr_store_inferior_registers (const struct regs_info *regs_info,
5630 struct regcache *regcache, int regno, int all)
5631 {
5632 struct usrregs_info *usr = regs_info->usrregs;
5633
5634 if (regno == -1)
5635 {
5636 for (regno = 0; regno < usr->num_regs; regno++)
5637 if (all || !linux_register_in_regsets (regs_info, regno))
5638 store_register (usr, regcache, regno);
5639 }
5640 else
5641 store_register (usr, regcache, regno);
5642 }
5643
5644 #else /* !HAVE_LINUX_USRREGS */
5645
5646 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5647 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5648
5649 #endif
5650
5651
5652 static void
5653 linux_fetch_registers (struct regcache *regcache, int regno)
5654 {
5655 int use_regsets;
5656 int all = 0;
5657 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5658
5659 if (regno == -1)
5660 {
5661 if (the_low_target.fetch_register != NULL
5662 && regs_info->usrregs != NULL)
5663 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5664 (*the_low_target.fetch_register) (regcache, regno);
5665
5666 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5667 if (regs_info->usrregs != NULL)
5668 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5669 }
5670 else
5671 {
5672 if (the_low_target.fetch_register != NULL
5673 && (*the_low_target.fetch_register) (regcache, regno))
5674 return;
5675
5676 use_regsets = linux_register_in_regsets (regs_info, regno);
5677 if (use_regsets)
5678 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5679 regcache);
5680 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5681 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5682 }
5683 }
5684
5685 static void
5686 linux_store_registers (struct regcache *regcache, int regno)
5687 {
5688 int use_regsets;
5689 int all = 0;
5690 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5691
5692 if (regno == -1)
5693 {
5694 all = regsets_store_inferior_registers (regs_info->regsets_info,
5695 regcache);
5696 if (regs_info->usrregs != NULL)
5697 usr_store_inferior_registers (regs_info, regcache, regno, all);
5698 }
5699 else
5700 {
5701 use_regsets = linux_register_in_regsets (regs_info, regno);
5702 if (use_regsets)
5703 all = regsets_store_inferior_registers (regs_info->regsets_info,
5704 regcache);
5705 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5706 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5707 }
5708 }
5709
5710
5711 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5712 to debugger memory starting at MYADDR. */
5713
5714 static int
5715 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5716 {
5717 int pid = lwpid_of (current_thread);
5718 PTRACE_XFER_TYPE *buffer;
5719 CORE_ADDR addr;
5720 int count;
5721 char filename[64];
5722 int i;
5723 int ret;
5724 int fd;
5725
5726 /* Try using /proc. Don't bother for one word. */
5727 if (len >= 3 * sizeof (long))
5728 {
5729 int bytes;
5730
5731 /* We could keep this file open and cache it - possibly one per
5732 thread. That requires some juggling, but is even faster. */
5733 sprintf (filename, "/proc/%d/mem", pid);
5734 fd = open (filename, O_RDONLY | O_LARGEFILE);
5735 if (fd == -1)
5736 goto no_proc;
5737
5738 /* If pread64 is available, use it. It's faster if the kernel
5739 supports it (only one syscall), and it's 64-bit safe even on
5740 32-bit platforms (for instance, SPARC debugging a SPARC64
5741 application). */
5742 #ifdef HAVE_PREAD64
5743 bytes = pread64 (fd, myaddr, len, memaddr);
5744 #else
5745 bytes = -1;
5746 if (lseek (fd, memaddr, SEEK_SET) != -1)
5747 bytes = read (fd, myaddr, len);
5748 #endif
5749
5750 close (fd);
5751 if (bytes == len)
5752 return 0;
5753
5754 /* Some data was read; we'll try to get the rest with ptrace. */
5755 if (bytes > 0)
5756 {
5757 memaddr += bytes;
5758 myaddr += bytes;
5759 len -= bytes;
5760 }
5761 }
5762
5763 no_proc:
5764 /* Round starting address down to longword boundary. */
5765 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5766 /* Round ending address up; get number of longwords that makes. */
5767 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5768 / sizeof (PTRACE_XFER_TYPE));
5769 /* Allocate buffer of that many longwords. */
5770 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5771
5772 /* Read all the longwords. */
5773 errno = 0;
5774 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5775 {
5776 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5777 about coercing an 8 byte integer to a 4 byte pointer. */
5778 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5779 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5780 (PTRACE_TYPE_ARG4) 0);
5781 if (errno)
5782 break;
5783 }
5784 ret = errno;
5785
5786 /* Copy appropriate bytes out of the buffer. */
5787 if (i > 0)
5788 {
5789 i *= sizeof (PTRACE_XFER_TYPE);
5790 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5791 memcpy (myaddr,
5792 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5793 i < len ? i : len);
5794 }
5795
5796 return ret;
5797 }
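
/* Worked example for the ptrace fallback above, assuming an 8-byte
   PTRACE_XFER_TYPE: for memaddr == 0x1003 and len == 6,
   addr  == 0x1003 & ~7 == 0x1000 and
   count == ((0x1009 - 0x1000) + 7) / 8 == 2 longwords.
   After peeking both words, the copy skips the memaddr & 7 == 3
   leading padding bytes and moves min (i, len) == 6 bytes out.  */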
5798
5799 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5800 memory at MEMADDR. On failure (cannot write to the inferior)
5801 returns the value of errno. Always succeeds if LEN is zero. */
5802
5803 static int
5804 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5805 {
5806 int i;
5807 /* Round starting address down to longword boundary. */
5808 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5809 /* Round ending address up; get number of longwords that makes. */
5810 int count
5811 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5812 / sizeof (PTRACE_XFER_TYPE);
5813
5814 /* Allocate buffer of that many longwords. */
5815 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5816
5817 int pid = lwpid_of (current_thread);
5818
5819 if (len == 0)
5820 {
5821 /* Zero length write always succeeds. */
5822 return 0;
5823 }
5824
5825 if (debug_threads)
5826 {
5827 /* Dump up to four bytes. */
5828 char str[4 * 2 + 1];
5829 char *p = str;
5830 int dump = len < 4 ? len : 4;
5831
5832 for (i = 0; i < dump; i++)
5833 {
5834 sprintf (p, "%02x", myaddr[i]);
5835 p += 2;
5836 }
5837 *p = '\0';
5838
5839 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5840 str, (long) memaddr, pid);
5841 }
5842
5843 /* Fill start and end extra bytes of buffer with existing memory data. */
5844
5845 errno = 0;
5846 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5847 about coercing an 8 byte integer to a 4 byte pointer. */
5848 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5849 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5850 (PTRACE_TYPE_ARG4) 0);
5851 if (errno)
5852 return errno;
5853
5854 if (count > 1)
5855 {
5856 errno = 0;
5857 buffer[count - 1]
5858 = ptrace (PTRACE_PEEKTEXT, pid,
5859 /* Coerce to a uintptr_t first to avoid potential gcc warning
5860 about coercing an 8 byte integer to a 4 byte pointer. */
5861 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5862 * sizeof (PTRACE_XFER_TYPE)),
5863 (PTRACE_TYPE_ARG4) 0);
5864 if (errno)
5865 return errno;
5866 }
5867
5868 /* Copy data to be written over corresponding part of buffer. */
5869
5870 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5871 myaddr, len);
5872
5873 /* Write the entire buffer. */
5874
5875 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5876 {
5877 errno = 0;
5878 ptrace (PTRACE_POKETEXT, pid,
5879 /* Coerce to a uintptr_t first to avoid potential gcc warning
5880 about coercing an 8 byte integer to a 4 byte pointer. */
5881 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5882 (PTRACE_TYPE_ARG4) buffer[i]);
5883 if (errno)
5884 return errno;
5885 }
5886
5887 return 0;
5888 }
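
/* Worked example for the write path above, again with an 8-byte
   PTRACE_XFER_TYPE: writing 2 bytes at memaddr == 0x1007 spans the
   two longwords at 0x1000 and 0x1008, so both are peeked first; the
   payload lands at buffer offset memaddr & 7 == 7, and the other 14
   surrounding bytes are poked back unchanged.  */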
5889
5890 static void
5891 linux_look_up_symbols (void)
5892 {
5893 #ifdef USE_THREAD_DB
5894 struct process_info *proc = current_process ();
5895
5896 if (proc->priv->thread_db != NULL)
5897 return;
5898
5899 thread_db_init ();
5900 #endif
5901 }
5902
5903 static void
5904 linux_request_interrupt (void)
5905 {
5906 /* Send a SIGINT to the process group. This acts just as if the user
5907 had typed a ^C on the controlling terminal. */
5908 kill (-signal_pid, SIGINT);
5909 }
5910
5911 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5912 to debugger memory starting at MYADDR. */
5913
5914 static int
5915 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5916 {
5917 char filename[PATH_MAX];
5918 int fd, n;
5919 int pid = lwpid_of (current_thread);
5920
5921 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5922
5923 fd = open (filename, O_RDONLY);
5924 if (fd < 0)
5925 return -1;
5926
5927 if (offset != (CORE_ADDR) 0
5928 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5929 n = -1;
5930 else
5931 n = read (fd, myaddr, len);
5932
5933 close (fd);
5934
5935 return n;
5936 }
5937
5938 /* These breakpoint and watchpoint related wrapper functions simply
5939 pass on the function call if the target has registered a
5940 corresponding function. */
5941
5942 static int
5943 linux_supports_z_point_type (char z_type)
5944 {
5945 return (the_low_target.supports_z_point_type != NULL
5946 && the_low_target.supports_z_point_type (z_type));
5947 }
5948
5949 static int
5950 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5951 int size, struct raw_breakpoint *bp)
5952 {
5953 if (type == raw_bkpt_type_sw)
5954 return insert_memory_breakpoint (bp);
5955 else if (the_low_target.insert_point != NULL)
5956 return the_low_target.insert_point (type, addr, size, bp);
5957 else
5958 /* Unsupported (see target.h). */
5959 return 1;
5960 }
5961
5962 static int
5963 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5964 int size, struct raw_breakpoint *bp)
5965 {
5966 if (type == raw_bkpt_type_sw)
5967 return remove_memory_breakpoint (bp);
5968 else if (the_low_target.remove_point != NULL)
5969 return the_low_target.remove_point (type, addr, size, bp);
5970 else
5971 /* Unsupported (see target.h). */
5972 return 1;
5973 }
5974
5975 /* Implement the to_stopped_by_sw_breakpoint target_ops
5976 method. */
5977
5978 static int
5979 linux_stopped_by_sw_breakpoint (void)
5980 {
5981 struct lwp_info *lwp = get_thread_lwp (current_thread);
5982
5983 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5984 }
5985
5986 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5987 method. */
5988
5989 static int
5990 linux_supports_stopped_by_sw_breakpoint (void)
5991 {
5992 return USE_SIGTRAP_SIGINFO;
5993 }
5994
5995 /* Implement the to_stopped_by_hw_breakpoint target_ops
5996 method. */
5997
5998 static int
5999 linux_stopped_by_hw_breakpoint (void)
6000 {
6001 struct lwp_info *lwp = get_thread_lwp (current_thread);
6002
6003 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6004 }
6005
6006 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6007 method. */
6008
6009 static int
6010 linux_supports_stopped_by_hw_breakpoint (void)
6011 {
6012 return USE_SIGTRAP_SIGINFO;
6013 }
6014
6015 /* Implement the supports_hardware_single_step target_ops method. */
6016
6017 static int
6018 linux_supports_hardware_single_step (void)
6019 {
6020 return can_hardware_single_step ();
6021 }
6022
6023 static int
6024 linux_supports_software_single_step (void)
6025 {
6026 return can_software_single_step ();
6027 }
6028
6029 static int
6030 linux_stopped_by_watchpoint (void)
6031 {
6032 struct lwp_info *lwp = get_thread_lwp (current_thread);
6033
6034 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6035 }
6036
6037 static CORE_ADDR
6038 linux_stopped_data_address (void)
6039 {
6040 struct lwp_info *lwp = get_thread_lwp (current_thread);
6041
6042 return lwp->stopped_data_address;
6043 }
6044
6045 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6046 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6047 && defined(PT_TEXT_END_ADDR)
6048
6049 /* This is only used for targets that define PT_TEXT_ADDR,
6050 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6051 target presumably has other ways of acquiring this information,
6052 such as loadmaps. */
6053
6054 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6055 to tell gdb about. */
6056
6057 static int
6058 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6059 {
6060 unsigned long text, text_end, data;
6061 int pid = lwpid_of (current_thread);
6062
6063 errno = 0;
6064
6065 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6066 (PTRACE_TYPE_ARG4) 0);
6067 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6068 (PTRACE_TYPE_ARG4) 0);
6069 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6070 (PTRACE_TYPE_ARG4) 0);
6071
6072 if (errno == 0)
6073 {
6074 /* Both text and data offsets produced at compile-time (and so
6075 used by gdb) are relative to the beginning of the program,
6076 with the data segment immediately following the text segment.
6077 However, the actual runtime layout in memory may put the data
6078 somewhere else, so when we send gdb a data base-address, we
6079 use the real data base address and subtract the compile-time
6080 data base-address from it (which is just the length of the
6081 text segment). BSS immediately follows data in both
6082 cases. */
6083 *text_p = text;
6084 *data_p = data - (text_end - text);
6085
6086 return 1;
6087 }
6088 return 0;
6089 }
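
/* Worked example for the computation above: if the kernel reports
   text == 0x10000, text_end == 0x18000 and data == 0x40000, the text
   segment is 0x8000 bytes long, so gdb is sent
   *data_p == 0x40000 - 0x8000 == 0x38000; gdb then adds the
   compile-time data offset (the text length) back to recover the real
   runtime data address.  */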
6090 #endif
6091
6092 static int
6093 linux_qxfer_osdata (const char *annex,
6094 unsigned char *readbuf, unsigned const char *writebuf,
6095 CORE_ADDR offset, int len)
6096 {
6097 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6098 }
6099
6100 /* Convert a native/host siginfo object into/from the siginfo in the
6101 layout of the inferior's architecture. */
6102
6103 static void
6104 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6105 {
6106 int done = 0;
6107
6108 if (the_low_target.siginfo_fixup != NULL)
6109 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6110
6111 /* If there was no callback, or the callback didn't do anything,
6112 then just do a straight memcpy. */
6113 if (!done)
6114 {
6115 if (direction == 1)
6116 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6117 else
6118 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6119 }
6120 }
6121
6122 static int
6123 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6124 unsigned const char *writebuf, CORE_ADDR offset, int len)
6125 {
6126 int pid;
6127 siginfo_t siginfo;
6128 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6129
6130 if (current_thread == NULL)
6131 return -1;
6132
6133 pid = lwpid_of (current_thread);
6134
6135 if (debug_threads)
6136 debug_printf ("%s siginfo for lwp %d.\n",
6137 readbuf != NULL ? "Reading" : "Writing",
6138 pid);
6139
6140 if (offset >= sizeof (siginfo))
6141 return -1;
6142
6143 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6144 return -1;
6145
6146 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6147 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6148 inferior with a 64-bit GDBSERVER should look the same as debugging it
6149 with a 32-bit GDBSERVER, we need to convert it. */
6150 siginfo_fixup (&siginfo, inf_siginfo, 0);
6151
6152 if (offset + len > sizeof (siginfo))
6153 len = sizeof (siginfo) - offset;
6154
6155 if (readbuf != NULL)
6156 memcpy (readbuf, inf_siginfo + offset, len);
6157 else
6158 {
6159 memcpy (inf_siginfo + offset, writebuf, len);
6160
6161 /* Convert back to ptrace layout before flushing it out. */
6162 siginfo_fixup (&siginfo, inf_siginfo, 1);
6163
6164 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6165 return -1;
6166 }
6167
6168 return len;
6169 }
6170
6171 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6172 it lets us notice when children change state; in all modes, it is
6173 the handler for the sigsuspend in my_waitpid. */
6174
6175 static void
6176 sigchld_handler (int signo)
6177 {
6178 int old_errno = errno;
6179
6180 if (debug_threads)
6181 {
6182 do
6183 {
6184 /* Use the async-signal-safe debug function. */
6185 if (debug_write ("sigchld_handler\n",
6186 sizeof ("sigchld_handler\n") - 1) < 0)
6187 break; /* just ignore */
6188 } while (0);
6189 }
6190
6191 if (target_is_async_p ())
6192 async_file_mark (); /* trigger a linux_wait */
6193
6194 errno = old_errno;
6195 }
6196
6197 static int
6198 linux_supports_non_stop (void)
6199 {
6200 return 1;
6201 }
6202
6203 static int
6204 linux_async (int enable)
6205 {
6206 int previous = target_is_async_p ();
6207
6208 if (debug_threads)
6209 debug_printf ("linux_async (%d), previous=%d\n",
6210 enable, previous);
6211
6212 if (previous != enable)
6213 {
6214 sigset_t mask;
6215 sigemptyset (&mask);
6216 sigaddset (&mask, SIGCHLD);
6217
6218 sigprocmask (SIG_BLOCK, &mask, NULL);
6219
6220 if (enable)
6221 {
6222 if (pipe (linux_event_pipe) == -1)
6223 {
6224 linux_event_pipe[0] = -1;
6225 linux_event_pipe[1] = -1;
6226 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6227
6228 warning ("creating event pipe failed.");
6229 return previous;
6230 }
6231
6232 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6233 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6234
6235 /* Register the event loop handler. */
6236 add_file_handler (linux_event_pipe[0],
6237 handle_target_event, NULL);
6238
6239 /* Always trigger a linux_wait. */
6240 async_file_mark ();
6241 }
6242 else
6243 {
6244 delete_file_handler (linux_event_pipe[0]);
6245
6246 close (linux_event_pipe[0]);
6247 close (linux_event_pipe[1]);
6248 linux_event_pipe[0] = -1;
6249 linux_event_pipe[1] = -1;
6250 }
6251
6252 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6253 }
6254
6255 return previous;
6256 }
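
/* Editorial sketch, not part of the original file: the event pipe set
   up above implements the classic self-pipe trick.  The only
   async-signal-safe action the SIGCHLD handler needs is to make the
   pipe readable; the event loop then re-enters linux_wait from a
   normal context.  FAKE_EVENT_PIPE is a hypothetical stand-in for
   linux_event_pipe.  */

static int fake_event_pipe[2] = { -1, -1 };

static void __attribute__ ((unused))
self_pipe_mark_sketch (void)
{
  /* write is async-signal-safe; one byte is enough, since the event
     loop only cares whether the read end polls readable.  */
  int ignored = write (fake_event_pipe[1], "+", 1);

  (void) ignored;
}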
6257
6258 static int
6259 linux_start_non_stop (int nonstop)
6260 {
6261 /* Register or unregister from event-loop accordingly. */
6262 linux_async (nonstop);
6263
6264 if (target_is_async_p () != (nonstop != 0))
6265 return -1;
6266
6267 return 0;
6268 }
6269
6270 static int
6271 linux_supports_multi_process (void)
6272 {
6273 return 1;
6274 }
6275
6276 /* Check if fork events are supported. */
6277
6278 static int
6279 linux_supports_fork_events (void)
6280 {
6281 return linux_supports_tracefork ();
6282 }
6283
6284 /* Check if vfork events are supported. */
6285
6286 static int
6287 linux_supports_vfork_events (void)
6288 {
6289 return linux_supports_tracefork ();
6290 }
6291
6292 /* Check if exec events are supported. */
6293
6294 static int
6295 linux_supports_exec_events (void)
6296 {
6297 return linux_supports_traceexec ();
6298 }
6299
6300 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6301 ptrace flags for all inferiors. This is in case the new GDB connection
6302 doesn't support the same set of events that the previous one did. */
6303
6304 static void
6305 linux_handle_new_gdb_connection (void)
6306 {
6307 /* Request that all the lwps reset their ptrace options. */
6308 for_each_thread ([] (thread_info *thread)
6309 {
6310 struct lwp_info *lwp = get_thread_lwp (thread);
6311
6312 if (!lwp->stopped)
6313 {
6314 /* Stop the lwp so we can modify its ptrace options. */
6315 lwp->must_set_ptrace_flags = 1;
6316 linux_stop_lwp (lwp);
6317 }
6318 else
6319 {
6320 /* Already stopped; go ahead and set the ptrace options. */
6321 struct process_info *proc = find_process_pid (pid_of (thread));
6322 int options = linux_low_ptrace_options (proc->attached);
6323
6324 linux_enable_event_reporting (lwpid_of (thread), options);
6325 lwp->must_set_ptrace_flags = 0;
6326 }
6327 });
6328 }
6329
6330 static int
6331 linux_supports_disable_randomization (void)
6332 {
6333 #ifdef HAVE_PERSONALITY
6334 return 1;
6335 #else
6336 return 0;
6337 #endif
6338 }
6339
6340 static int
6341 linux_supports_agent (void)
6342 {
6343 return 1;
6344 }
6345
6346 static int
6347 linux_supports_range_stepping (void)
6348 {
6349 if (can_software_single_step ())
6350 return 1;
6351 if (*the_low_target.supports_range_stepping == NULL)
6352 return 0;
6353
6354 return (*the_low_target.supports_range_stepping) ();
6355 }
6356
6357 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6358 struct target_loadseg
6359 {
6360 /* Core address to which the segment is mapped. */
6361 Elf32_Addr addr;
6362 /* VMA recorded in the program header. */
6363 Elf32_Addr p_vaddr;
6364 /* Size of this segment in memory. */
6365 Elf32_Word p_memsz;
6366 };
6367
6368 # if defined PT_GETDSBT
6369 struct target_loadmap
6370 {
6371 /* Protocol version number, must be zero. */
6372 Elf32_Word version;
6373 /* Pointer to the DSBT table, its size, and the DSBT index. */
6374 unsigned *dsbt_table;
6375 unsigned dsbt_size, dsbt_index;
6376 /* Number of segments in this map. */
6377 Elf32_Word nsegs;
6378 /* The actual memory map. */
6379 struct target_loadseg segs[/*nsegs*/];
6380 };
6381 # define LINUX_LOADMAP PT_GETDSBT
6382 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6383 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6384 # else
6385 struct target_loadmap
6386 {
6387 /* Protocol version number, must be zero. */
6388 Elf32_Half version;
6389 /* Number of segments in this map. */
6390 Elf32_Half nsegs;
6391 /* The actual memory map. */
6392 struct target_loadseg segs[/*nsegs*/];
6393 };
6394 # define LINUX_LOADMAP PTRACE_GETFDPIC
6395 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6396 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6397 # endif
6398
6399 static int
6400 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6401 unsigned char *myaddr, unsigned int len)
6402 {
6403 int pid = lwpid_of (current_thread);
6404 int addr = -1;
6405 struct target_loadmap *data = NULL;
6406 unsigned int actual_length, copy_length;
6407
6408 if (strcmp (annex, "exec") == 0)
6409 addr = (int) LINUX_LOADMAP_EXEC;
6410 else if (strcmp (annex, "interp") == 0)
6411 addr = (int) LINUX_LOADMAP_INTERP;
6412 else
6413 return -1;
6414
6415 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6416 return -1;
6417
6418 if (data == NULL)
6419 return -1;
6420
6421 actual_length = sizeof (struct target_loadmap)
6422 + sizeof (struct target_loadseg) * data->nsegs;
6423
6424 if (offset < 0 || offset > actual_length)
6425 return -1;
6426
6427 copy_length = actual_length - offset < len ? actual_length - offset : len;
6428 memcpy (myaddr, (char *) data + offset, copy_length);
6429 return copy_length;
6430 }
6431 #else
6432 # define linux_read_loadmap NULL
6433 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6434
6435 static void
6436 linux_process_qsupported (char **features, int count)
6437 {
6438 if (the_low_target.process_qsupported != NULL)
6439 the_low_target.process_qsupported (features, count);
6440 }
6441
6442 static int
6443 linux_supports_catch_syscall (void)
6444 {
6445 return (the_low_target.get_syscall_trapinfo != NULL
6446 && linux_supports_tracesysgood ());
6447 }
6448
6449 static int
6450 linux_get_ipa_tdesc_idx (void)
6451 {
6452 if (the_low_target.get_ipa_tdesc_idx == NULL)
6453 return 0;
6454
6455 return (*the_low_target.get_ipa_tdesc_idx) ();
6456 }
6457
6458 static int
6459 linux_supports_tracepoints (void)
6460 {
6461 if (*the_low_target.supports_tracepoints == NULL)
6462 return 0;
6463
6464 return (*the_low_target.supports_tracepoints) ();
6465 }
6466
6467 static CORE_ADDR
6468 linux_read_pc (struct regcache *regcache)
6469 {
6470 if (the_low_target.get_pc == NULL)
6471 return 0;
6472
6473 return (*the_low_target.get_pc) (regcache);
6474 }
6475
6476 static void
6477 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6478 {
6479 gdb_assert (the_low_target.set_pc != NULL);
6480
6481 (*the_low_target.set_pc) (regcache, pc);
6482 }
6483
6484 static int
6485 linux_thread_stopped (struct thread_info *thread)
6486 {
6487 return get_thread_lwp (thread)->stopped;
6488 }
6489
6490 /* This exposes stop-all-threads functionality to other modules. */
6491
6492 static void
6493 linux_pause_all (int freeze)
6494 {
6495 stop_all_lwps (freeze, NULL);
6496 }
6497
6498 /* This exposes unstop-all-threads functionality to other gdbserver
6499 modules. */
6500
6501 static void
6502 linux_unpause_all (int unfreeze)
6503 {
6504 unstop_all_lwps (unfreeze, NULL);
6505 }
6506
6507 static int
6508 linux_prepare_to_access_memory (void)
6509 {
6510 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6511 running LWP. */
6512 if (non_stop)
6513 linux_pause_all (1);
6514 return 0;
6515 }
6516
6517 static void
6518 linux_done_accessing_memory (void)
6519 {
6520 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6521 running LWP. */
6522 if (non_stop)
6523 linux_unpause_all (1);
6524 }
6525
6526 static int
6527 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6528 CORE_ADDR collector,
6529 CORE_ADDR lockaddr,
6530 ULONGEST orig_size,
6531 CORE_ADDR *jump_entry,
6532 CORE_ADDR *trampoline,
6533 ULONGEST *trampoline_size,
6534 unsigned char *jjump_pad_insn,
6535 ULONGEST *jjump_pad_insn_size,
6536 CORE_ADDR *adjusted_insn_addr,
6537 CORE_ADDR *adjusted_insn_addr_end,
6538 char *err)
6539 {
6540 return (*the_low_target.install_fast_tracepoint_jump_pad)
6541 (tpoint, tpaddr, collector, lockaddr, orig_size,
6542 jump_entry, trampoline, trampoline_size,
6543 jjump_pad_insn, jjump_pad_insn_size,
6544 adjusted_insn_addr, adjusted_insn_addr_end,
6545 err);
6546 }
6547
6548 static struct emit_ops *
6549 linux_emit_ops (void)
6550 {
6551 if (the_low_target.emit_ops != NULL)
6552 return (*the_low_target.emit_ops) ();
6553 else
6554 return NULL;
6555 }
6556
6557 static int
6558 linux_get_min_fast_tracepoint_insn_len (void)
6559 {
6560 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6561 }
6562
6563 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6564
6565 static int
6566 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6567 CORE_ADDR *phdr_memaddr, int *num_phdr)
6568 {
6569 char filename[PATH_MAX];
6570 int fd;
6571 const int auxv_size = is_elf64
6572 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6573 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6574
6575 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6576
6577 fd = open (filename, O_RDONLY);
6578 if (fd < 0)
6579 return 1;
6580
6581 *phdr_memaddr = 0;
6582 *num_phdr = 0;
6583 while (read (fd, buf, auxv_size) == auxv_size
6584 && (*phdr_memaddr == 0 || *num_phdr == 0))
6585 {
6586 if (is_elf64)
6587 {
6588 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6589
6590 switch (aux->a_type)
6591 {
6592 case AT_PHDR:
6593 *phdr_memaddr = aux->a_un.a_val;
6594 break;
6595 case AT_PHNUM:
6596 *num_phdr = aux->a_un.a_val;
6597 break;
6598 }
6599 }
6600 else
6601 {
6602 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6603
6604 switch (aux->a_type)
6605 {
6606 case AT_PHDR:
6607 *phdr_memaddr = aux->a_un.a_val;
6608 break;
6609 case AT_PHNUM:
6610 *num_phdr = aux->a_un.a_val;
6611 break;
6612 }
6613 }
6614 }
6615
6616 close (fd);
6617
6618 if (*phdr_memaddr == 0 || *num_phdr == 0)
6619 {
6620 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6621 "phdr_memaddr = %ld, phdr_num = %d",
6622 (long) *phdr_memaddr, *num_phdr);
6623 return 2;
6624 }
6625
6626 return 0;
6627 }
6628
6629 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6630
6631 static CORE_ADDR
6632 get_dynamic (const int pid, const int is_elf64)
6633 {
6634 CORE_ADDR phdr_memaddr, relocation;
6635 int num_phdr, i;
6636 unsigned char *phdr_buf;
6637 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6638
6639 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6640 return 0;
6641
6642 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6643 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6644
6645 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6646 return 0;
6647
6648 /* Compute relocation: it is expected to be 0 for "regular" executables,
6649 non-zero for PIE ones. */
6650 relocation = -1;
6651 for (i = 0; relocation == -1 && i < num_phdr; i++)
6652 if (is_elf64)
6653 {
6654 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6655
6656 if (p->p_type == PT_PHDR)
6657 relocation = phdr_memaddr - p->p_vaddr;
6658 }
6659 else
6660 {
6661 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6662
6663 if (p->p_type == PT_PHDR)
6664 relocation = phdr_memaddr - p->p_vaddr;
6665 }
6666
6667 if (relocation == -1)
6668 {
6669 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6670 all real-world executables, including PIE executables, always have
6671 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6672 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6673 provides DT_DEBUG anyway (fpc binaries are statically linked).
6674
6675 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
6676
6677 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6678
6679 return 0;
6680 }
6681
6682 for (i = 0; i < num_phdr; i++)
6683 {
6684 if (is_elf64)
6685 {
6686 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6687
6688 if (p->p_type == PT_DYNAMIC)
6689 return p->p_vaddr + relocation;
6690 }
6691 else
6692 {
6693 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6694
6695 if (p->p_type == PT_DYNAMIC)
6696 return p->p_vaddr + relocation;
6697 }
6698 }
6699
6700 return 0;
6701 }
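
/* Worked example for the relocation logic above: a PIE loaded at
   0x555555554000 whose PT_PHDR claims p_vaddr == 0x40 has its program
   headers at phdr_memaddr == 0x555555554040, giving
   relocation == 0x555555554000; a PT_DYNAMIC with p_vaddr == 0x2e00
   is then found at 0x555555556e00.  For a non-PIE executable,
   phdr_memaddr equals p_vaddr and relocation is 0.  */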
6702
6703 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6704 can be 0 if the inferior does not yet have the library list initialized.
6705 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6706 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6707
6708 static CORE_ADDR
6709 get_r_debug (const int pid, const int is_elf64)
6710 {
6711 CORE_ADDR dynamic_memaddr;
6712 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6713 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6714 CORE_ADDR map = -1;
6715
6716 dynamic_memaddr = get_dynamic (pid, is_elf64);
6717 if (dynamic_memaddr == 0)
6718 return map;
6719
6720 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6721 {
6722 if (is_elf64)
6723 {
6724 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6725 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6726 union
6727 {
6728 Elf64_Xword map;
6729 unsigned char buf[sizeof (Elf64_Xword)];
6730 }
6731 rld_map;
6732 #endif
6733 #ifdef DT_MIPS_RLD_MAP
6734 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6735 {
6736 if (linux_read_memory (dyn->d_un.d_val,
6737 rld_map.buf, sizeof (rld_map.buf)) == 0)
6738 return rld_map.map;
6739 else
6740 break;
6741 }
6742 #endif /* DT_MIPS_RLD_MAP */
6743 #ifdef DT_MIPS_RLD_MAP_REL
6744 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6745 {
6746 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6747 rld_map.buf, sizeof (rld_map.buf)) == 0)
6748 return rld_map.map;
6749 else
6750 break;
6751 }
6752 #endif /* DT_MIPS_RLD_MAP_REL */
6753
6754 if (dyn->d_tag == DT_DEBUG && map == -1)
6755 map = dyn->d_un.d_val;
6756
6757 if (dyn->d_tag == DT_NULL)
6758 break;
6759 }
6760 else
6761 {
6762 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6763 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6764 union
6765 {
6766 Elf32_Word map;
6767 unsigned char buf[sizeof (Elf32_Word)];
6768 }
6769 rld_map;
6770 #endif
6771 #ifdef DT_MIPS_RLD_MAP
6772 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6773 {
6774 if (linux_read_memory (dyn->d_un.d_val,
6775 rld_map.buf, sizeof (rld_map.buf)) == 0)
6776 return rld_map.map;
6777 else
6778 break;
6779 }
6780 #endif /* DT_MIPS_RLD_MAP */
6781 #ifdef DT_MIPS_RLD_MAP_REL
6782 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6783 {
6784 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6785 rld_map.buf, sizeof (rld_map.buf)) == 0)
6786 return rld_map.map;
6787 else
6788 break;
6789 }
6790 #endif /* DT_MIPS_RLD_MAP_REL */
6791
6792 if (dyn->d_tag == DT_DEBUG && map == -1)
6793 map = dyn->d_un.d_val;
6794
6795 if (dyn->d_tag == DT_NULL)
6796 break;
6797 }
6798
6799 dynamic_memaddr += dyn_size;
6800 }
6801
6802 return map;
6803 }
6804
6805 /* Read one pointer from MEMADDR in the inferior. */
6806
6807 static int
6808 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6809 {
6810 int ret;
6811
6812 /* Go through a union so this works on either big or little endian
6813 hosts, when the inferior's pointer size is smaller than the size
6814 of CORE_ADDR. It is assumed the inferior's endianness is the
6815 same as the superior's. */
6816 union
6817 {
6818 CORE_ADDR core_addr;
6819 unsigned int ui;
6820 unsigned char uc;
6821 } addr;
6822
6823 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6824 if (ret == 0)
6825 {
6826 if (ptr_size == sizeof (CORE_ADDR))
6827 *ptr = addr.core_addr;
6828 else if (ptr_size == sizeof (unsigned int))
6829 *ptr = addr.ui;
6830 else
6831 gdb_assert_not_reached ("unhandled pointer size");
6832 }
6833 return ret;
6834 }
6835
6836 struct link_map_offsets
6837 {
6838 /* Offset and size of r_debug.r_version. */
6839 int r_version_offset;
6840
6841 /* Offset and size of r_debug.r_map. */
6842 int r_map_offset;
6843
6844 /* Offset to l_addr field in struct link_map. */
6845 int l_addr_offset;
6846
6847 /* Offset to l_name field in struct link_map. */
6848 int l_name_offset;
6849
6850 /* Offset to l_ld field in struct link_map. */
6851 int l_ld_offset;
6852
6853 /* Offset to l_next field in struct link_map. */
6854 int l_next_offset;
6855
6856 /* Offset to l_prev field in struct link_map. */
6857 int l_prev_offset;
6858 };
6859
6860 /* Construct qXfer:libraries-svr4:read reply. */
6861
6862 static int
6863 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6864 unsigned const char *writebuf,
6865 CORE_ADDR offset, int len)
6866 {
6867 struct process_info_private *const priv = current_process ()->priv;
6868 char filename[PATH_MAX];
6869 int pid, is_elf64;
6870
6871 static const struct link_map_offsets lmo_32bit_offsets =
6872 {
6873 0, /* r_version offset. */
6874 4, /* r_debug.r_map offset. */
6875 0, /* l_addr offset in link_map. */
6876 4, /* l_name offset in link_map. */
6877 8, /* l_ld offset in link_map. */
6878 12, /* l_next offset in link_map. */
6879 16 /* l_prev offset in link_map. */
6880 };
6881
6882 static const struct link_map_offsets lmo_64bit_offsets =
6883 {
6884 0, /* r_version offset. */
6885 8, /* r_debug.r_map offset. */
6886 0, /* l_addr offset in link_map. */
6887 8, /* l_name offset in link_map. */
6888 16, /* l_ld offset in link_map. */
6889 24, /* l_next offset in link_map. */
6890 32 /* l_prev offset in link_map. */
6891 };
6892 const struct link_map_offsets *lmo;
6893 unsigned int machine;
6894 int ptr_size;
6895 CORE_ADDR lm_addr = 0, lm_prev = 0;
6896 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6897 int header_done = 0;
6898
6899 if (writebuf != NULL)
6900 return -2;
6901 if (readbuf == NULL)
6902 return -1;
6903
6904 pid = lwpid_of (current_thread);
6905 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6906 is_elf64 = elf_64_file_p (filename, &machine);
6907 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6908 ptr_size = is_elf64 ? 8 : 4;
6909
6910 while (annex[0] != '\0')
6911 {
6912 const char *sep;
6913 CORE_ADDR *addrp;
6914 int name_len;
6915
6916 sep = strchr (annex, '=');
6917 if (sep == NULL)
6918 break;
6919
6920 name_len = sep - annex;
6921 if (name_len == 5 && startswith (annex, "start"))
6922 addrp = &lm_addr;
6923 else if (name_len == 4 && startswith (annex, "prev"))
6924 addrp = &lm_prev;
6925 else
6926 {
6927 annex = strchr (sep, ';');
6928 if (annex == NULL)
6929 break;
6930 annex++;
6931 continue;
6932 }
6933
6934 annex = decode_address_to_semicolon (addrp, sep + 1);
6935 }
6936
6937 if (lm_addr == 0)
6938 {
6939 int r_version = 0;
6940
6941 if (priv->r_debug == 0)
6942 priv->r_debug = get_r_debug (pid, is_elf64);
6943
6944 /* We failed to find DT_DEBUG. This situation will not change
6945 for this inferior, so do not retry. Report it to GDB as
6946 E01; see solib-svr4.c on the GDB side for the reasons. */
6947 if (priv->r_debug == (CORE_ADDR) -1)
6948 return -1;
6949
6950 if (priv->r_debug != 0)
6951 {
6952 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6953 (unsigned char *) &r_version,
6954 sizeof (r_version)) != 0
6955 || r_version != 1)
6956 {
6957 warning ("unexpected r_debug version %d", r_version);
6958 }
6959 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6960 &lm_addr, ptr_size) != 0)
6961 {
6962 warning ("unable to read r_map from 0x%lx",
6963 (long) priv->r_debug + lmo->r_map_offset);
6964 }
6965 }
6966 }
6967
6968 std::string document = "<library-list-svr4 version=\"1.0\"";
6969
6970 while (lm_addr
6971 && read_one_ptr (lm_addr + lmo->l_name_offset,
6972 &l_name, ptr_size) == 0
6973 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6974 &l_addr, ptr_size) == 0
6975 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6976 &l_ld, ptr_size) == 0
6977 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6978 &l_prev, ptr_size) == 0
6979 && read_one_ptr (lm_addr + lmo->l_next_offset,
6980 &l_next, ptr_size) == 0)
6981 {
6982 unsigned char libname[PATH_MAX];
6983
6984 if (lm_prev != l_prev)
6985 {
6986 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6987 (long) lm_prev, (long) l_prev);
6988 break;
6989 }
6990
6991 /* Ignore the first entry even if it has a valid name, as the first
6992 entry corresponds to the main executable. The first entry should not
6993 be skipped if the dynamic loader was loaded late by a static
6994 executable (see the solib-svr4.c parameter ignore_first), but in that
6995 case the main executable has no PT_DYNAMIC and this function has
6996 already exited above because get_r_debug failed. */
6997 if (lm_prev == 0)
6998 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6999 else
7000 {
7001 /* Not checking for error because reading may stop before
7002 we've got PATH_MAX worth of characters. */
7003 libname[0] = '\0';
7004 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7005 libname[sizeof (libname) - 1] = '\0';
7006 if (libname[0] != '\0')
7007 {
7008 if (!header_done)
7009 {
7010 /* Terminate `<library-list-svr4'. */
7011 document += '>';
7012 header_done = 1;
7013 }
7014
7015 string_appendf (document, "<library name=\"");
7016 xml_escape_text_append (&document, (char *) libname);
7017 string_appendf (document, "\" lm=\"0x%lx\" "
7018 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7019 (unsigned long) lm_addr, (unsigned long) l_addr,
7020 (unsigned long) l_ld);
7021 }
7022 }
7023
7024 lm_prev = lm_addr;
7025 lm_addr = l_next;
7026 }
7027
7028 if (!header_done)
7029 {
7030 /* Empty list; terminate `<library-list-svr4'. */
7031 document += "/>";
7032 }
7033 else
7034 document += "</library-list-svr4>";
7035
7036 int document_len = document.length ();
7037 if (offset < document_len)
7038 document_len -= offset;
7039 else
7040 document_len = 0;
7041 if (len > document_len)
7042 len = document_len;
7043
7044 memcpy (readbuf, document.data () + offset, len);
7045
7046 return len;
7047 }
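
/* Illustrative exchange for the reply built above (all addresses made
   up): GDB sends the annex "start=0x7ffff7ffe190;prev=0x0;"; the
   first link_map entry is the main executable, so it contributes only
   main-lm="0x7ffff7ffe190", and each later entry becomes an element
   such as <library name="/lib64/libc.so.6" lm="0x7ffff7ffd940"
   l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"/>.  */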
7048
7049 #ifdef HAVE_LINUX_BTRACE
7050
7051 /* See to_disable_btrace target method. */
7052
7053 static int
7054 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7055 {
7056 enum btrace_error err;
7057
7058 err = linux_disable_btrace (tinfo);
7059 return (err == BTRACE_ERR_NONE ? 0 : -1);
7060 }
7061
7062 /* Encode an Intel Processor Trace configuration. */
7063
7064 static void
7065 linux_low_encode_pt_config (struct buffer *buffer,
7066 const struct btrace_data_pt_config *config)
7067 {
7068 buffer_grow_str (buffer, "<pt-config>\n");
7069
7070 switch (config->cpu.vendor)
7071 {
7072 case CV_INTEL:
7073 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7074 "model=\"%u\" stepping=\"%u\"/>\n",
7075 config->cpu.family, config->cpu.model,
7076 config->cpu.stepping);
7077 break;
7078
7079 default:
7080 break;
7081 }
7082
7083 buffer_grow_str (buffer, "</pt-config>\n");
7084 }
7085
7086 /* Encode a raw buffer. */
7087
7088 static void
7089 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7090 unsigned int size)
7091 {
7092 if (size == 0)
7093 return;
7094
7095 /* We use hex encoding - see gdbsupport/rsp-low.h. */
7096 buffer_grow_str (buffer, "<raw>\n");
7097
7098 while (size-- > 0)
7099 {
7100 char elem[2];
7101
7102 elem[0] = tohex ((*data >> 4) & 0xf);
7103 elem[1] = tohex (*data++ & 0xf);
7104
7105 buffer_grow (buffer, elem, 2);
7106 }
7107
7108 buffer_grow_str (buffer, "</raw>\n");
7109 }
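
/* For example, the raw bytes 0xde 0xad 0x0f are emitted inside <raw>
   as the six characters "dead0f": tohex maps each nibble, high nibble
   first, to its lower-case hex digit.  */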
7110
7111 /* See to_read_btrace target method. */
7112
7113 static int
7114 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7115 enum btrace_read_type type)
7116 {
7117 struct btrace_data btrace;
7118 struct btrace_block *block;
7119 enum btrace_error err;
7120 int i;
7121
7122 err = linux_read_btrace (&btrace, tinfo, type);
7123 if (err != BTRACE_ERR_NONE)
7124 {
7125 if (err == BTRACE_ERR_OVERFLOW)
7126 buffer_grow_str0 (buffer, "E.Overflow.");
7127 else
7128 buffer_grow_str0 (buffer, "E.Generic Error.");
7129
7130 return -1;
7131 }
7132
7133 switch (btrace.format)
7134 {
7135 case BTRACE_FORMAT_NONE:
7136 buffer_grow_str0 (buffer, "E.No Trace.");
7137 return -1;
7138
7139 case BTRACE_FORMAT_BTS:
7140 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7141 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7142
7143 for (i = 0;
7144 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7145 i++)
7146 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7147 paddress (block->begin), paddress (block->end));
7148
7149 buffer_grow_str0 (buffer, "</btrace>\n");
7150 break;
7151
7152 case BTRACE_FORMAT_PT:
7153 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7154 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7155 buffer_grow_str (buffer, "<pt>\n");
7156
7157 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7158
7159 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7160 btrace.variant.pt.size);
7161
7162 buffer_grow_str (buffer, "</pt>\n");
7163 buffer_grow_str0 (buffer, "</btrace>\n");
7164 break;
7165
7166 default:
7167 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7168 return -1;
7169 }
7170
7171 return 0;
7172 }
7173
7174 /* See to_btrace_conf target method. */
7175
7176 static int
7177 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7178 struct buffer *buffer)
7179 {
7180 const struct btrace_config *conf;
7181
7182 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7183 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7184
7185 conf = linux_btrace_conf (tinfo);
7186 if (conf != NULL)
7187 {
7188 switch (conf->format)
7189 {
7190 case BTRACE_FORMAT_NONE:
7191 break;
7192
7193 case BTRACE_FORMAT_BTS:
7194 buffer_xml_printf (buffer, "<bts");
7195 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7196 buffer_xml_printf (buffer, " />\n");
7197 break;
7198
7199 case BTRACE_FORMAT_PT:
7200 buffer_xml_printf (buffer, "<pt");
7201 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7202 buffer_xml_printf (buffer, "/>\n");
7203 break;
7204 }
7205 }
7206
7207 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7208 return 0;
7209 }
7210 #endif /* HAVE_LINUX_BTRACE */
7211
7212 /* See nat/linux-nat.h. */
7213
7214 ptid_t
7215 current_lwp_ptid (void)
7216 {
7217 return ptid_of (current_thread);
7218 }
7219
7220 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7221
7222 static int
7223 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7224 {
7225 if (the_low_target.breakpoint_kind_from_pc != NULL)
7226 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7227 else
7228 return default_breakpoint_kind_from_pc (pcptr);
7229 }
7230
7231 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7232
7233 static const gdb_byte *
7234 linux_sw_breakpoint_from_kind (int kind, int *size)
7235 {
7236 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7237
7238 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7239 }
7240
7241 /* Implementation of the target_ops method
7242 "breakpoint_kind_from_current_state". */
7243
7244 static int
7245 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7246 {
7247 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7248 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7249 else
7250 return linux_breakpoint_kind_from_pc (pcptr);
7251 }
7252
7253 /* Default implementation of linux_target_ops method "set_pc" for
7254 32-bit pc register which is literally named "pc". */
7255
7256 void
7257 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7258 {
7259 uint32_t newpc = pc;
7260
7261 supply_register_by_name (regcache, "pc", &newpc);
7262 }
7263
7264 /* Default implementation of linux_target_ops method "get_pc" for
7265 32-bit pc register which is literally named "pc". */
7266
7267 CORE_ADDR
7268 linux_get_pc_32bit (struct regcache *regcache)
7269 {
7270 uint32_t pc;
7271
7272 collect_register_by_name (regcache, "pc", &pc);
7273 if (debug_threads)
7274 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7275 return pc;
7276 }
7277
7278 /* Default implementation of linux_target_ops method "set_pc" for
7279 64-bit pc register which is literally named "pc". */
7280
7281 void
7282 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7283 {
7284 uint64_t newpc = pc;
7285
7286 supply_register_by_name (regcache, "pc", &newpc);
7287 }
7288
7289 /* Default implementation of linux_target_ops method "get_pc" for
7290 64-bit pc register which is literally named "pc". */
7291
7292 CORE_ADDR
7293 linux_get_pc_64bit (struct regcache *regcache)
7294 {
7295 uint64_t pc;
7296
7297 collect_register_by_name (regcache, "pc", &pc);
7298 if (debug_threads)
7299 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7300 return pc;
7301 }
7302
7303 /* See linux-low.h. */
7304
7305 int
7306 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7307 {
7308 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7309 int offset = 0;
7310
7311 gdb_assert (wordsize == 4 || wordsize == 8);
7312
7313 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
7314 {
7315 if (wordsize == 4)
7316 {
7317 uint32_t *data_p = (uint32_t *) data;
7318 if (data_p[0] == match)
7319 {
7320 *valp = data_p[1];
7321 return 1;
7322 }
7323 }
7324 else
7325 {
7326 uint64_t *data_p = (uint64_t *) data;
7327 if (data_p[0] == match)
7328 {
7329 *valp = data_p[1];
7330 return 1;
7331 }
7332 }
7333
7334 offset += 2 * wordsize;
7335 }
7336
7337 return 0;
7338 }
7339
7340 /* See linux-low.h. */
7341
7342 CORE_ADDR
7343 linux_get_hwcap (int wordsize)
7344 {
7345 CORE_ADDR hwcap = 0;
7346 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7347 return hwcap;
7348 }
7349
7350 /* See linux-low.h. */
7351
7352 CORE_ADDR
7353 linux_get_hwcap2 (int wordsize)
7354 {
7355 CORE_ADDR hwcap2 = 0;
7356 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7357 return hwcap2;
7358 }
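
/* Editorial sketch, not part of the original file: how an architecture
   backend typically consumes the helpers above.  The 0x1 feature bit
   is made up, a stand-in for a real HWCAP_* flag from the target's
   <asm/hwcap.h>.  */

static int __attribute__ ((unused))
hwcap_probe_sketch (int wordsize)
{
  /* Each auxv entry is a (key, value) pair of WORDSIZE-byte words;
     AT_HWCAP's value is a bitmask of kernel-reported CPU features.  */
  CORE_ADDR hwcap = linux_get_hwcap (wordsize);

  return (hwcap & 0x1) != 0;
}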
7359
7360 static struct target_ops linux_target_ops = {
7361 linux_create_inferior,
7362 linux_post_create_inferior,
7363 linux_attach,
7364 linux_kill,
7365 linux_detach,
7366 linux_mourn,
7367 linux_join,
7368 linux_thread_alive,
7369 linux_resume,
7370 linux_wait,
7371 linux_fetch_registers,
7372 linux_store_registers,
7373 linux_prepare_to_access_memory,
7374 linux_done_accessing_memory,
7375 linux_read_memory,
7376 linux_write_memory,
7377 linux_look_up_symbols,
7378 linux_request_interrupt,
7379 linux_read_auxv,
7380 linux_supports_z_point_type,
7381 linux_insert_point,
7382 linux_remove_point,
7383 linux_stopped_by_sw_breakpoint,
7384 linux_supports_stopped_by_sw_breakpoint,
7385 linux_stopped_by_hw_breakpoint,
7386 linux_supports_stopped_by_hw_breakpoint,
7387 linux_supports_hardware_single_step,
7388 linux_stopped_by_watchpoint,
7389 linux_stopped_data_address,
7390 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7391 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7392 && defined(PT_TEXT_END_ADDR)
7393 linux_read_offsets,
7394 #else
7395 NULL,
7396 #endif
7397 #ifdef USE_THREAD_DB
7398 thread_db_get_tls_address,
7399 #else
7400 NULL,
7401 #endif
7402 hostio_last_error_from_errno,
7403 linux_qxfer_osdata,
7404 linux_xfer_siginfo,
7405 linux_supports_non_stop,
7406 linux_async,
7407 linux_start_non_stop,
7408 linux_supports_multi_process,
7409 linux_supports_fork_events,
7410 linux_supports_vfork_events,
7411 linux_supports_exec_events,
7412 linux_handle_new_gdb_connection,
7413 #ifdef USE_THREAD_DB
7414 thread_db_handle_monitor_command,
7415 #else
7416 NULL,
7417 #endif
7418 linux_common_core_of_thread,
7419 linux_read_loadmap,
7420 linux_process_qsupported,
7421 linux_supports_tracepoints,
7422 linux_read_pc,
7423 linux_write_pc,
7424 linux_thread_stopped,
7425 NULL,
7426 linux_pause_all,
7427 linux_unpause_all,
7428 linux_stabilize_threads,
7429 linux_install_fast_tracepoint_jump_pad,
7430 linux_emit_ops,
7431 linux_supports_disable_randomization,
7432 linux_get_min_fast_tracepoint_insn_len,
7433 linux_qxfer_libraries_svr4,
7434 linux_supports_agent,
7435 #ifdef HAVE_LINUX_BTRACE
7436 linux_enable_btrace,
7437 linux_low_disable_btrace,
7438 linux_low_read_btrace,
7439 linux_low_btrace_conf,
7440 #else
7441 NULL,
7442 NULL,
7443 NULL,
7444 NULL,
7445 #endif
7446 linux_supports_range_stepping,
7447 linux_proc_pid_to_exec_file,
7448 linux_mntns_open_cloexec,
7449 linux_mntns_unlink,
7450 linux_mntns_readlink,
7451 linux_breakpoint_kind_from_pc,
7452 linux_sw_breakpoint_from_kind,
7453 linux_proc_tid_get_name,
7454 linux_breakpoint_kind_from_current_state,
7455 linux_supports_software_single_step,
7456 linux_supports_catch_syscall,
7457 linux_get_ipa_tdesc_idx,
7458 #if USE_THREAD_DB
7459 thread_db_thread_handle,
7460 #else
7461 NULL,
7462 #endif
7463 };
7464
7465 #ifdef HAVE_LINUX_REGSETS
7466 void
7467 initialize_regsets_info (struct regsets_info *info)
7468 {
7469 for (info->num_regsets = 0;
7470 info->regsets[info->num_regsets].size >= 0;
7471 info->num_regsets++)
7472 ;
7473 }
7474 #endif
7475
7476 void
7477 initialize_low (void)
7478 {
7479 struct sigaction sigchld_action;
7480
7481 memset (&sigchld_action, 0, sizeof (sigchld_action));
7482 set_target_ops (&linux_target_ops);
7483
7484 linux_ptrace_init_warnings ();
7485 linux_proc_init_warnings ();
7486
7487 sigchld_action.sa_handler = sigchld_handler;
7488 sigemptyset (&sigchld_action.sa_mask);
7489 sigchld_action.sa_flags = SA_RESTART;
7490 sigaction (SIGCHLD, &sigchld_action, NULL);
7491
7492 initialize_low_arch ();
7493
7494 linux_check_ptrace_features ();
7495 }