AArch64: Racy: Don't set empty set of hardware BPs/WPs on new thread
gdb/gdbserver/linux-low.c (deliverable/binutils-gdb.git)
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2018 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common-inferior.h"
#include "nat/fork-inferior.h"
#include "environ.h"
#include "common/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;              /* Entry type */
  union
    {
      uint32_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;              /* Entry type */
  union
    {
      uint64_t a_val;           /* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
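
/* A stop reported by waitpid for a PID we do not yet know about is
   stashed on the list above (see the callers of add_to_pid_list
   elsewhere in this file); handle_extended_wait later claims it with
   pull_pid_from_list once the corresponding fork/vfork/clone event
   arrives.  */
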
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}
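
/* Signals left pending on an LWP, queued via enqueue_pending_signal
   (declared above) so they can be delivered when the LWP can next
   safely receive them.  The list is singly linked, chained through
   PREV.  */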
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
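
/* This is the usual self-pipe idiom: when a target event becomes
   available, a byte is written to the write end of the pipe (see
   async_file_mark, declared below), waking up the event loop that
   waits on the read end.  */
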
static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, and -1 if it is not an ELF file at all.  On success, set
   *MACHINE to the ELF machine type; otherwise set it to EM_NONE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file, zero if the file is not a
   64-bit ELF file, and -1 if the file is not accessible or doesn't
   exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_t (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_of (event_thr).lwp (),
                            ptid.pid ());
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, child will
                 hit it, so uninsert single-step breakpoints from parent
                 (and child).  Once vfork child is done, reinsert
                 them back to parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = allocate_target_description ();
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints is cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (cs.report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
        trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
        trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
        trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
                 sizeof ("stdin/stdout redirected\n") - 1) < 0)
        {
          /* Errors ignored.  */;
        }
    }
}
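
/* Note that linux_ptrace_fun runs in the forked child, after fork
   and before exec; that is why failures are reported with
   trace_start_error_with_name instead of by throwing an error in
   gdbserver proper.  */
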
/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
                       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
                         str_program_args.c_str (),
                         get_environ ()->envp (), linux_ptrace_fun,
                         NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          std::string reason
            = linux_ptrace_attach_fail_reason_string (ptid, err);

          warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);

      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}
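
/* Return non-zero if fewer than two threads of process PID are known
   to us, i.e., if the given thread would be the last one of its
   process.  */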
static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
        {
          /* This is the first thread of this process we see.  */
          seen_one = true;
          return false;
        }
      else
        {
          /* This is the second thread of this process we see.  */
          return true;
        }
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

static int
linux_kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

static int
linux_detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Free all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}
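
/* Wait for process PID to exit, reaping it so that it does not
   linger as a zombie; keep waiting until its exit status has been
   collected or the child is gone altogether (ECHILD).  */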
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}
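
/* Find the LWP whose lwp id matches PTID's lwp field or, if PTID's
   lwp field is zero, PTID's pid field.  Return NULL if no such LWP
   is known.  */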
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   iterate_over_lwps_ftype callback,
                   void *data)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp, data);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}
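
/* A minimal usage sketch (hypothetical caller, not part of this
   file): count the stopped LWPs of a process.  The callback returns
   zero to keep iterating, non-zero to stop and have that LWP
   returned:

     static int
     count_stopped (struct lwp_info *lwp, void *data)
     {
       if (lwp->stopped)
         (*(int *) data)++;
       return 0;
     }

     int n = 0;
     iterate_over_lwps (ptid_t (pid), count_stopped, &n);  */
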
1861 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1862 their exits until all other threads in the group have exited. */
1863
1864 static void
1865 check_zombie_leaders (void)
1866 {
1867 for_each_process ([] (process_info *proc) {
1868 pid_t leader_pid = pid_of (proc);
1869 struct lwp_info *leader_lp;
1870
1871 leader_lp = find_lwp_pid (ptid_t (leader_pid));
1872
1873 if (debug_threads)
1874 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1875 "num_lwps=%d, zombie=%d\n",
1876 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1877 linux_proc_pid_is_zombie (leader_pid));
1878
1879 if (leader_lp != NULL && !leader_lp->stopped
1880 /* Check if there are other threads in the group, as we may
1881 have raced with the inferior simply exiting. */
1882 && !last_thread_of_process_p (leader_pid)
1883 && linux_proc_pid_is_zombie (leader_pid))
1884 {
1885 /* A leader zombie can mean one of two things:
1886
1887 - It exited, and there's an exit status pending and
1888 available, or only the leader exited (not the whole
1889 program). In the latter case, we can't waitpid the
1890 leader's exit status until all other threads are gone.
1891
1892 - There are 3 or more threads in the group, and a thread
1893 other than the leader exec'd. On an exec, the Linux
1894 kernel destroys all other threads (except the execing
1895 one) in the thread group, and resets the execing thread's
1896 tid to the tgid. No exit notification is sent for the
1897 execing thread -- from the ptracer's perspective, it
1898 appears as though the execing thread just vanishes.
1899 Until we reap all other threads except the leader and the
1900 execing thread, the leader will be zombie, and the
1901 execing thread will be in `D (disc sleep)'. As soon as
1902 all other threads are reaped, the execing thread changes
1903 its tid to the tgid, and the previous (zombie) leader
1904 vanishes, giving way to the "new" leader. We could try
1905 distinguishing the exit and exec cases, by waiting once
1906 more, and seeing if something comes out, but it doesn't
1907 sound useful. The previous leader _does_ go away, and
1908 we'll re-add the new one once we see the exec event
1909 (which is just the same as what would happen if the
1910 previous leader did exit voluntarily before some other
1911 thread execs). */
1912
1913 if (debug_threads)
1914 debug_printf ("CZL: Thread group leader %d zombie "
1915 "(it exited, or another thread execd).\n",
1916 leader_pid);
1917
1918 delete_lwp (leader_lp);
1919 }
1920 });
1921 }
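
/* Illustration (editor's sketch, not part of the original source):
   linux_proc_pid_is_zombie, used above, keys off the "State:" line
   of /proc/PID/status, which reads "State: Z (zombie)" for a zombie
   leader.  A minimal standalone probe along those lines: */
#if 0
static int
pid_is_zombie_sketch (pid_t pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  /* Scan for the "State:" line; a 'Z' there means zombie.  */
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	zombie = (strchr (line, 'Z') != NULL);
	break;
      }
  fclose (f);
  return zombie;
}
#endif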
1922
1923 /* Callback for `find_thread'. Returns true for the first thread
1924 whose LWP is not stopped. */
1925
1926 static bool
1927 not_stopped_callback (thread_info *thread, ptid_t filter)
1928 {
1929 if (!thread->id.matches (filter))
1930 return false;
1931
1932 lwp_info *lwp = get_thread_lwp (thread);
1933
1934 return !lwp->stopped;
1935 }
1936
1937 /* Increment LWP's suspend count. */
1938
1939 static void
1940 lwp_suspended_inc (struct lwp_info *lwp)
1941 {
1942 lwp->suspended++;
1943
1944 if (debug_threads && lwp->suspended > 4)
1945 {
1946 struct thread_info *thread = get_lwp_thread (lwp);
1947
1948 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1949 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1950 }
1951 }
1952
1953 /* Decrement LWP's suspend count. */
1954
1955 static void
1956 lwp_suspended_decr (struct lwp_info *lwp)
1957 {
1958 lwp->suspended--;
1959
1960 if (lwp->suspended < 0)
1961 {
1962 struct thread_info *thread = get_lwp_thread (lwp);
1963
1964 internal_error (__FILE__, __LINE__,
1965 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1966 lwp->suspended);
1967 }
1968 }
1969
1970 /* This function should only be called if the LWP got a SIGTRAP.
1971
1972 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1973 event was handled, 0 otherwise. */
1974
1975 static int
1976 handle_tracepoints (struct lwp_info *lwp)
1977 {
1978 struct thread_info *tinfo = get_lwp_thread (lwp);
1979 int tpoint_related_event = 0;
1980
1981 gdb_assert (lwp->suspended == 0);
1982
1983 /* If this tracepoint hit causes a tracing stop, we'll immediately
1984 uninsert tracepoints. To do this, we temporarily pause all
1985 threads, unpatch away, and then unpause threads. We need to make
1986 sure the unpausing doesn't resume LWP too. */
1987 lwp_suspended_inc (lwp);
1988
1989 /* And we need to be sure that any all-threads-stopping doesn't try
1990 to move threads out of the jump pads, as it could deadlock the
1991 inferior (LWP could be in the jump pad, maybe even holding the
1992 lock).  */
1993
1994 /* Do any necessary step collect actions. */
1995 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1996
1997 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1998
1999 /* See if we just hit a tracepoint and do its main collect
2000 actions. */
2001 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2002
2003 lwp_suspended_decr (lwp);
2004
2005 gdb_assert (lwp->suspended == 0);
2006 gdb_assert (!stabilizing_threads
2007 || (lwp->collecting_fast_tracepoint
2008 != fast_tpoint_collect_result::not_collecting));
2009
2010 if (tpoint_related_event)
2011 {
2012 if (debug_threads)
2013 debug_printf ("got a tracepoint event\n");
2014 return 1;
2015 }
2016
2017 return 0;
2018 }
2019
2020 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2021 collection status. */
2022
2023 static fast_tpoint_collect_result
2024 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2025 struct fast_tpoint_collect_status *status)
2026 {
2027 CORE_ADDR thread_area;
2028 struct thread_info *thread = get_lwp_thread (lwp);
2029
2030 if (the_low_target.get_thread_area == NULL)
2031 return fast_tpoint_collect_result::not_collecting;
2032
2033 /* Get the thread area address. This is used to recognize which
2034 thread is which when tracing with the in-process agent library.
2035 We don't read anything from the address, and treat it as opaque;
2036 it's the address itself that we assume is unique per-thread. */
2037 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2038 return fast_tpoint_collect_result::not_collecting;
2039
2040 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2041 }
2042
2043 /* The reason we resume in the caller is that we want to be able
2044 to pass lwp->status_pending as WSTAT, and we need to clear
2045 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2046 refuses to resume. */
2047
2048 static int
2049 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2050 {
2051 struct thread_info *saved_thread;
2052
2053 saved_thread = current_thread;
2054 current_thread = get_lwp_thread (lwp);
2055
2056 if ((wstat == NULL
2057 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2058 && supports_fast_tracepoints ()
2059 && agent_loaded_p ())
2060 {
2061 struct fast_tpoint_collect_status status;
2062
2063 if (debug_threads)
2064 debug_printf ("Checking whether LWP %ld needs to move out of the "
2065 "jump pad.\n",
2066 lwpid_of (current_thread));
2067
2068 fast_tpoint_collect_result r
2069 = linux_fast_tracepoint_collecting (lwp, &status);
2070
2071 if (wstat == NULL
2072 || (WSTOPSIG (*wstat) != SIGILL
2073 && WSTOPSIG (*wstat) != SIGFPE
2074 && WSTOPSIG (*wstat) != SIGSEGV
2075 && WSTOPSIG (*wstat) != SIGBUS))
2076 {
2077 lwp->collecting_fast_tracepoint = r;
2078
2079 if (r != fast_tpoint_collect_result::not_collecting)
2080 {
2081 if (r == fast_tpoint_collect_result::before_insn
2082 && lwp->exit_jump_pad_bkpt == NULL)
2083 {
2084 /* Haven't executed the original instruction yet.
2085 Set breakpoint there, and wait till it's hit,
2086 then single-step until exiting the jump pad. */
2087 lwp->exit_jump_pad_bkpt
2088 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2089 }
2090
2091 if (debug_threads)
2092 debug_printf ("Checking whether LWP %ld needs to move out of "
2093 "the jump pad...it does\n",
2094 lwpid_of (current_thread));
2095 current_thread = saved_thread;
2096
2097 return 1;
2098 }
2099 }
2100 else
2101 {
2102 /* If we get a synchronous signal while collecting, *and*
2103 while executing the (relocated) original instruction,
2104 reset the PC to point at the tpoint address, before
2105 reporting to GDB. Otherwise, it's an IPA lib bug: just
2106 report the signal to GDB, and pray for the best. */
2107
2108 lwp->collecting_fast_tracepoint
2109 = fast_tpoint_collect_result::not_collecting;
2110
2111 if (r != fast_tpoint_collect_result::not_collecting
2112 && (status.adjusted_insn_addr <= lwp->stop_pc
2113 && lwp->stop_pc < status.adjusted_insn_addr_end))
2114 {
2115 siginfo_t info;
2116 struct regcache *regcache;
2117
2118 /* The si_addr on a few signals references the address
2119 of the faulting instruction. Adjust that as
2120 well. */
2121 if ((WSTOPSIG (*wstat) == SIGILL
2122 || WSTOPSIG (*wstat) == SIGFPE
2123 || WSTOPSIG (*wstat) == SIGBUS
2124 || WSTOPSIG (*wstat) == SIGSEGV)
2125 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2126 (PTRACE_TYPE_ARG3) 0, &info) == 0
2127 /* Final check just to make sure we don't clobber
2128 the siginfo of non-kernel-sent signals. */
2129 && (uintptr_t) info.si_addr == lwp->stop_pc)
2130 {
2131 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2132 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2133 (PTRACE_TYPE_ARG3) 0, &info);
2134 }
2135
2136 regcache = get_thread_regcache (current_thread, 1);
2137 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2138 lwp->stop_pc = status.tpoint_addr;
2139
2140 /* Cancel any fast tracepoint lock this thread was
2141 holding. */
2142 force_unlock_trace_buffer ();
2143 }
2144
2145 if (lwp->exit_jump_pad_bkpt != NULL)
2146 {
2147 if (debug_threads)
2148 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt; "
2149 "stopping all threads momentarily.\n");
2150
2151 stop_all_lwps (1, lwp);
2152
2153 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2154 lwp->exit_jump_pad_bkpt = NULL;
2155
2156 unstop_all_lwps (1, lwp);
2157
2158 gdb_assert (lwp->suspended >= 0);
2159 }
2160 }
2161 }
2162
2163 if (debug_threads)
2164 debug_printf ("Checking whether LWP %ld needs to move out of the "
2165 "jump pad...no\n",
2166 lwpid_of (current_thread));
2167
2168 current_thread = saved_thread;
2169 return 0;
2170 }
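
/* Illustration (editor's sketch, not part of the original source):
   the si_addr rewrite above is a read-modify-write of the pending
   siginfo through ptrace; stripped down, with hypothetical TID and
   REAL_FAULT_ADDR:

     siginfo_t si;
     ptrace (PTRACE_GETSIGINFO, tid, (PTRACE_TYPE_ARG3) 0, &si);
     si.si_addr = (void *) (uintptr_t) real_fault_addr;
     ptrace (PTRACE_SETSIGINFO, tid, (PTRACE_TYPE_ARG3) 0, &si);
*/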
2171
2172 /* Enqueue one signal in the "signals to report later when out of the
2173 jump pad" list. */
2174
2175 static void
2176 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2177 {
2178 struct pending_signals *p_sig;
2179 struct thread_info *thread = get_lwp_thread (lwp);
2180
2181 if (debug_threads)
2182 debug_printf ("Deferring signal %d for LWP %ld.\n",
2183 WSTOPSIG (*wstat), lwpid_of (thread));
2184
2185 if (debug_threads)
2186 {
2187 struct pending_signals *sig;
2188
2189 for (sig = lwp->pending_signals_to_report;
2190 sig != NULL;
2191 sig = sig->prev)
2192 debug_printf (" Already queued %d\n",
2193 sig->signal);
2194
2195 debug_printf (" (no more currently queued signals)\n");
2196 }
2197
2198 /* Don't enqueue non-RT signals if they are already in the deferred
2199 queue. (SIGSTOP being the easiest signal to see ending up here
2200 twice) */
2201 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2202 {
2203 struct pending_signals *sig;
2204
2205 for (sig = lwp->pending_signals_to_report;
2206 sig != NULL;
2207 sig = sig->prev)
2208 {
2209 if (sig->signal == WSTOPSIG (*wstat))
2210 {
2211 if (debug_threads)
2212 debug_printf ("Not requeuing already queued non-RT signal %d"
2213 " for LWP %ld\n",
2214 sig->signal,
2215 lwpid_of (thread));
2216 return;
2217 }
2218 }
2219 }
2220
2221 p_sig = XCNEW (struct pending_signals);
2222 p_sig->prev = lwp->pending_signals_to_report;
2223 p_sig->signal = WSTOPSIG (*wstat);
2224
2225 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2226 &p_sig->info);
2227
2228 lwp->pending_signals_to_report = p_sig;
2229 }
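
/* Illustration (editor's sketch, not part of the original source):
   the non-RT dedup above mirrors kernel semantics: standard signals
   don't queue, so a second pending instance is coalesced anyway,
   while real-time signals (>= __SIGRTMIN) queue one entry per
   delivery.  From a sender's point of view (PID and VAL are
   hypothetical):

     kill (pid, SIGUSR1);            // pending set gains SIGUSR1
     kill (pid, SIGUSR1);            // coalesced: still one instance
     sigqueue (pid, SIGRTMIN, val);  // queued
     sigqueue (pid, SIGRTMIN, val);  // queued again: two instances
*/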
2230
2231 /* Dequeue one signal from the "signals to report later when out of
2232 the jump pad" list. */
2233
2234 static int
2235 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2236 {
2237 struct thread_info *thread = get_lwp_thread (lwp);
2238
2239 if (lwp->pending_signals_to_report != NULL)
2240 {
2241 struct pending_signals **p_sig;
2242
2243 p_sig = &lwp->pending_signals_to_report;
2244 while ((*p_sig)->prev != NULL)
2245 p_sig = &(*p_sig)->prev;
2246
2247 *wstat = W_STOPCODE ((*p_sig)->signal);
2248 if ((*p_sig)->info.si_signo != 0)
2249 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2250 &(*p_sig)->info);
2251 free (*p_sig);
2252 *p_sig = NULL;
2253
2254 if (debug_threads)
2255 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2256 WSTOPSIG (*wstat), lwpid_of (thread));
2257
2258 if (debug_threads)
2259 {
2260 struct pending_signals *sig;
2261
2262 for (sig = lwp->pending_signals_to_report;
2263 sig != NULL;
2264 sig = sig->prev)
2265 debug_printf (" Still queued %d\n",
2266 sig->signal);
2267
2268 debug_printf (" (no more queued signals)\n");
2269 }
2270
2271 return 1;
2272 }
2273
2274 return 0;
2275 }
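
/* Illustration (editor's sketch, not part of the original source):
   W_STOPCODE above synthesizes the same wait status that waitpid
   reports for a tracee stopped by that signal, so the usual macros
   round-trip:

     int ws = W_STOPCODE (SIGUSR1);
     gdb_assert (WIFSTOPPED (ws) && WSTOPSIG (ws) == SIGUSR1);
*/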
2276
2277 /* Fetch the possibly triggered data watchpoint info and store it in
2278 CHILD.
2279
2280 On some archs, like x86, that use debug registers to set
2281 watchpoints, it's possible that the way to know which watched
2282 address trapped is to check the register that is used to select
2283 which address to watch. Problem is, between setting the watchpoint
2284 and reading back which data address trapped, the user may change
2285 the set of watchpoints, and, as a consequence, GDB changes the
2286 debug registers in the inferior. To avoid reading back a stale
2287 stopped-data-address when that happens, we cache in LP the fact
2288 that a watchpoint trapped, and the corresponding data address, as
2289 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2290 registers meanwhile, we have the cached data we can rely on. */
2291
2292 static int
2293 check_stopped_by_watchpoint (struct lwp_info *child)
2294 {
2295 if (the_low_target.stopped_by_watchpoint != NULL)
2296 {
2297 struct thread_info *saved_thread;
2298
2299 saved_thread = current_thread;
2300 current_thread = get_lwp_thread (child);
2301
2302 if (the_low_target.stopped_by_watchpoint ())
2303 {
2304 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2305
2306 if (the_low_target.stopped_data_address != NULL)
2307 child->stopped_data_address
2308 = the_low_target.stopped_data_address ();
2309 else
2310 child->stopped_data_address = 0;
2311 }
2312
2313 current_thread = saved_thread;
2314 }
2315
2316 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2317 }
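
/* Illustration (editor's sketch, not part of the original source):
   on x86 the two low-target hooks used above are typically backed by
   the debug registers: DR6 reports which of the four hardware slots
   fired, DR0..DR3 hold the watched addresses.  A rough shape of such
   a backend, with a hypothetical read_dr helper:

     static int
     x86_stopped_by_watchpoint_sketch (void)
     {
       unsigned long dr6 = read_dr (6);
       return (dr6 & 0xf) != 0;  // any of B0..B3 set?
     }
*/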
2318
2319 /* Return the ptrace options that we want to try to enable. */
2320
2321 static int
2322 linux_low_ptrace_options (int attached)
2323 {
2324 client_state &cs = get_client_state ();
2325 int options = 0;
2326
2327 if (!attached)
2328 options |= PTRACE_O_EXITKILL;
2329
2330 if (cs.report_fork_events)
2331 options |= PTRACE_O_TRACEFORK;
2332
2333 if (cs.report_vfork_events)
2334 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2335
2336 if (cs.report_exec_events)
2337 options |= PTRACE_O_TRACEEXEC;
2338
2339 options |= PTRACE_O_TRACESYSGOOD;
2340
2341 return options;
2342 }
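
/* Illustration (editor's sketch, not part of the original source):
   the mask built above is eventually handed to the kernel via
   PTRACE_SETOPTIONS (which is what linux_enable_event_reporting
   boils down to); in raw ptrace terms:

     int options = linux_low_ptrace_options (attached);
     ptrace (PTRACE_SETOPTIONS, lwpid, (PTRACE_TYPE_ARG3) 0,
             (PTRACE_TYPE_ARG4) (uintptr_t) options);
*/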
2343
2344 /* Do low-level handling of the event, and check if we should go on
2345 and pass it to caller code. Return the affected lwp if we are, or
2346 NULL otherwise. */
2347
2348 static struct lwp_info *
2349 linux_low_filter_event (int lwpid, int wstat)
2350 {
2351 client_state &cs = get_client_state ();
2352 struct lwp_info *child;
2353 struct thread_info *thread;
2354 int have_stop_pc = 0;
2355
2356 child = find_lwp_pid (ptid_t (lwpid));
2357
2358 /* Check for stop events reported by a process we didn't already
2359 know about - anything not already in our LWP list.
2360
2361 If we're expecting to receive stopped processes after
2362 fork, vfork, and clone events, then we'll just add the
2363 new one to our list and go back to waiting for the event
2364 to be reported - the stopped process might be returned
2365 from waitpid before or after the event is.
2366
2367 But note the case of a non-leader thread exec'ing after the
2368 leader has exited, and gone from our lists (because
2369 check_zombie_leaders deleted it). The non-leader thread
2370 changes its tid to the tgid. */
2371
2372 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2373 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2374 {
2375 ptid_t child_ptid;
2376
2377 /* A multi-thread exec after we had seen the leader exiting. */
2378 if (debug_threads)
2379 {
2380 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2381 "after exec.\n", lwpid);
2382 }
2383
2384 child_ptid = ptid_t (lwpid, lwpid, 0);
2385 child = add_lwp (child_ptid);
2386 child->stopped = 1;
2387 current_thread = child->thread;
2388 }
2389
2390 /* If we didn't find a process, one of two things presumably happened:
2391 - A process we started and then detached from has exited. Ignore it.
2392 - A process we are controlling has forked and the new child's stop
2393 was reported to us by the kernel. Save its PID. */
2394 if (child == NULL && WIFSTOPPED (wstat))
2395 {
2396 add_to_pid_list (&stopped_pids, lwpid, wstat);
2397 return NULL;
2398 }
2399 else if (child == NULL)
2400 return NULL;
2401
2402 thread = get_lwp_thread (child);
2403
2404 child->stopped = 1;
2405
2406 child->last_status = wstat;
2407
2408 /* Check if the thread has exited. */
2409 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2410 {
2411 if (debug_threads)
2412 debug_printf ("LLFE: %d exited.\n", lwpid);
2413
2414 if (finish_step_over (child))
2415 {
2416 /* Unsuspend all other LWPs, and set them back running again. */
2417 unsuspend_all_lwps (child);
2418 }
2419
2420 /* If there is at least one more LWP, then the exit signal was
2421 not the end of the debugged application and should be
2422 ignored, unless GDB wants to hear about thread exits. */
2423 if (cs.report_thread_events
2424 || last_thread_of_process_p (pid_of (thread)))
2425 {
2426 /* Events are serialized to the GDB core, and we can't
2427 report this one right now; leave the status pending for
2428 the next time we're able to report it. */
2429 mark_lwp_dead (child, wstat);
2430 return child;
2431 }
2432 else
2433 {
2434 delete_lwp (child);
2435 return NULL;
2436 }
2437 }
2438
2439 gdb_assert (WIFSTOPPED (wstat));
2440
2441 if (WIFSTOPPED (wstat))
2442 {
2443 struct process_info *proc;
2444
2445 /* Architecture-specific setup after inferior is running. */
2446 proc = find_process_pid (pid_of (thread));
2447 if (proc->tdesc == NULL)
2448 {
2449 if (proc->attached)
2450 {
2451 /* This needs to happen after we have attached to the
2452 inferior and it is stopped for the first time, but
2453 before we access any inferior registers. */
2454 linux_arch_setup_thread (thread);
2455 }
2456 else
2457 {
2458 /* The process is started, but GDBserver will do
2459 architecture-specific setup after the program stops at
2460 the first instruction. */
2461 child->status_pending_p = 1;
2462 child->status_pending = wstat;
2463 return child;
2464 }
2465 }
2466 }
2467
2468 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2469 {
2470 struct process_info *proc = find_process_pid (pid_of (thread));
2471 int options = linux_low_ptrace_options (proc->attached);
2472
2473 linux_enable_event_reporting (lwpid, options);
2474 child->must_set_ptrace_flags = 0;
2475 }
2476
2477 /* Always update syscall_state, even if it will be filtered later. */
2478 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2479 {
2480 child->syscall_state
2481 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2482 ? TARGET_WAITKIND_SYSCALL_RETURN
2483 : TARGET_WAITKIND_SYSCALL_ENTRY);
2484 }
2485 else
2486 {
2487 /* Almost all other ptrace-stops are known to be outside of system
2488 calls, with further exceptions in handle_extended_wait. */
2489 child->syscall_state = TARGET_WAITKIND_IGNORE;
2490 }
2491
2492 /* Be careful not to overwrite stop_pc until save_stop_reason is
2493 called. */
2494 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2495 && linux_is_extended_waitstatus (wstat))
2496 {
2497 child->stop_pc = get_pc (child);
2498 if (handle_extended_wait (&child, wstat))
2499 {
2500 /* The event has been handled, so just return without
2501 reporting it. */
2502 return NULL;
2503 }
2504 }
2505
2506 if (linux_wstatus_maybe_breakpoint (wstat))
2507 {
2508 if (save_stop_reason (child))
2509 have_stop_pc = 1;
2510 }
2511
2512 if (!have_stop_pc)
2513 child->stop_pc = get_pc (child);
2514
2515 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2516 && child->stop_expected)
2517 {
2518 if (debug_threads)
2519 debug_printf ("Expected stop.\n");
2520 child->stop_expected = 0;
2521
2522 if (thread->last_resume_kind == resume_stop)
2523 {
2524 /* We want to report the stop to the core. Treat the
2525 SIGSTOP as a normal event. */
2526 if (debug_threads)
2527 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2528 target_pid_to_str (ptid_of (thread)));
2529 }
2530 else if (stopping_threads != NOT_STOPPING_THREADS)
2531 {
2532 /* Stopping threads. We don't want this SIGSTOP to end up
2533 pending. */
2534 if (debug_threads)
2535 debug_printf ("LLW: SIGSTOP caught for %s "
2536 "while stopping threads.\n",
2537 target_pid_to_str (ptid_of (thread)));
2538 return NULL;
2539 }
2540 else
2541 {
2542 /* This is a delayed SIGSTOP. Filter out the event. */
2543 if (debug_threads)
2544 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2545 child->stepping ? "step" : "continue",
2546 target_pid_to_str (ptid_of (thread)));
2547
2548 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2549 return NULL;
2550 }
2551 }
2552
2553 child->status_pending_p = 1;
2554 child->status_pending = wstat;
2555 return child;
2556 }
2557
2558 /* Return true if THREAD is doing hardware single step. */
2559
2560 static int
2561 maybe_hw_step (struct thread_info *thread)
2562 {
2563 if (can_hardware_single_step ())
2564 return 1;
2565 else
2566 {
2567 /* GDBserver must insert single-step breakpoint for software
2568 single step. */
2569 gdb_assert (has_single_step_breakpoints (thread));
2570 return 0;
2571 }
2572 }
2573
2574 /* Resume LWPs that are currently stopped without any pending status
2575 to report, but are resumed from the core's perspective. */
2576
2577 static void
2578 resume_stopped_resumed_lwps (thread_info *thread)
2579 {
2580 struct lwp_info *lp = get_thread_lwp (thread);
2581
2582 if (lp->stopped
2583 && !lp->suspended
2584 && !lp->status_pending_p
2585 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2586 {
2587 int step = 0;
2588
2589 if (thread->last_resume_kind == resume_step)
2590 step = maybe_hw_step (thread);
2591
2592 if (debug_threads)
2593 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2594 target_pid_to_str (ptid_of (thread)),
2595 paddress (lp->stop_pc),
2596 step);
2597
2598 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2599 }
2600 }
2601
2602 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2603 match FILTER_PTID (leaving others pending). The PTIDs can be:
2604 minus_one_ptid, to specify any child; a pid PTID, specifying all
2605 lwps of a thread group; or a PTID representing a single lwp. Store
2606 the stop status through the status pointer WSTAT. OPTIONS is
2607 passed to the waitpid call. Return 0 if no event was found and
2608 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2609 were found. Return the PID of the stopped child otherwise. */
2610
2611 static int
2612 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2613 int *wstatp, int options)
2614 {
2615 struct thread_info *event_thread;
2616 struct lwp_info *event_child, *requested_child;
2617 sigset_t block_mask, prev_mask;
2618
2619 retry:
2620 /* N.B. event_thread points to the thread_info struct that contains
2621 event_child. Keep them in sync. */
2622 event_thread = NULL;
2623 event_child = NULL;
2624 requested_child = NULL;
2625
2626 /* Check for a lwp with a pending status. */
2627
2628 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2629 {
2630 event_thread = find_thread_in_random ([&] (thread_info *thread)
2631 {
2632 return status_pending_p_callback (thread, filter_ptid);
2633 });
2634
2635 if (event_thread != NULL)
2636 event_child = get_thread_lwp (event_thread);
2637 if (debug_threads && event_thread)
2638 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2639 }
2640 else if (filter_ptid != null_ptid)
2641 {
2642 requested_child = find_lwp_pid (filter_ptid);
2643
2644 if (stopping_threads == NOT_STOPPING_THREADS
2645 && requested_child->status_pending_p
2646 && (requested_child->collecting_fast_tracepoint
2647 != fast_tpoint_collect_result::not_collecting))
2648 {
2649 enqueue_one_deferred_signal (requested_child,
2650 &requested_child->status_pending);
2651 requested_child->status_pending_p = 0;
2652 requested_child->status_pending = 0;
2653 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2654 }
2655
2656 if (requested_child->suspended
2657 && requested_child->status_pending_p)
2658 {
2659 internal_error (__FILE__, __LINE__,
2660 "requesting an event out of a"
2661 " suspended child?");
2662 }
2663
2664 if (requested_child->status_pending_p)
2665 {
2666 event_child = requested_child;
2667 event_thread = get_lwp_thread (event_child);
2668 }
2669 }
2670
2671 if (event_child != NULL)
2672 {
2673 if (debug_threads)
2674 debug_printf ("Got an event from pending child %ld (%04x)\n",
2675 lwpid_of (event_thread), event_child->status_pending);
2676 *wstatp = event_child->status_pending;
2677 event_child->status_pending_p = 0;
2678 event_child->status_pending = 0;
2679 current_thread = event_thread;
2680 return lwpid_of (event_thread);
2681 }
2682
2683 /* But if we don't find a pending event, we'll have to wait.
2684
2685 We only enter this loop if no process has a pending wait status.
2686 Thus any action taken in response to a wait status inside this
2687 loop is responding as soon as we detect the status, not after any
2688 pending events. */
2689
2690 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2691 all signals while here. */
2692 sigfillset (&block_mask);
2693 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2694
2695 /* Always pull all events out of the kernel. We'll randomly select
2696 an event LWP out of all that have events, to prevent
2697 starvation. */
2698 while (event_child == NULL)
2699 {
2700 pid_t ret = 0;
2701
2702 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2703 quirks:
2704
2705 - If the thread group leader exits while other threads in the
2706 thread group still exist, waitpid(TGID, ...) hangs. That
2707 waitpid won't return an exit status until the other threads
2708 in the group are reaped.
2709
2710 - When a non-leader thread execs, that thread just vanishes
2711 without reporting an exit (so we'd hang if we waited for it
2712 explicitly in that case). The exec event is reported to
2713 the TGID pid. */
2714 errno = 0;
2715 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2716
2717 if (debug_threads)
2718 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2719 ret, errno ? strerror (errno) : "ERRNO-OK");
2720
2721 if (ret > 0)
2722 {
2723 if (debug_threads)
2724 {
2725 debug_printf ("LLW: waitpid %ld received %s\n",
2726 (long) ret, status_to_str (*wstatp));
2727 }
2728
2729 /* Filter all events. IOW, leave all events pending. We'll
2730 randomly select an event LWP out of all that have events
2731 below. */
2732 linux_low_filter_event (ret, *wstatp);
2733 /* Retry until nothing comes out of waitpid. A single
2734 SIGCHLD can indicate more than one child stopped. */
2735 continue;
2736 }
2737
2738 /* Now that we've pulled all events out of the kernel, resume
2739 LWPs that don't have an interesting event to report. */
2740 if (stopping_threads == NOT_STOPPING_THREADS)
2741 for_each_thread (resume_stopped_resumed_lwps);
2742
2743 /* ... and find an LWP with a status to report to the core, if
2744 any. */
2745 event_thread = find_thread_in_random ([&] (thread_info *thread)
2746 {
2747 return status_pending_p_callback (thread, filter_ptid);
2748 });
2749
2750 if (event_thread != NULL)
2751 {
2752 event_child = get_thread_lwp (event_thread);
2753 *wstatp = event_child->status_pending;
2754 event_child->status_pending_p = 0;
2755 event_child->status_pending = 0;
2756 break;
2757 }
2758
2759 /* Check for zombie thread group leaders. Those can't be reaped
2760 until all other threads in the thread group are. */
2761 check_zombie_leaders ();
2762
2763 auto not_stopped = [&] (thread_info *thread)
2764 {
2765 return not_stopped_callback (thread, wait_ptid);
2766 };
2767
2768 /* If there are no resumed children left in the set of LWPs we
2769 want to wait for, bail. We can't just block in
2770 waitpid/sigsuspend, because lwps might have been left stopped
2771 in trace-stop state, and we'd be stuck forever waiting for
2772 their status to change (which would only happen if we resumed
2773 them). Even if WNOHANG is set, this return code is preferred
2774 over 0 (below), as it is more detailed. */
2775 if (find_thread (not_stopped) == NULL)
2776 {
2777 if (debug_threads)
2778 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2779 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2780 return -1;
2781 }
2782
2783 /* No interesting event to report to the caller. */
2784 if ((options & WNOHANG))
2785 {
2786 if (debug_threads)
2787 debug_printf ("WNOHANG set, no event found\n");
2788
2789 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2790 return 0;
2791 }
2792
2793 /* Block until we get an event reported with SIGCHLD. */
2794 if (debug_threads)
2795 debug_printf ("sigsuspend'ing\n");
2796
2797 sigsuspend (&prev_mask);
2798 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2799 goto retry;
2800 }
2801
2802 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2803
2804 current_thread = event_thread;
2805
2806 return lwpid_of (event_thread);
2807 }
2808
2809 /* Wait for an event from child(ren) PTID. PTIDs can be:
2810 minus_one_ptid, to specify any child; a pid PTID, specifying all
2811 lwps of a thread group; or a PTID representing a single lwp. Store
2812 the stop status through the status pointer WSTAT. OPTIONS is
2813 passed to the waitpid call. Return 0 if no event was found and
2814 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2815 were found. Return the PID of the stopped child otherwise. */
2816
2817 static int
2818 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2819 {
2820 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2821 }
2822
2823 /* Select one LWP out of those that have events pending. */
2824
2825 static void
2826 select_event_lwp (struct lwp_info **orig_lp)
2827 {
2828 int random_selector;
2829 struct thread_info *event_thread = NULL;
2830
2831 /* In all-stop, give preference to the LWP that is being
2832 single-stepped. There will be at most one, and it's the LWP that
2833 the core is most interested in. If we didn't do this, then we'd
2834 have to handle pending step SIGTRAPs somehow in case the core
2835 later continues the previously-stepped thread, otherwise we'd
2836 report the pending SIGTRAP, and the core, not having stepped the
2837 thread, wouldn't understand what the trap was for, and therefore
2838 would report it to the user as a random signal. */
2839 if (!non_stop)
2840 {
2841 event_thread = find_thread ([] (thread_info *thread)
2842 {
2843 lwp_info *lp = get_thread_lwp (thread);
2844
2845 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2846 && thread->last_resume_kind == resume_step
2847 && lp->status_pending_p);
2848 });
2849
2850 if (event_thread != NULL)
2851 {
2852 if (debug_threads)
2853 debug_printf ("SEL: Select single-step %s\n",
2854 target_pid_to_str (ptid_of (event_thread)));
2855 }
2856 }
2857 if (event_thread == NULL)
2858 {
2859 /* No single-stepping LWP. Select one at random, out of those
2860 which have had events. */
2861
2862 /* First see how many events we have. */
2863 int num_events = 0;
2864 for_each_thread ([&] (thread_info *thread)
2865 {
2866 lwp_info *lp = get_thread_lwp (thread);
2867
2868 /* Count only resumed LWPs that have an event pending. */
2869 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2870 && lp->status_pending_p)
2871 num_events++;
2872 });
2873 gdb_assert (num_events > 0);
2874
2875 /* Now randomly pick a LWP out of those that have had
2876 events. */
2877 random_selector = (int)
2878 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2879
2880 if (debug_threads && num_events > 1)
2881 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2882 num_events, random_selector);
2883
2884 event_thread = find_thread ([&] (thread_info *thread)
2885 {
2886 lwp_info *lp = get_thread_lwp (thread);
2887
2888 /* Select only resumed LWPs that have an event pending. */
2889 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2890 && lp->status_pending_p)
2891 if (random_selector-- == 0)
2892 return true;
2893
2894 return false;
2895 });
2896 }
2897
2898 if (event_thread != NULL)
2899 {
2900 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2901
2902 /* Switch the event LWP. */
2903 *orig_lp = event_lp;
2904 }
2905 }
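
/* Illustration (editor's sketch, not part of the original source):
   the selector in select_event_lwp maps rand () uniformly onto
   0 .. num_events - 1; e.g. with num_events == 3:

     int sel = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));
     gdb_assert (sel >= 0 && sel <= 2);
*/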
2906
2907 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2908 non-NULL. */
2909
2910 static void
2911 unsuspend_all_lwps (struct lwp_info *except)
2912 {
2913 for_each_thread ([&] (thread_info *thread)
2914 {
2915 lwp_info *lwp = get_thread_lwp (thread);
2916
2917 if (lwp != except)
2918 lwp_suspended_decr (lwp);
2919 });
2920 }
2921
2922 static void move_out_of_jump_pad_callback (thread_info *thread);
2923 static bool stuck_in_jump_pad_callback (thread_info *thread);
2924 static bool lwp_running (thread_info *thread);
2925 static ptid_t linux_wait_1 (ptid_t ptid,
2926 struct target_waitstatus *ourstatus,
2927 int target_options);
2928
2929 /* Stabilize threads (move out of jump pads).
2930
2931 If a thread is midway collecting a fast tracepoint, we need to
2932 finish the collection and move it out of the jump pad before
2933 reporting the signal.
2934
2935 This avoids recursion while collecting (when a signal arrives
2936 midway, and the signal handler itself collects), which would trash
2937 the trace buffer. In case the user set a breakpoint in a signal
2938 handler, this avoids the backtrace showing the jump pad, etc.
2939 Most importantly, there are certain things we can't do safely if
2940 threads are stopped in a jump pad (or in its callees). For
2941 example:
2942
2943 - starting a new trace run. A thread still collecting the
2944 previous run could trash the trace buffer when resumed. The trace
2945 buffer control structures would have been reset but the thread had
2946 no way to tell. The thread could even be in the middle of
2947 memcpy'ing to the buffer, which would mean that when resumed, it
2948 would clobber the trace buffer that had been set up for a new run.
2949
2950 - we can't rewrite/reuse the jump pads for new tracepoints
2951 safely. Say you do tstart while a thread is stopped midway
2952 through collecting. When the thread is later resumed, it finishes the
2953 collection, and returns to the jump pad, to execute the original
2954 instruction that was under the tracepoint jump at the time the
2955 older run had been started. If the jump pad had been rewritten
2956 since for something else in the new run, the thread would now
2957 execute the wrong / random instructions. */
2958
2959 static void
2960 linux_stabilize_threads (void)
2961 {
2962 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2963
2964 if (thread_stuck != NULL)
2965 {
2966 if (debug_threads)
2967 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2968 lwpid_of (thread_stuck));
2969 return;
2970 }
2971
2972 thread_info *saved_thread = current_thread;
2973
2974 stabilizing_threads = 1;
2975
2976 /* Kick 'em all. */
2977 for_each_thread (move_out_of_jump_pad_callback);
2978
2979 /* Loop until all are stopped out of the jump pads. */
2980 while (find_thread (lwp_running) != NULL)
2981 {
2982 struct target_waitstatus ourstatus;
2983 struct lwp_info *lwp;
2984 int wstat;
2985
2986 /* Note that we go through the full wait event loop. While
2987 moving threads out of jump pad, we need to be able to step
2988 over internal breakpoints and such. */
2989 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2990
2991 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2992 {
2993 lwp = get_thread_lwp (current_thread);
2994
2995 /* Lock it. */
2996 lwp_suspended_inc (lwp);
2997
2998 if (ourstatus.value.sig != GDB_SIGNAL_0
2999 || current_thread->last_resume_kind == resume_stop)
3000 {
3001 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3002 enqueue_one_deferred_signal (lwp, &wstat);
3003 }
3004 }
3005 }
3006
3007 unsuspend_all_lwps (NULL);
3008
3009 stabilizing_threads = 0;
3010
3011 current_thread = saved_thread;
3012
3013 if (debug_threads)
3014 {
3015 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3016
3017 if (thread_stuck != NULL)
3018 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3019 lwpid_of (thread_stuck));
3020 }
3021 }
3022
3023 /* Convenience function that is called when the kernel reports an
3024 event that is not passed out to GDB. */
3025
3026 static ptid_t
3027 ignore_event (struct target_waitstatus *ourstatus)
3028 {
3029 /* If we got an event, there may still be others, as a single
3030 SIGCHLD can indicate more than one child stopped. This forces
3031 another target_wait call. */
3032 async_file_mark ();
3033
3034 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3035 return null_ptid;
3036 }
3037
3038 /* Convenience function that is called when the kernel reports an exit
3039 event. This decides whether to report the event to GDB as a
3040 process exit event, a thread exit event, or to suppress the
3041 event. */
3042
3043 static ptid_t
3044 filter_exit_event (struct lwp_info *event_child,
3045 struct target_waitstatus *ourstatus)
3046 {
3047 client_state &cs = get_client_state ();
3048 struct thread_info *thread = get_lwp_thread (event_child);
3049 ptid_t ptid = ptid_of (thread);
3050
3051 if (!last_thread_of_process_p (pid_of (thread)))
3052 {
3053 if (cs.report_thread_events)
3054 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3055 else
3056 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3057
3058 delete_lwp (event_child);
3059 }
3060 return ptid;
3061 }
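
/* Illustration (editor's sketch, not part of the original source):
   the filter above implements this decision table:

     last thread of process?  report_thread_events?  result
     yes                      (any)                  WAITKIND_EXITED kept
     no                       yes                    WAITKIND_THREAD_EXITED
     no                       no                     WAITKIND_IGNORE
     (and in both "no" rows the lwp is deleted)
*/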
3062
3063 /* Returns 1 if GDB is interested in any event_child syscalls. */
3064
3065 static int
3066 gdb_catching_syscalls_p (struct lwp_info *event_child)
3067 {
3068 struct thread_info *thread = get_lwp_thread (event_child);
3069 struct process_info *proc = get_thread_process (thread);
3070
3071 return !proc->syscalls_to_catch.empty ();
3072 }
3073
3074 /* Returns 1 if GDB is interested in the event_child syscall.
3075 Only to be called when the stop signal is SYSCALL_SIGTRAP. */
3076
3077 static int
3078 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3079 {
3080 int sysno;
3081 struct thread_info *thread = get_lwp_thread (event_child);
3082 struct process_info *proc = get_thread_process (thread);
3083
3084 if (proc->syscalls_to_catch.empty ())
3085 return 0;
3086
3087 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3088 return 1;
3089
3090 get_syscall_trapinfo (event_child, &sysno);
3091
3092 for (int iter : proc->syscalls_to_catch)
3093 if (iter == sysno)
3094 return 1;
3095
3096 return 0;
3097 }
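
/* Illustration (editor's sketch, not part of the original source):
   syscalls_to_catch uses ANY_SYSCALL in slot 0 as a wildcard, so the
   two interesting shapes of the vector are (SYS_write / SYS_openat
   stand in for whatever GDB asked to catch):

     proc->syscalls_to_catch = { ANY_SYSCALL };            // catch all
     proc->syscalls_to_catch = { SYS_write, SYS_openat };  // only these
*/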
3098
3099 /* Wait for process, returns status. */
3100
3101 static ptid_t
3102 linux_wait_1 (ptid_t ptid,
3103 struct target_waitstatus *ourstatus, int target_options)
3104 {
3105 client_state &cs = get_client_state ();
3106 int w;
3107 struct lwp_info *event_child;
3108 int options;
3109 int pid;
3110 int step_over_finished;
3111 int bp_explains_trap;
3112 int maybe_internal_trap;
3113 int report_to_gdb;
3114 int trace_event;
3115 int in_step_range;
3116 int any_resumed;
3117
3118 if (debug_threads)
3119 {
3120 debug_enter ();
3121 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3122 }
3123
3124 /* Translate generic target options into linux options. */
3125 options = __WALL;
3126 if (target_options & TARGET_WNOHANG)
3127 options |= WNOHANG;
3128
3129 bp_explains_trap = 0;
3130 trace_event = 0;
3131 in_step_range = 0;
3132 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3133
3134 auto status_pending_p_any = [&] (thread_info *thread)
3135 {
3136 return status_pending_p_callback (thread, minus_one_ptid);
3137 };
3138
3139 auto not_stopped = [&] (thread_info *thread)
3140 {
3141 return not_stopped_callback (thread, minus_one_ptid);
3142 };
3143
3144 /* Find a resumed LWP, if any. */
3145 if (find_thread (status_pending_p_any) != NULL)
3146 any_resumed = 1;
3147 else if (find_thread (not_stopped) != NULL)
3148 any_resumed = 1;
3149 else
3150 any_resumed = 0;
3151
3152 if (step_over_bkpt == null_ptid)
3153 pid = linux_wait_for_event (ptid, &w, options);
3154 else
3155 {
3156 if (debug_threads)
3157 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3158 target_pid_to_str (step_over_bkpt));
3159 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3160 }
3161
3162 if (pid == 0 || (pid == -1 && !any_resumed))
3163 {
3164 gdb_assert (target_options & TARGET_WNOHANG);
3165
3166 if (debug_threads)
3167 {
3168 debug_printf ("linux_wait_1 ret = null_ptid, "
3169 "TARGET_WAITKIND_IGNORE\n");
3170 debug_exit ();
3171 }
3172
3173 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3174 return null_ptid;
3175 }
3176 else if (pid == -1)
3177 {
3178 if (debug_threads)
3179 {
3180 debug_printf ("linux_wait_1 ret = null_ptid, "
3181 "TARGET_WAITKIND_NO_RESUMED\n");
3182 debug_exit ();
3183 }
3184
3185 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3186 return null_ptid;
3187 }
3188
3189 event_child = get_thread_lwp (current_thread);
3190
3191 /* linux_wait_for_event only returns an exit status for the last
3192 child of a process. Report it. */
3193 if (WIFEXITED (w) || WIFSIGNALED (w))
3194 {
3195 if (WIFEXITED (w))
3196 {
3197 ourstatus->kind = TARGET_WAITKIND_EXITED;
3198 ourstatus->value.integer = WEXITSTATUS (w);
3199
3200 if (debug_threads)
3201 {
3202 debug_printf ("linux_wait_1 ret = %s, exited with "
3203 "retcode %d\n",
3204 target_pid_to_str (ptid_of (current_thread)),
3205 WEXITSTATUS (w));
3206 debug_exit ();
3207 }
3208 }
3209 else
3210 {
3211 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3212 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3213
3214 if (debug_threads)
3215 {
3216 debug_printf ("linux_wait_1 ret = %s, terminated with "
3217 "signal %d\n",
3218 target_pid_to_str (ptid_of (current_thread)),
3219 WTERMSIG (w));
3220 debug_exit ();
3221 }
3222 }
3223
3224 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3225 return filter_exit_event (event_child, ourstatus);
3226
3227 return ptid_of (current_thread);
3228 }
3229
3230 /* If a step-over executed a breakpoint instruction, then in the case
3231 of a hardware single step it means a gdb/gdbserver breakpoint had
3232 been planted on top of a permanent breakpoint; in the case of a
3233 software single step it may just mean that gdbserver hit the
3234 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3235 to point at the breakpoint address. So, for hardware single step,
3236 advance the PC manually past the breakpoint; for software single
3237 step, advance only if it's not the single_step_breakpoint we are
3238 hitting. This keeps the program from trapping a permanent
3239 breakpoint forever. */
3241 if (step_over_bkpt != null_ptid
3242 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3243 && (event_child->stepping
3244 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3245 {
3246 int increment_pc = 0;
3247 int breakpoint_kind = 0;
3248 CORE_ADDR stop_pc = event_child->stop_pc;
3249
3250 breakpoint_kind =
3251 the_target->breakpoint_kind_from_current_state (&stop_pc);
3252 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3253
3254 if (debug_threads)
3255 {
3256 debug_printf ("step-over for %s executed software breakpoint\n",
3257 target_pid_to_str (ptid_of (current_thread)));
3258 }
3259
3260 if (increment_pc != 0)
3261 {
3262 struct regcache *regcache
3263 = get_thread_regcache (current_thread, 1);
3264
3265 event_child->stop_pc += increment_pc;
3266 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3267
3268 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3269 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3270 }
3271 }
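
/* Illustration (editor's sketch, not part of the original source):
   on x86, for instance, breakpoint_kind_from_current_state reports
   kind 1 and sw_breakpoint_from_kind maps it to the one-byte 0xcc
   (int3) pattern, so the adjustment above amounts to stepping
   stop_pc one byte past a permanent int3:

     event_child->stop_pc += 1;
*/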
3272
3273 /* If this event was not handled before, and is not a SIGTRAP, we
3274 report it. SIGILL and SIGSEGV are also treated as traps in case
3275 a breakpoint is inserted at the current PC. If this target does
3276 not support internal breakpoints at all, we also report the
3277 SIGTRAP without further processing; it's of no concern to us. */
3278 maybe_internal_trap
3279 = (supports_breakpoints ()
3280 && (WSTOPSIG (w) == SIGTRAP
3281 || ((WSTOPSIG (w) == SIGILL
3282 || WSTOPSIG (w) == SIGSEGV)
3283 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3284
3285 if (maybe_internal_trap)
3286 {
3287 /* Handle anything that requires bookkeeping before deciding to
3288 report the event or continue waiting. */
3289
3290 /* First check if we can explain the SIGTRAP with an internal
3291 breakpoint, or if we should possibly report the event to GDB.
3292 Do this before anything that may remove or insert a
3293 breakpoint. */
3294 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3295
3296 /* We have a SIGTRAP, possibly a step-over dance has just
3297 finished. If so, tweak the state machine accordingly,
3298 reinsert breakpoints and delete any single-step
3299 breakpoints. */
3300 step_over_finished = finish_step_over (event_child);
3301
3302 /* Now invoke the callbacks of any internal breakpoints there. */
3303 check_breakpoints (event_child->stop_pc);
3304
3305 /* Handle tracepoint data collecting. This may overflow the
3306 trace buffer, and cause a tracing stop, removing
3307 breakpoints. */
3308 trace_event = handle_tracepoints (event_child);
3309
3310 if (bp_explains_trap)
3311 {
3312 if (debug_threads)
3313 debug_printf ("Hit a gdbserver breakpoint.\n");
3314 }
3315 }
3316 else
3317 {
3318 /* We have some other signal, possibly a step-over dance was in
3319 progress, and it should be cancelled too. */
3320 step_over_finished = finish_step_over (event_child);
3321 }
3322
3323 /* We have all the data we need. Either report the event to GDB, or
3324 resume threads and keep waiting for more. */
3325
3326 /* If we're collecting a fast tracepoint, finish the collection and
3327 move out of the jump pad before delivering a signal. See
3328 linux_stabilize_threads. */
3329
3330 if (WIFSTOPPED (w)
3331 && WSTOPSIG (w) != SIGTRAP
3332 && supports_fast_tracepoints ()
3333 && agent_loaded_p ())
3334 {
3335 if (debug_threads)
3336 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3337 "to defer or adjust it.\n",
3338 WSTOPSIG (w), lwpid_of (current_thread));
3339
3340 /* Allow debugging the jump pad itself. */
3341 if (current_thread->last_resume_kind != resume_step
3342 && maybe_move_out_of_jump_pad (event_child, &w))
3343 {
3344 enqueue_one_deferred_signal (event_child, &w);
3345
3346 if (debug_threads)
3347 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3348 WSTOPSIG (w), lwpid_of (current_thread));
3349
3350 linux_resume_one_lwp (event_child, 0, 0, NULL);
3351
3352 if (debug_threads)
3353 debug_exit ();
3354 return ignore_event (ourstatus);
3355 }
3356 }
3357
3358 if (event_child->collecting_fast_tracepoint
3359 != fast_tpoint_collect_result::not_collecting)
3360 {
3361 if (debug_threads)
3362 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3363 "Check if we're already there.\n",
3364 lwpid_of (current_thread),
3365 (int) event_child->collecting_fast_tracepoint);
3366
3367 trace_event = 1;
3368
3369 event_child->collecting_fast_tracepoint
3370 = linux_fast_tracepoint_collecting (event_child, NULL);
3371
3372 if (event_child->collecting_fast_tracepoint
3373 != fast_tpoint_collect_result::before_insn)
3374 {
3375 /* No longer need this breakpoint. */
3376 if (event_child->exit_jump_pad_bkpt != NULL)
3377 {
3378 if (debug_threads)
3379 debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3380 "stopping all threads momentarily.\n");
3381
3382 /* Other running threads could hit this breakpoint.
3383 We don't handle moribund locations like GDB does,
3384 instead we always pause all threads when removing
3385 breakpoints, so that any step-over or
3386 decr_pc_after_break adjustment is always taken
3387 care of while the breakpoint is still
3388 inserted. */
3389 stop_all_lwps (1, event_child);
3390
3391 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3392 event_child->exit_jump_pad_bkpt = NULL;
3393
3394 unstop_all_lwps (1, event_child);
3395
3396 gdb_assert (event_child->suspended >= 0);
3397 }
3398 }
3399
3400 if (event_child->collecting_fast_tracepoint
3401 == fast_tpoint_collect_result::not_collecting)
3402 {
3403 if (debug_threads)
3404 debug_printf ("fast tracepoint finished "
3405 "collecting successfully.\n");
3406
3407 /* We may have a deferred signal to report. */
3408 if (dequeue_one_deferred_signal (event_child, &w))
3409 {
3410 if (debug_threads)
3411 debug_printf ("dequeued one signal.\n");
3412 }
3413 else
3414 {
3415 if (debug_threads)
3416 debug_printf ("no deferred signals.\n");
3417
3418 if (stabilizing_threads)
3419 {
3420 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3421 ourstatus->value.sig = GDB_SIGNAL_0;
3422
3423 if (debug_threads)
3424 {
3425 debug_printf ("linux_wait_1 ret = %s, stopped "
3426 "while stabilizing threads\n",
3427 target_pid_to_str (ptid_of (current_thread)));
3428 debug_exit ();
3429 }
3430
3431 return ptid_of (current_thread);
3432 }
3433 }
3434 }
3435 }
3436
3437 /* Check whether GDB would be interested in this event. */
3438
3439 /* Check if GDB is interested in this syscall. */
3440 if (WIFSTOPPED (w)
3441 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3442 && !gdb_catch_this_syscall_p (event_child))
3443 {
3444 if (debug_threads)
3445 {
3446 debug_printf ("Ignored syscall for LWP %ld.\n",
3447 lwpid_of (current_thread));
3448 }
3449
3450 linux_resume_one_lwp (event_child, event_child->stepping,
3451 0, NULL);
3452
3453 if (debug_threads)
3454 debug_exit ();
3455 return ignore_event (ourstatus);
3456 }
3457
3458 /* If GDB is not interested in this signal, don't stop other
3459 threads, and don't report it to GDB. Just resume the inferior
3460 right away. We do this for threading-related signals as well as
3461 any that GDB specifically requested we ignore. But never ignore
3462 SIGSTOP if we sent it ourselves, and do not ignore signals when
3463 stepping - they may require special handling to skip the signal
3464 handler. Also never ignore signals that could be caused by a
3465 breakpoint. */
3466 if (WIFSTOPPED (w)
3467 && current_thread->last_resume_kind != resume_step
3468 && (
3469 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3470 (current_process ()->priv->thread_db != NULL
3471 && (WSTOPSIG (w) == __SIGRTMIN
3472 || WSTOPSIG (w) == __SIGRTMIN + 1))
3473 ||
3474 #endif
3475 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3476 && !(WSTOPSIG (w) == SIGSTOP
3477 && current_thread->last_resume_kind == resume_stop)
3478 && !linux_wstatus_maybe_breakpoint (w))))
3479 {
3480 siginfo_t info, *info_p;
3481
3482 if (debug_threads)
3483 debug_printf ("Ignored signal %d for LWP %ld.\n",
3484 WSTOPSIG (w), lwpid_of (current_thread));
3485
3486 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3487 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3488 info_p = &info;
3489 else
3490 info_p = NULL;
3491
3492 if (step_over_finished)
3493 {
3494 /* We cancelled this thread's step-over above. We still
3495 need to unsuspend all other LWPs, and set them back
3496 running again while the signal handler runs. */
3497 unsuspend_all_lwps (event_child);
3498
3499 /* Enqueue the pending signal info so that proceed_all_lwps
3500 doesn't lose it. */
3501 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3502
3503 proceed_all_lwps ();
3504 }
3505 else
3506 {
3507 linux_resume_one_lwp (event_child, event_child->stepping,
3508 WSTOPSIG (w), info_p);
3509 }
3510
3511 if (debug_threads)
3512 debug_exit ();
3513
3514 return ignore_event (ourstatus);
3515 }
3516
3517 /* Note that all addresses are always "out of the step range" when
3518 there's no range to begin with. */
3519 in_step_range = lwp_in_step_range (event_child);
3520
3521 /* If GDB wanted this thread to single step, and the thread is out
3522 of the step range, we always want to report the SIGTRAP, and let
3523 GDB handle it. Watchpoints should always be reported. So should
3524 signals we can't explain. A SIGTRAP we can't explain could be a
3525 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3526 do, we'll be able to handle GDB breakpoints on top of internal
3527 breakpoints, by handling the internal breakpoint and still
3528 reporting the event to GDB. If we don't, we're out of luck, GDB
3529 won't see the breakpoint hit. If we see a single-step event but
3530 the thread should be continuing, don't pass the trap to gdb.
3531 That indicates that we had previously finished a single-step but
3532 left the single-step pending -- see
3533 complete_ongoing_step_over. */
3534 report_to_gdb = (!maybe_internal_trap
3535 || (current_thread->last_resume_kind == resume_step
3536 && !in_step_range)
3537 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3538 || (!in_step_range
3539 && !bp_explains_trap
3540 && !trace_event
3541 && !step_over_finished
3542 && !(current_thread->last_resume_kind == resume_continue
3543 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3544 || (gdb_breakpoint_here (event_child->stop_pc)
3545 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3546 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3547 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3548
3549 run_breakpoint_commands (event_child->stop_pc);
3550
3551 /* We found no reason GDB would want us to stop. We either hit one
3552 of our own breakpoints, or finished an internal step GDB
3553 shouldn't know about. */
3554 if (!report_to_gdb)
3555 {
3556 if (debug_threads)
3557 {
3558 if (bp_explains_trap)
3559 debug_printf ("Hit a gdbserver breakpoint.\n");
3560 if (step_over_finished)
3561 debug_printf ("Step-over finished.\n");
3562 if (trace_event)
3563 debug_printf ("Tracepoint event.\n");
3564 if (lwp_in_step_range (event_child))
3565 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3566 paddress (event_child->stop_pc),
3567 paddress (event_child->step_range_start),
3568 paddress (event_child->step_range_end));
3569 }
3570
3571 /* We're not reporting this breakpoint to GDB, so apply the
3572 decr_pc_after_break adjustment to the inferior's regcache
3573 ourselves. */
3574
3575 if (the_low_target.set_pc != NULL)
3576 {
3577 struct regcache *regcache
3578 = get_thread_regcache (current_thread, 1);
3579 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3580 }
3581
3582 if (step_over_finished)
3583 {
3584 /* If we have finished stepping over a breakpoint, we've
3585 stopped and suspended all LWPs momentarily except the
3586 stepping one. This is where we resume them all again.
3587 We're going to keep waiting, so use proceed, which
3588 handles stepping over the next breakpoint. */
3589 unsuspend_all_lwps (event_child);
3590 }
3591 else
3592 {
3593 /* Remove the single-step breakpoints if any. Note that
3594 there are no single-step breakpoints if we finished stepping
3595 over. */
3596 if (can_software_single_step ()
3597 && has_single_step_breakpoints (current_thread))
3598 {
3599 stop_all_lwps (0, event_child);
3600 delete_single_step_breakpoints (current_thread);
3601 unstop_all_lwps (0, event_child);
3602 }
3603 }
3604
3605 if (debug_threads)
3606 debug_printf ("proceeding all threads.\n");
3607 proceed_all_lwps ();
3608
3609 if (debug_threads)
3610 debug_exit ();
3611
3612 return ignore_event (ourstatus);
3613 }
3614
3615 if (debug_threads)
3616 {
3617 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3618 {
3619 std::string str
3620 = target_waitstatus_to_string (&event_child->waitstatus);
3621
3622 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3623 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3624 }
3625 if (current_thread->last_resume_kind == resume_step)
3626 {
3627 if (event_child->step_range_start == event_child->step_range_end)
3628 debug_printf ("GDB wanted to single-step, reporting event.\n");
3629 else if (!lwp_in_step_range (event_child))
3630 debug_printf ("Out of step range, reporting event.\n");
3631 }
3632 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3633 debug_printf ("Stopped by watchpoint.\n");
3634 else if (gdb_breakpoint_here (event_child->stop_pc))
3635 debug_printf ("Stopped by GDB breakpoint.\n");
3637 debug_printf ("Hit a non-gdbserver trap event.\n");
3638 }
3639
3640 /* Alright, we're going to report a stop. */
3641
3642 /* Remove single-step breakpoints. */
3643 if (can_software_single_step ())
3644 {
3645 /* Decide whether to remove single-step breakpoints: if we do, stop
3646 all lwps first, so that other threads won't hit a breakpoint left
3647 in stale memory. */
3648 int remove_single_step_breakpoints_p = 0;
3649
3650 if (non_stop)
3651 {
3652 remove_single_step_breakpoints_p
3653 = has_single_step_breakpoints (current_thread);
3654 }
3655 else
3656 {
3657 /* In all-stop, a stop reply cancels all previous resume
3658 requests. Delete all single-step breakpoints. */
3659
3660 find_thread ([&] (thread_info *thread) {
3661 if (has_single_step_breakpoints (thread))
3662 {
3663 remove_single_step_breakpoints_p = 1;
3664 return true;
3665 }
3666
3667 return false;
3668 });
3669 }
3670
3671 if (remove_single_step_breakpoints_p)
3672 {
3673 /* If we remove single-step breakpoints from memory, stop all
3674 lwps, so that other threads won't hit the breakpoint in the
3675 stale memory. */
3676 stop_all_lwps (0, event_child);
3677
3678 if (non_stop)
3679 {
3680 gdb_assert (has_single_step_breakpoints (current_thread));
3681 delete_single_step_breakpoints (current_thread);
3682 }
3683 else
3684 {
3685 for_each_thread ([] (thread_info *thread){
3686 if (has_single_step_breakpoints (thread))
3687 delete_single_step_breakpoints (thread);
3688 });
3689 }
3690
3691 unstop_all_lwps (0, event_child);
3692 }
3693 }
3694
3695 if (!stabilizing_threads)
3696 {
3697 /* In all-stop, stop all threads. */
3698 if (!non_stop)
3699 stop_all_lwps (0, NULL);
3700
3701 if (step_over_finished)
3702 {
3703 if (!non_stop)
3704 {
3705 /* If we were doing a step-over, all other threads but
3706 the stepping one had been paused in start_step_over,
3707 with their suspend counts incremented. We don't want
3708 to do a full unstop/unpause, because we're in
3709 all-stop mode (so we want threads stopped), but we
3710 still need to unsuspend the other threads, to
3711 decrement their `suspended' count back. */
3712 unsuspend_all_lwps (event_child);
3713 }
3714 else
3715 {
3716 /* If we just finished a step-over, then all threads had
3717 been momentarily paused. In all-stop, that's fine,
3718 we want threads stopped by now anyway. In non-stop,
3719 we need to re-resume threads that GDB wanted to be
3720 running. */
3721 unstop_all_lwps (1, event_child);
3722 }
3723 }
3724
3725 /* If we're not waiting for a specific LWP, choose an event LWP
3726 from among those that have had events. Giving equal priority
3727 to all LWPs that have had events helps prevent
3728 starvation. */
3729 if (ptid == minus_one_ptid)
3730 {
3731 event_child->status_pending_p = 1;
3732 event_child->status_pending = w;
3733
3734 select_event_lwp (&event_child);
3735
3736 /* current_thread and event_child must stay in sync. */
3737 current_thread = get_lwp_thread (event_child);
3738
3739 event_child->status_pending_p = 0;
3740 w = event_child->status_pending;
3741 }
3742
3744 /* Stabilize threads (move out of jump pads). */
3745 if (!non_stop)
3746 stabilize_threads ();
3747 }
3748 else
3749 {
3750 /* If we just finished a step-over, then all threads had been
3751 momentarily paused. In all-stop, that's fine, we want
3752 threads stopped by now anyway. In non-stop, we need to
3753 re-resume threads that GDB wanted to be running. */
3754 if (step_over_finished)
3755 unstop_all_lwps (1, event_child);
3756 }
3757
3758 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3759 {
3760 /* If the reported event is an exit, fork, vfork or exec, let
3761 GDB know. */
3762
3763 /* Break the unreported fork relationship chain. */
3764 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3765 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3766 {
3767 event_child->fork_relative->fork_relative = NULL;
3768 event_child->fork_relative = NULL;
3769 }
3770
3771 *ourstatus = event_child->waitstatus;
3772 /* Clear the event lwp's waitstatus since we handled it already. */
3773 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3774 }
3775 else
3776 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3777
3778 /* Now that we've selected our final event LWP, un-adjust its PC if
3779 it was a software breakpoint, and the client doesn't know we can
3780 adjust the breakpoint ourselves. */
3781 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3782 && !cs.swbreak_feature)
3783 {
3784 int decr_pc = the_low_target.decr_pc_after_break;
3785
3786 if (decr_pc != 0)
3787 {
3788 struct regcache *regcache
3789 = get_thread_regcache (current_thread, 1);
3790 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3791 }
3792 }
3793
3794 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3795 {
3796 get_syscall_trapinfo (event_child,
3797 &ourstatus->value.syscall_number);
3798 ourstatus->kind = event_child->syscall_state;
3799 }
3800 else if (current_thread->last_resume_kind == resume_stop
3801 && WSTOPSIG (w) == SIGSTOP)
3802 {
3803 /* The thread was requested to stop by GDB with vCont;t, and it
3804 stopped cleanly, so report it as SIG0. The use of SIGSTOP is
3805 an implementation detail. */
3806 ourstatus->value.sig = GDB_SIGNAL_0;
3807 }
3808 else if (current_thread->last_resume_kind == resume_stop
3809 && WSTOPSIG (w) != SIGSTOP)
3810 {
3811 /* The thread was requested to stop by GDB with vCont;t, but it
3812 stopped for some other reason; report that signal instead. */
3813 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3814 }
3815 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3816 {
3817 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3818 }
3819
3820 gdb_assert (step_over_bkpt == null_ptid);
3821
3822 if (debug_threads)
3823 {
3824 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3825 target_pid_to_str (ptid_of (current_thread)),
3826 ourstatus->kind, ourstatus->value.sig);
3827 debug_exit ();
3828 }
3829
3830 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3831 return filter_exit_event (event_child, ourstatus);
3832
3833 return ptid_of (current_thread);
3834 }
3835
3836 /* Get rid of any pending event in the pipe. */
3837 static void
3838 async_file_flush (void)
3839 {
3840 int ret;
3841 char buf;
3842
3843 do
3844 ret = read (linux_event_pipe[0], &buf, 1);
3845 while (ret >= 0 || (ret == -1 && errno == EINTR));
3846 }
3847
3848 /* Put something in the pipe, so the event loop wakes up. */
3849 static void
3850 async_file_mark (void)
3851 {
3852 int ret;
3853
3854 async_file_flush ();
3855
3856 do
3857 ret = write (linux_event_pipe[1], "+", 1);
3858 while (ret == 0 || (ret == -1 && errno == EINTR));
3859
3860 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3861 be awakened anyway. */
3862 }
3863
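/* A minimal sketch (not gdbserver's own setup code, and with
   hypothetical names) of the self-pipe trick the two helpers above
   rely on: a nonblocking pipe whose read end the event loop watches.
   Writing one byte wakes the loop; draining the pipe clears the
   wakeup. */
#if 0
static int example_event_pipe[2];

static int
example_setup_event_pipe (void)
{
  int i;

  if (pipe (example_event_pipe) != 0)
    return -1;

  /* Both ends must be nonblocking so that flushing and marking never
     block, matching the read/write loops above. */
  for (i = 0; i < 2; i++)
    {
      int flags = fcntl (example_event_pipe[i], F_GETFL);

      if (flags == -1
          || fcntl (example_event_pipe[i], F_SETFL, flags | O_NONBLOCK) != 0)
        return -1;
    }
  return 0;
}
#endif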
3864 static ptid_t
3865 linux_wait (ptid_t ptid,
3866 struct target_waitstatus *ourstatus, int target_options)
3867 {
3868 ptid_t event_ptid;
3869
3870 /* Flush the async file first. */
3871 if (target_is_async_p ())
3872 async_file_flush ();
3873
3874 do
3875 {
3876 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3877 }
3878 while ((target_options & TARGET_WNOHANG) == 0
3879 && event_ptid == null_ptid
3880 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3881
3882 /* If at least one stop was reported, there may be more. A single
3883 SIGCHLD can signal more than one child stop. */
3884 if (target_is_async_p ()
3885 && (target_options & TARGET_WNOHANG) != 0
3886 && event_ptid != null_ptid)
3887 async_file_mark ();
3888
3889 return event_ptid;
3890 }
3891
3892 /* Send a signal to an LWP. */
3893
3894 static int
3895 kill_lwp (unsigned long lwpid, int signo)
3896 {
3897 int ret;
3898
3899 errno = 0;
3900 ret = syscall (__NR_tkill, lwpid, signo);
3901 if (errno == ENOSYS)
3902 {
3903 /* If tkill fails, then we are not using nptl threads, a
3904 configuration we no longer support. */
3905 perror_with_name (("tkill"));
3906 }
3907 return ret;
3908 }
3909
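/* Note on the raw syscall above: kill (PID, SIG) addresses a whole
   thread group, and the kernel may deliver the signal to any eligible
   thread in it, while tkill (and its newer variant tgkill, which also
   takes the thread-group id) addresses one specific LWP. Per-thread
   stop/resume needs the latter. tkill has no glibc wrapper, hence
   the direct syscall (__NR_tkill, ...). */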
3910 void
3911 linux_stop_lwp (struct lwp_info *lwp)
3912 {
3913 send_sigstop (lwp);
3914 }
3915
3916 static void
3917 send_sigstop (struct lwp_info *lwp)
3918 {
3919 int pid;
3920
3921 pid = lwpid_of (get_lwp_thread (lwp));
3922
3923 /* If we already have a pending stop signal for this LWP, don't
3924 send another. */
3925 if (lwp->stop_expected)
3926 {
3927 if (debug_threads)
3928 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3929
3930 return;
3931 }
3932
3933 if (debug_threads)
3934 debug_printf ("Sending sigstop to lwp %d\n", pid);
3935
3936 lwp->stop_expected = 1;
3937 kill_lwp (pid, SIGSTOP);
3938 }
3939
3940 static void
3941 send_sigstop (thread_info *thread, lwp_info *except)
3942 {
3943 struct lwp_info *lwp = get_thread_lwp (thread);
3944
3945 /* Ignore EXCEPT. */
3946 if (lwp == except)
3947 return;
3948
3949 if (lwp->stopped)
3950 return;
3951
3952 send_sigstop (lwp);
3953 }
3954
3955 /* Increment the suspend count of an LWP, and stop it, if not stopped
3956 yet. */
3957 static void
3958 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3959 {
3960 struct lwp_info *lwp = get_thread_lwp (thread);
3961
3962 /* Ignore EXCEPT. */
3963 if (lwp == except)
3964 return;
3965
3966 lwp_suspended_inc (lwp);
3967
3968 send_sigstop (thread, except);
3969 }
3970
3971 static void
3972 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3973 {
3974 /* Store the exit status for later. */
3975 lwp->status_pending_p = 1;
3976 lwp->status_pending = wstat;
3977
3978 /* Store in waitstatus as well, as there's nothing else to process
3979 for this event. */
3980 if (WIFEXITED (wstat))
3981 {
3982 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3983 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3984 }
3985 else if (WIFSIGNALED (wstat))
3986 {
3987 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3988 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3989 }
3990
3991 /* Prevent trying to stop it. */
3992 lwp->stopped = 1;
3993
3994 /* No further stops are expected from a dead lwp. */
3995 lwp->stop_expected = 0;
3996 }
3997
3998 /* Return true if LWP has exited already, and has a pending exit event
3999 to report to GDB. */
4000
4001 static int
4002 lwp_is_marked_dead (struct lwp_info *lwp)
4003 {
4004 return (lwp->status_pending_p
4005 && (WIFEXITED (lwp->status_pending)
4006 || WIFSIGNALED (lwp->status_pending)));
4007 }
4008
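/* Worked example of the wait-status encodings used above (see
   waitpid(2)): for a child that called exit (3), WIFEXITED (wstat) is
   true and WEXITSTATUS (wstat) is 3; for a child killed by SIGKILL,
   WIFSIGNALED (wstat) is true and WTERMSIG (wstat) is SIGKILL. Only
   those two shapes count as "dead" for lwp_is_marked_dead. */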
4009 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4010
4011 static void
4012 wait_for_sigstop (void)
4013 {
4014 struct thread_info *saved_thread;
4015 ptid_t saved_tid;
4016 int wstat;
4017 int ret;
4018
4019 saved_thread = current_thread;
4020 if (saved_thread != NULL)
4021 saved_tid = saved_thread->id;
4022 else
4023 saved_tid = null_ptid; /* avoid bogus unused warning */
4024
4025 if (debug_threads)
4026 debug_printf ("wait_for_sigstop: pulling events\n");
4027
4028 /* Passing NULL_PTID as filter indicates we want all events to be
4029 left pending. Eventually this returns when there are no
4030 unwaited-for children left. */
4031 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4032 &wstat, __WALL);
4033 gdb_assert (ret == -1);
4034
4035 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4036 current_thread = saved_thread;
4037 else
4038 {
4039 if (debug_threads)
4040 debug_printf ("Previously current thread died.\n");
4041
4042 /* We can't change the current inferior behind GDB's back,
4043 otherwise, a subsequent command may apply to the wrong
4044 process. */
4045 current_thread = NULL;
4046 }
4047 }
4048
4049 /* Returns true if THREAD is stopped in a jump pad, and we can't
4050 move it out, because we need to report the stop event to GDB. For
4051 example, if the user puts a breakpoint in the jump pad, it's
4052 because she wants to debug it. */
4053
4054 static bool
4055 stuck_in_jump_pad_callback (thread_info *thread)
4056 {
4057 struct lwp_info *lwp = get_thread_lwp (thread);
4058
4059 if (lwp->suspended != 0)
4060 {
4061 internal_error (__FILE__, __LINE__,
4062 "LWP %ld is suspended, suspended=%d\n",
4063 lwpid_of (thread), lwp->suspended);
4064 }
4065 gdb_assert (lwp->stopped);
4066
4067 /* Allow debugging the jump pad, gdb_collect, etc. */
4068 return (supports_fast_tracepoints ()
4069 && agent_loaded_p ()
4070 && (gdb_breakpoint_here (lwp->stop_pc)
4071 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4072 || thread->last_resume_kind == resume_step)
4073 && (linux_fast_tracepoint_collecting (lwp, NULL)
4074 != fast_tpoint_collect_result::not_collecting));
4075 }
4076
4077 static void
4078 move_out_of_jump_pad_callback (thread_info *thread)
4079 {
4080 struct thread_info *saved_thread;
4081 struct lwp_info *lwp = get_thread_lwp (thread);
4082 int *wstat;
4083
4084 if (lwp->suspended != 0)
4085 {
4086 internal_error (__FILE__, __LINE__,
4087 "LWP %ld is suspended, suspended=%d\n",
4088 lwpid_of (thread), lwp->suspended);
4089 }
4090 gdb_assert (lwp->stopped);
4091
4092 /* For gdb_breakpoint_here. */
4093 saved_thread = current_thread;
4094 current_thread = thread;
4095
4096 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4097
4098 /* Allow debugging the jump pad, gdb_collect, etc. */
4099 if (!gdb_breakpoint_here (lwp->stop_pc)
4100 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4101 && thread->last_resume_kind != resume_step
4102 && maybe_move_out_of_jump_pad (lwp, wstat))
4103 {
4104 if (debug_threads)
4105 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4106 lwpid_of (thread));
4107
4108 if (wstat)
4109 {
4110 lwp->status_pending_p = 0;
4111 enqueue_one_deferred_signal (lwp, wstat);
4112
4113 if (debug_threads)
4114 debug_printf ("Signal %d for LWP %ld deferred "
4115 "(in jump pad)\n",
4116 WSTOPSIG (*wstat), lwpid_of (thread));
4117 }
4118
4119 linux_resume_one_lwp (lwp, 0, 0, NULL);
4120 }
4121 else
4122 lwp_suspended_inc (lwp);
4123
4124 current_thread = saved_thread;
4125 }
4126
4127 static bool
4128 lwp_running (thread_info *thread)
4129 {
4130 struct lwp_info *lwp = get_thread_lwp (thread);
4131
4132 if (lwp_is_marked_dead (lwp))
4133 return false;
4134
4135 return !lwp->stopped;
4136 }
4137
4138 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4139 If SUSPEND, then also increase the suspend count of every LWP,
4140 except EXCEPT. */
4141
4142 static void
4143 stop_all_lwps (int suspend, struct lwp_info *except)
4144 {
4145 /* Should not be called recursively. */
4146 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4147
4148 if (debug_threads)
4149 {
4150 debug_enter ();
4151 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4152 suspend ? "stop-and-suspend" : "stop",
4153 except != NULL
4154 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4155 : "none");
4156 }
4157
4158 stopping_threads = (suspend
4159 ? STOPPING_AND_SUSPENDING_THREADS
4160 : STOPPING_THREADS);
4161
4162 if (suspend)
4163 for_each_thread ([&] (thread_info *thread)
4164 {
4165 suspend_and_send_sigstop (thread, except);
4166 });
4167 else
4168 for_each_thread ([&] (thread_info *thread)
4169 {
4170 send_sigstop (thread, except);
4171 });
4172
4173 wait_for_sigstop ();
4174 stopping_threads = NOT_STOPPING_THREADS;
4175
4176 if (debug_threads)
4177 {
4178 debug_printf ("stop_all_lwps done, setting stopping_threads "
4179 "back to !stopping\n");
4180 debug_exit ();
4181 }
4182 }
4183
4184 /* Enqueue one signal in the chain of signals which need to be
4185 delivered to this process on next resume. */
4186
4187 static void
4188 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4189 {
4190 struct pending_signals *p_sig = XNEW (struct pending_signals);
4191
4192 p_sig->prev = lwp->pending_signals;
4193 p_sig->signal = signal;
4194 if (info == NULL)
4195 memset (&p_sig->info, 0, sizeof (siginfo_t));
4196 else
4197 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4198 lwp->pending_signals = p_sig;
4199 }
4200
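/* Layout note for the queue above: pending_signals is a linked list
   threaded through the PREV pointers, with the newest entry at the
   head. The consumer (see linux_resume_one_lwp_throw) walks to the
   tail before dequeuing, so signals are delivered in FIFO order even
   though they are pushed LIFO. Shape after enqueuing SIGUSR1 then
   SIGUSR2:

     lwp->pending_signals -> [SIGUSR2] -prev-> [SIGUSR1] -prev-> NULL

   and the dequeue picks SIGUSR1 first. */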
4201 /* Install breakpoints for software single stepping. */
4202
4203 static void
4204 install_software_single_step_breakpoints (struct lwp_info *lwp)
4205 {
4206 struct thread_info *thread = get_lwp_thread (lwp);
4207 struct regcache *regcache = get_thread_regcache (thread, 1);
4208
4209 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4210
4211 current_thread = thread;
4212 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4213
4214 for (CORE_ADDR pc : next_pcs)
4215 set_single_step_breakpoint (pc, current_ptid);
4216 }
4217
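/* How the above implements software single-step: the low target's
   get_next_pcs analyzes the instruction at the current PC and returns
   every address execution could reach next (one address for
   straight-line code, two or more around conditional branches), and a
   single-step breakpoint is planted at each. For example, stepping a
   conditional branch on ARM yields both the branch target and the
   fall-through address. */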
4218 /* Single step via hardware or software single step.
4219 Return 1 if hardware single stepping, 0 if software single stepping
4220 or can't single step. */
4221
4222 static int
4223 single_step (struct lwp_info* lwp)
4224 {
4225 int step = 0;
4226
4227 if (can_hardware_single_step ())
4228 {
4229 step = 1;
4230 }
4231 else if (can_software_single_step ())
4232 {
4233 install_software_single_step_breakpoints (lwp);
4234 step = 0;
4235 }
4236 else
4237 {
4238 if (debug_threads)
4239 debug_printf ("stepping is not implemented on this target\n");
4240 }
4241
4242 return step;
4243 }
4244
4245 /* The signal can be delivered to the inferior if we are not trying
4246 to finish a fast tracepoint collect. Since a signal can be
4247 delivered during the step-over, the program may enter the signal
4248 handler and trap again after returning from it. We can live
4249 with the spurious double traps. */
4250
4251 static int
4252 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4253 {
4254 return (lwp->collecting_fast_tracepoint
4255 == fast_tpoint_collect_result::not_collecting);
4256 }
4257
4258 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4259 SIGNAL is nonzero, give it that signal. */
4260
4261 static void
4262 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4263 int step, int signal, siginfo_t *info)
4264 {
4265 struct thread_info *thread = get_lwp_thread (lwp);
4266 struct thread_info *saved_thread;
4267 int ptrace_request;
4268 struct process_info *proc = get_thread_process (thread);
4269
4270 /* Note that target description may not be initialised
4271 (proc->tdesc == NULL) at this point because the program hasn't
4272 stopped at the first instruction yet. It means GDBserver skips
4273 the extra traps from the wrapper program (see option --wrapper).
4274 Code in this function that requires register access should be
4275 guarded by a proc->tdesc != NULL check (or equivalent). */
4276
4277 if (lwp->stopped == 0)
4278 return;
4279
4280 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4281
4282 fast_tpoint_collect_result fast_tp_collecting
4283 = lwp->collecting_fast_tracepoint;
4284
4285 gdb_assert (!stabilizing_threads
4286 || (fast_tp_collecting
4287 != fast_tpoint_collect_result::not_collecting));
4288
4289 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4290 user used the "jump" command, or "set $pc = foo"). */
4291 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4292 {
4293 /* Collecting 'while-stepping' actions doesn't make sense
4294 anymore. */
4295 release_while_stepping_state_list (thread);
4296 }
4297
4298 /* If we have pending signals or status, and a new signal, enqueue the
4299 signal. Also enqueue the signal if it can't be delivered to the
4300 inferior right now. */
4301 if (signal != 0
4302 && (lwp->status_pending_p
4303 || lwp->pending_signals != NULL
4304 || !lwp_signal_can_be_delivered (lwp)))
4305 {
4306 enqueue_pending_signal (lwp, signal, info);
4307
4308 /* Postpone any pending signal. It was enqueued above. */
4309 signal = 0;
4310 }
4311
4312 if (lwp->status_pending_p)
4313 {
4314 if (debug_threads)
4315 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4316 " has pending status\n",
4317 lwpid_of (thread), step ? "step" : "continue",
4318 lwp->stop_expected ? "expected" : "not expected");
4319 return;
4320 }
4321
4322 saved_thread = current_thread;
4323 current_thread = thread;
4324
4325 /* This bit needs some thinking about. If we get a signal that
4326 we must report while a single-step reinsert is still pending,
4327 we often end up resuming the thread. It might be better to
4328 (ew) allow a stack of pending events; then we could be sure that
4329 the reinsert happened right away and not lose any signals.
4330
4331 Making this stack would also shrink the window in which breakpoints are
4332 uninserted (see comment in linux_wait_for_lwp) but not enough for
4333 complete correctness, so it won't solve that problem. It may be
4334 worthwhile just to solve this one, however. */
4335 if (lwp->bp_reinsert != 0)
4336 {
4337 if (debug_threads)
4338 debug_printf (" pending reinsert at 0x%s\n",
4339 paddress (lwp->bp_reinsert));
4340
4341 if (can_hardware_single_step ())
4342 {
4343 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4344 {
4345 if (step == 0)
4346 warning ("BAD - reinserting but not stepping.");
4347 if (lwp->suspended)
4348 warning ("BAD - reinserting and suspended(%d).",
4349 lwp->suspended);
4350 }
4351 }
4352
4353 step = maybe_hw_step (thread);
4354 }
4355
4356 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4357 {
4358 if (debug_threads)
4359 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4360 " (exit-jump-pad-bkpt)\n",
4361 lwpid_of (thread));
4362 }
4363 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4364 {
4365 if (debug_threads)
4366 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4367 " single-stepping\n",
4368 lwpid_of (thread));
4369
4370 if (can_hardware_single_step ())
4371 step = 1;
4372 else
4373 {
4374 internal_error (__FILE__, __LINE__,
4375 "moving out of jump pad single-stepping"
4376 " not implemented on this target");
4377 }
4378 }
4379
4380 /* If we have while-stepping actions in this thread, set it stepping.
4381 If we have a signal to deliver, it may or may not be set to
4382 SIG_IGN, we don't know. Assume so, and allow collecting
4383 while-stepping into a signal handler. A possible smart thing to
4384 do would be to set an internal breakpoint at the signal return
4385 address, continue, and carry on catching this while-stepping
4386 action only when that breakpoint is hit. A future
4387 enhancement. */
4388 if (thread->while_stepping != NULL)
4389 {
4390 if (debug_threads)
4391 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4392 lwpid_of (thread));
4393
4394 step = single_step (lwp);
4395 }
4396
4397 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4398 {
4399 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4400
4401 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4402
4403 if (debug_threads)
4404 {
4405 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4406 (long) lwp->stop_pc);
4407 }
4408 }
4409
4410 /* If we have pending signals, consume one if it can be delivered to
4411 the inferior. */
4412 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4413 {
4414 struct pending_signals **p_sig;
4415
4416 p_sig = &lwp->pending_signals;
4417 while ((*p_sig)->prev != NULL)
4418 p_sig = &(*p_sig)->prev;
4419
4420 signal = (*p_sig)->signal;
4421 if ((*p_sig)->info.si_signo != 0)
4422 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4423 &(*p_sig)->info);
4424
4425 free (*p_sig);
4426 *p_sig = NULL;
4427 }
4428
4429 if (debug_threads)
4430 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4431 lwpid_of (thread), step ? "step" : "continue", signal,
4432 lwp->stop_expected ? "expected" : "not expected");
4433
4434 if (the_low_target.prepare_to_resume != NULL)
4435 the_low_target.prepare_to_resume (lwp);
4436
4437 regcache_invalidate_thread (thread);
4438 errno = 0;
4439 lwp->stepping = step;
4440 if (step)
4441 ptrace_request = PTRACE_SINGLESTEP;
4442 else if (gdb_catching_syscalls_p (lwp))
4443 ptrace_request = PTRACE_SYSCALL;
4444 else
4445 ptrace_request = PTRACE_CONT;
4446 ptrace (ptrace_request,
4447 lwpid_of (thread),
4448 (PTRACE_TYPE_ARG3) 0,
4449 /* Coerce to a uintptr_t first to avoid potential gcc warning
4450 of coercing an 8 byte integer to a 4 byte pointer. */
4451 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4452
4453 current_thread = saved_thread;
4454 if (errno)
4455 perror_with_name ("resuming thread");
4456
4457 /* Successfully resumed. Clear state that no longer makes sense,
4458 and mark the LWP as running. Must not do this before resuming
4459 otherwise if that fails other code will be confused. E.g., we'd
4460 later try to stop the LWP and hang forever waiting for a stop
4461 status. Note that we must not throw after this is cleared,
4462 otherwise handle_zombie_lwp_error would get confused. */
4463 lwp->stopped = 0;
4464 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4465 }
4466
4467 /* Called when we try to resume a stopped LWP and that errors out. If
4468 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4469 or about to become), discard the error, clear any pending status
4470 the LWP may have, and return true (we'll collect the exit status
4471 soon enough). Otherwise, return false. */
4472
4473 static int
4474 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4475 {
4476 struct thread_info *thread = get_lwp_thread (lp);
4477
4478 /* If we get an error after resuming the LWP successfully, we'd
4479 confuse !T state for the LWP being gone. */
4480 gdb_assert (lp->stopped);
4481
4482 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4483 because even if ptrace failed with ESRCH, the tracee may be "not
4484 yet fully dead", but already refusing ptrace requests. In that
4485 case the tracee has 'R (Running)' state for a little bit
4486 (observed in Linux 3.18). See also the note on ESRCH in the
4487 ptrace(2) man page. Instead, check whether the LWP has any state
4488 other than ptrace-stopped. */
4489
4490 /* Don't assume anything if /proc/PID/status can't be read. */
4491 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4492 {
4493 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4494 lp->status_pending_p = 0;
4495 return 1;
4496 }
4497 return 0;
4498 }
4499
4500 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4501 disappears while we try to resume it. */
4502
4503 static void
4504 linux_resume_one_lwp (struct lwp_info *lwp,
4505 int step, int signal, siginfo_t *info)
4506 {
4507 TRY
4508 {
4509 linux_resume_one_lwp_throw (lwp, step, signal, info);
4510 }
4511 CATCH (ex, RETURN_MASK_ERROR)
4512 {
4513 if (!check_ptrace_stopped_lwp_gone (lwp))
4514 throw_exception (ex);
4515 }
4516 END_CATCH
4517 }
4518
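/* TRY/CATCH/END_CATCH above are GDB's portable exception macros
   (common/common-exceptions.h); RETURN_MASK_ERROR catches error ()
   throws but not Quit. The net effect: if the resume fails because
   the LWP turned zombie mid-operation, the error is swallowed and the
   exit status is collected later; any other ptrace failure is
   re-thrown to the caller. */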
4519 /* This function is called once per thread via for_each_thread.
4520 We look up which resume request applies to THREAD and mark it with a
4521 pointer to the appropriate resume request.
4522
4523 This algorithm is O(threads * resume elements), but resume elements
4524 is small (and will remain small at least until GDB supports thread
4525 suspension). */
4526
4527 static void
4528 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4529 {
4530 struct lwp_info *lwp = get_thread_lwp (thread);
4531
4532 for (size_t ndx = 0; ndx < n; ndx++)
4533 {
4534 ptid_t ptid = resume[ndx].thread;
4535 if (ptid == minus_one_ptid
4536 || ptid == thread->id
4537 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4538 of PID'. */
4539 || (ptid.pid () == pid_of (thread)
4540 && (ptid.is_pid ()
4541 || ptid.lwp () == -1)))
4542 {
4543 if (resume[ndx].kind == resume_stop
4544 && thread->last_resume_kind == resume_stop)
4545 {
4546 if (debug_threads)
4547 debug_printf ("already %s LWP %ld at GDB's request\n",
4548 (thread->last_status.kind
4549 == TARGET_WAITKIND_STOPPED)
4550 ? "stopped"
4551 : "stopping",
4552 lwpid_of (thread));
4553
4554 continue;
4555 }
4556
4557 /* Ignore (wildcard) resume requests for already-resumed
4558 threads. */
4559 if (resume[ndx].kind != resume_stop
4560 && thread->last_resume_kind != resume_stop)
4561 {
4562 if (debug_threads)
4563 debug_printf ("already %s LWP %ld at GDB's request\n",
4564 (thread->last_resume_kind
4565 == resume_step)
4566 ? "stepping"
4567 : "continuing",
4568 lwpid_of (thread));
4569 continue;
4570 }
4571
4572 /* Don't let wildcard resumes resume fork children that GDB
4573 does not yet know are new fork children. */
4574 if (lwp->fork_relative != NULL)
4575 {
4576 struct lwp_info *rel = lwp->fork_relative;
4577
4578 if (rel->status_pending_p
4579 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4580 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4581 {
4582 if (debug_threads)
4583 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4584 lwpid_of (thread));
4585 continue;
4586 }
4587 }
4588
4589 /* If the thread has a pending event that has already been
4590 reported to GDBserver core, but GDB has not pulled the
4591 event out of the vStopped queue yet, likewise, ignore the
4592 (wildcard) resume request. */
4593 if (in_queued_stop_replies (thread->id))
4594 {
4595 if (debug_threads)
4596 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4597 lwpid_of (thread));
4598 continue;
4599 }
4600
4601 lwp->resume = &resume[ndx];
4602 thread->last_resume_kind = lwp->resume->kind;
4603
4604 lwp->step_range_start = lwp->resume->step_range_start;
4605 lwp->step_range_end = lwp->resume->step_range_end;
4606
4607 /* If we had a deferred signal to report, dequeue one now.
4608 This can happen if LWP gets more than one signal while
4609 trying to get out of a jump pad. */
4610 if (lwp->stopped
4611 && !lwp->status_pending_p
4612 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4613 {
4614 lwp->status_pending_p = 1;
4615
4616 if (debug_threads)
4617 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4618 "leaving status pending.\n",
4619 WSTOPSIG (lwp->status_pending),
4620 lwpid_of (thread));
4621 }
4622
4623 return;
4624 }
4625 }
4626
4627 /* No resume action for this thread. */
4628 lwp->resume = NULL;
4629 }
4630
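/* Examples of the ptid matching above, in terms of the RSP vCont
   packet that produced the resume array (a sketch; the exact packet
   syntax, including hex thread-ids, is in the remote protocol docs):

     vCont;c            -> ptid == minus_one_ptid, matches every thread
     vCont;c:p1234.-1   -> pid 1234, lwp -1, matches all of process 1234
     vCont;s:p1234.5678 -> matches exactly LWP 5678 of process 1234

   Earlier actions in the packet take precedence over later ones,
   which is why the loop returns on the first match. */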
4631 /* find_thread callback for linux_resume. Return true if this lwp has an
4632 interesting status pending. */
4633
4634 static bool
4635 resume_status_pending_p (thread_info *thread)
4636 {
4637 struct lwp_info *lwp = get_thread_lwp (thread);
4638
4639 /* LWPs which will not be resumed are not interesting, because
4640 we might not wait for them next time through linux_wait. */
4641 if (lwp->resume == NULL)
4642 return false;
4643
4644 return thread_still_has_status_pending_p (thread);
4645 }
4646
4647 /* Return true if this lwp that GDB wants running is stopped at an
4648 internal breakpoint that we need to step over. It assumes that any
4649 required STOP_PC adjustment has already been propagated to the
4650 inferior's regcache. */
4651
4652 static bool
4653 need_step_over_p (thread_info *thread)
4654 {
4655 struct lwp_info *lwp = get_thread_lwp (thread);
4656 struct thread_info *saved_thread;
4657 CORE_ADDR pc;
4658 struct process_info *proc = get_thread_process (thread);
4659
4660 /* GDBserver is skipping the extra traps from the wrapper program;
4661 no step-over is needed. */
4662 if (proc->tdesc == NULL)
4663 return false;
4664
4665 /* LWPs which will not be resumed are not interesting, because we
4666 might not wait for them next time through linux_wait. */
4667
4668 if (!lwp->stopped)
4669 {
4670 if (debug_threads)
4671 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4672 lwpid_of (thread));
4673 return false;
4674 }
4675
4676 if (thread->last_resume_kind == resume_stop)
4677 {
4678 if (debug_threads)
4679 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4680 " stopped\n",
4681 lwpid_of (thread));
4682 return false;
4683 }
4684
4685 gdb_assert (lwp->suspended >= 0);
4686
4687 if (lwp->suspended)
4688 {
4689 if (debug_threads)
4690 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4691 lwpid_of (thread));
4692 return false;
4693 }
4694
4695 if (lwp->status_pending_p)
4696 {
4697 if (debug_threads)
4698 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4699 " status.\n",
4700 lwpid_of (thread));
4701 return false;
4702 }
4703
4704 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4705 or we have. */
4706 pc = get_pc (lwp);
4707
4708 /* If the PC has changed since we stopped, then don't do anything,
4709 and let the breakpoint/tracepoint be hit. This happens if, for
4710 instance, GDB handled the decr_pc_after_break subtraction itself,
4711 GDB is OOL stepping this thread, or the user has issued a "jump"
4712 command, or poked thread's registers herself. */
4713 if (pc != lwp->stop_pc)
4714 {
4715 if (debug_threads)
4716 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4717 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4718 lwpid_of (thread),
4719 paddress (lwp->stop_pc), paddress (pc));
4720 return false;
4721 }
4722
4723 /* On a software single-step target, resume the inferior with the
4724 signal rather than stepping over. */
4725 if (can_software_single_step ()
4726 && lwp->pending_signals != NULL
4727 && lwp_signal_can_be_delivered (lwp))
4728 {
4729 if (debug_threads)
4730 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4731 " signals.\n",
4732 lwpid_of (thread));
4733
4734 return false;
4735 }
4736
4737 saved_thread = current_thread;
4738 current_thread = thread;
4739
4740 /* We can only step over breakpoints we know about. */
4741 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4742 {
4743 /* Don't step over a breakpoint that GDB expects to hit, though.
4744 If the condition is being evaluated on the target's side and it
4745 evaluates to false, step over this breakpoint as well. */
4746 if (gdb_breakpoint_here (pc)
4747 && gdb_condition_true_at_breakpoint (pc)
4748 && gdb_no_commands_at_breakpoint (pc))
4749 {
4750 if (debug_threads)
4751 debug_printf ("Need step over [LWP %ld]? yes, but found"
4752 " GDB breakpoint at 0x%s; skipping step over\n",
4753 lwpid_of (thread), paddress (pc));
4754
4755 current_thread = saved_thread;
4756 return false;
4757 }
4758 else
4759 {
4760 if (debug_threads)
4761 debug_printf ("Need step over [LWP %ld]? yes, "
4762 "found breakpoint at 0x%s\n",
4763 lwpid_of (thread), paddress (pc));
4764
4765 /* We've found an lwp that needs stepping over --- return true
4766 so that find_thread stops looking. */
4767 current_thread = saved_thread;
4768
4769 return true;
4770 }
4771 }
4772
4773 current_thread = saved_thread;
4774
4775 if (debug_threads)
4776 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4777 " at 0x%s\n",
4778 lwpid_of (thread), paddress (pc));
4779
4780 return false;
4781 }
4782
4783 /* Start a step-over operation on LWP. When LWP stopped at a
4784 breakpoint, to make progress, we need to move the breakpoint out
4785 of the way. If we let other threads run while we do that, they may
4786 pass by the breakpoint location and miss hitting it. To avoid
4787 that, a step-over momentarily stops all threads while LWP is
4788 single-stepped by either hardware or software while the breakpoint
4789 is temporarily uninserted from the inferior. When the single-step
4790 finishes, we reinsert the breakpoint, and let all threads that are
4791 supposed to be running, run again. */
4792
4793 static int
4794 start_step_over (struct lwp_info *lwp)
4795 {
4796 struct thread_info *thread = get_lwp_thread (lwp);
4797 struct thread_info *saved_thread;
4798 CORE_ADDR pc;
4799 int step;
4800
4801 if (debug_threads)
4802 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4803 lwpid_of (thread));
4804
4805 stop_all_lwps (1, lwp);
4806
4807 if (lwp->suspended != 0)
4808 {
4809 internal_error (__FILE__, __LINE__,
4810 "LWP %ld suspended=%d\n", lwpid_of (thread),
4811 lwp->suspended);
4812 }
4813
4814 if (debug_threads)
4815 debug_printf ("Done stopping all threads for step-over.\n");
4816
4817 /* Note, we should always reach here with an already adjusted PC,
4818 either by GDB (if we're resuming due to GDB's request), or by our
4819 caller, if we just finished handling an internal breakpoint GDB
4820 shouldn't care about. */
4821 pc = get_pc (lwp);
4822
4823 saved_thread = current_thread;
4824 current_thread = thread;
4825
4826 lwp->bp_reinsert = pc;
4827 uninsert_breakpoints_at (pc);
4828 uninsert_fast_tracepoint_jumps_at (pc);
4829
4830 step = single_step (lwp);
4831
4832 current_thread = saved_thread;
4833
4834 linux_resume_one_lwp (lwp, step, 0, NULL);
4835
4836 /* Require next event from this LWP. */
4837 step_over_bkpt = thread->id;
4838 return 1;
4839 }
4840
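/* The full step-over sequence, for reference (a summary of the code
   above and of finish_step_over below):

     1. stop_all_lwps (1, lwp): freeze and suspend every other thread;
     2. uninsert the breakpoint (and any fast tracepoint jump) at PC;
     3. single-step LWP, by hardware or via single-step breakpoints;
     4. on the step completion event, finish_step_over reinserts the
        breakpoint and deletes the single-step breakpoints;
     5. unsuspend/resume the other threads.

   step_over_bkpt records which LWP the next event must come from. */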
4841 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4842 start_step_over, if still there, and delete any single-step
4843 breakpoints we've set, on non-hardware single-step targets. */
4844
4845 static int
4846 finish_step_over (struct lwp_info *lwp)
4847 {
4848 if (lwp->bp_reinsert != 0)
4849 {
4850 struct thread_info *saved_thread = current_thread;
4851
4852 if (debug_threads)
4853 debug_printf ("Finished step over.\n");
4854
4855 current_thread = get_lwp_thread (lwp);
4856
4857 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4858 may be no breakpoint to reinsert there by now. */
4859 reinsert_breakpoints_at (lwp->bp_reinsert);
4860 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4861
4862 lwp->bp_reinsert = 0;
4863
4864 /* Delete any single-step breakpoints. No longer needed. We
4865 don't have to worry about other threads hitting this trap,
4866 and later not being able to explain it, because we were
4867 stepping over a breakpoint, and we hold all threads but
4868 LWP stopped while doing that. */
4869 if (!can_hardware_single_step ())
4870 {
4871 gdb_assert (has_single_step_breakpoints (current_thread));
4872 delete_single_step_breakpoints (current_thread);
4873 }
4874
4875 step_over_bkpt = null_ptid;
4876 current_thread = saved_thread;
4877 return 1;
4878 }
4879 else
4880 return 0;
4881 }
4882
4883 /* If there's a step over in progress, wait until all threads stop
4884 (that is, until the stepping thread finishes its step), and
4885 unsuspend all lwps. The stepping thread ends with its status
4886 pending, which is processed later when we get back to processing
4887 events. */
4888
4889 static void
4890 complete_ongoing_step_over (void)
4891 {
4892 if (step_over_bkpt != null_ptid)
4893 {
4894 struct lwp_info *lwp;
4895 int wstat;
4896 int ret;
4897
4898 if (debug_threads)
4899 debug_printf ("detach: step over in progress, finish it first\n");
4900
4901 /* Passing NULL_PTID as filter indicates we want all events to
4902 be left pending. Eventually this returns when there are no
4903 unwaited-for children left. */
4904 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4905 &wstat, __WALL);
4906 gdb_assert (ret == -1);
4907
4908 lwp = find_lwp_pid (step_over_bkpt);
4909 if (lwp != NULL)
4910 finish_step_over (lwp);
4911 step_over_bkpt = null_ptid;
4912 unsuspend_all_lwps (lwp);
4913 }
4914 }
4915
4916 /* This function is called once per thread. We check the thread's resume
4917 request, which will tell us whether to resume, step, or leave the thread
4918 stopped; and what signal, if any, it should be sent.
4919
4920 For threads which we aren't explicitly told otherwise, we preserve
4921 the stepping flag; this is used for stepping over gdbserver-placed
4922 breakpoints.
4923
4924 If pending_flags was set in any thread, we queue any needed
4925 signals, since we won't actually resume. We already have a pending
4926 event to report, so we don't need to preserve any step requests;
4927 they should be re-issued if necessary. */
4928
4929 static void
4930 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4931 {
4932 struct lwp_info *lwp = get_thread_lwp (thread);
4933 int leave_pending;
4934
4935 if (lwp->resume == NULL)
4936 return;
4937
4938 if (lwp->resume->kind == resume_stop)
4939 {
4940 if (debug_threads)
4941 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4942
4943 if (!lwp->stopped)
4944 {
4945 if (debug_threads)
4946 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4947
4948 /* Stop the thread, and wait for the event asynchronously,
4949 through the event loop. */
4950 send_sigstop (lwp);
4951 }
4952 else
4953 {
4954 if (debug_threads)
4955 debug_printf ("already stopped LWP %ld\n",
4956 lwpid_of (thread));
4957
4958 /* The LWP may have been stopped in an internal event that
4959 was not meant to be notified back to GDB (e.g., gdbserver
4960 breakpoint), so we should be reporting a stop event in
4961 this case too. */
4962
4963 /* If the thread already has a pending SIGSTOP, this is a
4964 no-op. Otherwise, something later will presumably resume
4965 the thread and this will cause it to cancel any pending
4966 operation, due to last_resume_kind == resume_stop. If
4967 the thread already has a pending status to report, we
4968 will still report it the next time we wait - see
4969 status_pending_p_callback. */
4970
4971 /* If we already have a pending signal to report, then
4972 there's no need to queue a SIGSTOP, as this means we're
4973 midway through moving the LWP out of the jumppad, and we
4974 will report the pending signal as soon as that is
4975 finished. */
4976 if (lwp->pending_signals_to_report == NULL)
4977 send_sigstop (lwp);
4978 }
4979
4980 /* For stop requests, we're done. */
4981 lwp->resume = NULL;
4982 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4983 return;
4984 }
4985
4986 /* If this thread which is about to be resumed has a pending status,
4987 then don't resume it - we can just report the pending status.
4988 Likewise if it is suspended, because e.g., another thread is
4989 stepping past a breakpoint. Make sure to queue any signals that
4990 would otherwise be sent. In all-stop mode, we base this decision
4991 on whether *any* thread has a pending status. If there's a
4992 thread that needs the step-over-breakpoint dance, then don't
4993 resume any other thread but that particular one. */
4994 leave_pending = (lwp->suspended
4995 || lwp->status_pending_p
4996 || leave_all_stopped);
4997
4998 /* If we have a new signal, enqueue the signal. */
4999 if (lwp->resume->sig != 0)
5000 {
5001 siginfo_t info, *info_p;
5002
5003 /* If this is the same signal we were previously stopped by,
5004 make sure to queue its siginfo. */
5005 if (WIFSTOPPED (lwp->last_status)
5006 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5007 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5008 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5009 info_p = &info;
5010 else
5011 info_p = NULL;
5012
5013 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5014 }
5015
5016 if (!leave_pending)
5017 {
5018 if (debug_threads)
5019 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5020
5021 proceed_one_lwp (thread, NULL);
5022 }
5023 else
5024 {
5025 if (debug_threads)
5026 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5027 }
5028
5029 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5030 lwp->resume = NULL;
5031 }
5032
5033 static void
5034 linux_resume (struct thread_resume *resume_info, size_t n)
5035 {
5036 struct thread_info *need_step_over = NULL;
5037
5038 if (debug_threads)
5039 {
5040 debug_enter ();
5041 debug_printf ("linux_resume:\n");
5042 }
5043
5044 for_each_thread ([&] (thread_info *thread)
5045 {
5046 linux_set_resume_request (thread, resume_info, n);
5047 });
5048
5049 /* If there is a thread which would otherwise be resumed, which has
5050 a pending status, then don't resume any threads - we can just
5051 report the pending status. Make sure to queue any signals that
5052 would otherwise be sent. In non-stop mode, we'll apply this
5053 logic to each thread individually. We consume all pending events
5054 before considering whether to start a step-over (in all-stop). */
5055 bool any_pending = false;
5056 if (!non_stop)
5057 any_pending = find_thread (resume_status_pending_p) != NULL;
5058
5059 /* If there is a thread which would otherwise be resumed, which is
5060 stopped at a breakpoint that needs stepping over, then don't
5061 resume any threads - have it step over the breakpoint with all
5062 other threads stopped, then resume all threads again. Make sure
5063 to queue any signals that would otherwise be delivered or
5064 queued. */
5065 if (!any_pending && supports_breakpoints ())
5066 need_step_over = find_thread (need_step_over_p);
5067
5068 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5069
5070 if (debug_threads)
5071 {
5072 if (need_step_over != NULL)
5073 debug_printf ("Not resuming all, need step over\n");
5074 else if (any_pending)
5075 debug_printf ("Not resuming, all-stop and found "
5076 "an LWP with pending status\n");
5077 else
5078 debug_printf ("Resuming, no pending status or step over needed\n");
5079 }
5080
5081 /* Even if we're leaving threads stopped, queue all signals we'd
5082 otherwise deliver. */
5083 for_each_thread ([&] (thread_info *thread)
5084 {
5085 linux_resume_one_thread (thread, leave_all_stopped);
5086 });
5087
5088 if (need_step_over)
5089 start_step_over (get_thread_lwp (need_step_over));
5090
5091 if (debug_threads)
5092 {
5093 debug_printf ("linux_resume done\n");
5094 debug_exit ();
5095 }
5096
5097 /* We may have events that were pending that can/should be sent to
5098 the client now. Trigger a linux_wait call. */
5099 if (target_is_async_p ())
5100 async_file_mark ();
5101 }
5102
5103 /* This function is called once per thread. We check the thread's
5104 last resume request, which will tell us whether to resume, step, or
5105 leave the thread stopped. Any signal the client requested to be
5106 delivered has already been enqueued at this point.
5107
5108 If any thread that GDB wants running is stopped at an internal
5109 breakpoint that needs stepping over, we start a step-over operation
5110 on that particular thread, and leave all others stopped. */
5111
5112 static void
5113 proceed_one_lwp (thread_info *thread, lwp_info *except)
5114 {
5115 struct lwp_info *lwp = get_thread_lwp (thread);
5116 int step;
5117
5118 if (lwp == except)
5119 return;
5120
5121 if (debug_threads)
5122 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5123
5124 if (!lwp->stopped)
5125 {
5126 if (debug_threads)
5127 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5128 return;
5129 }
5130
5131 if (thread->last_resume_kind == resume_stop
5132 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5133 {
5134 if (debug_threads)
5135 debug_printf (" client wants LWP %ld to remain stopped\n",
5136 lwpid_of (thread));
5137 return;
5138 }
5139
5140 if (lwp->status_pending_p)
5141 {
5142 if (debug_threads)
5143 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5144 lwpid_of (thread));
5145 return;
5146 }
5147
5148 gdb_assert (lwp->suspended >= 0);
5149
5150 if (lwp->suspended)
5151 {
5152 if (debug_threads)
5153 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5154 return;
5155 }
5156
5157 if (thread->last_resume_kind == resume_stop
5158 && lwp->pending_signals_to_report == NULL
5159 && (lwp->collecting_fast_tracepoint
5160 == fast_tpoint_collect_result::not_collecting))
5161 {
5162 /* We haven't reported this LWP as stopped yet (otherwise, the
5163 last_status.kind check above would catch it, and we wouldn't
5164 reach here). This LWP may have been momentarily paused by a
5165 stop_all_lwps call while handling, for example, another LWP's
5166 step-over. In that case, the pending expected SIGSTOP signal
5167 that was queued at vCont;t handling time will have already
5168 been consumed by wait_for_sigstop, and so we need to requeue
5169 another one here. Note that if the LWP already has a SIGSTOP
5170 pending, this is a no-op. */
5171
5172 if (debug_threads)
5173 debug_printf ("Client wants LWP %ld to stop. "
5174 "Making sure it has a SIGSTOP pending\n",
5175 lwpid_of (thread));
5176
5177 send_sigstop (lwp);
5178 }
5179
5180 if (thread->last_resume_kind == resume_step)
5181 {
5182 if (debug_threads)
5183 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5184 lwpid_of (thread));
5185
5186 /* If resume_step is requested by GDB, install single-step
5187 breakpoints when the thread is about to be actually resumed if
5188 the single-step breakpoints weren't removed. */
5189 if (can_software_single_step ()
5190 && !has_single_step_breakpoints (thread))
5191 install_software_single_step_breakpoints (lwp);
5192
5193 step = maybe_hw_step (thread);
5194 }
5195 else if (lwp->bp_reinsert != 0)
5196 {
5197 if (debug_threads)
5198 debug_printf (" stepping LWP %ld, reinsert set\n",
5199 lwpid_of (thread));
5200
5201 step = maybe_hw_step (thread);
5202 }
5203 else
5204 step = 0;
5205
5206 linux_resume_one_lwp (lwp, step, 0, NULL);
5207 }
5208
5209 static void
5210 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5211 {
5212 struct lwp_info *lwp = get_thread_lwp (thread);
5213
5214 if (lwp == except)
5215 return;
5216
5217 lwp_suspended_decr (lwp);
5218
5219 proceed_one_lwp (thread, except);
5220 }
5221
5222 /* When we finish a step-over, set threads running again. If there's
5223 another thread that may need a step-over, now's the time to start
5224 it. Eventually, we'll move all threads past their breakpoints. */
5225
5226 static void
5227 proceed_all_lwps (void)
5228 {
5229 struct thread_info *need_step_over;
5230
5231 /* If there is a thread which would otherwise be resumed, which is
5232 stopped at a breakpoint that needs stepping over, then don't
5233 resume any threads - have it step over the breakpoint with all
5234 other threads stopped, then resume all threads again. */
5235
5236 if (supports_breakpoints ())
5237 {
5238 need_step_over = find_thread (need_step_over_p);
5239
5240 if (need_step_over != NULL)
5241 {
5242 if (debug_threads)
5243 debug_printf ("proceed_all_lwps: found "
5244 "thread %ld needing a step-over\n",
5245 lwpid_of (need_step_over));
5246
5247 start_step_over (get_thread_lwp (need_step_over));
5248 return;
5249 }
5250 }
5251
5252 if (debug_threads)
5253 debug_printf ("Proceeding, no step-over needed\n");
5254
5255 for_each_thread ([] (thread_info *thread)
5256 {
5257 proceed_one_lwp (thread, NULL);
5258 });
5259 }
5260
5261 /* Stopped LWPs that the client wanted to be running, that don't have
5262 pending statuses, are set to run again, except for EXCEPT, if not
5263 NULL. This undoes a stop_all_lwps call. */
5264
5265 static void
5266 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5267 {
5268 if (debug_threads)
5269 {
5270 debug_enter ();
5271 if (except)
5272 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5273 lwpid_of (get_lwp_thread (except)));
5274 else
5275 debug_printf ("unstopping all lwps\n");
5276 }
5277
5278 if (unsuspend)
5279 for_each_thread ([&] (thread_info *thread)
5280 {
5281 unsuspend_and_proceed_one_lwp (thread, except);
5282 });
5283 else
5284 for_each_thread ([&] (thread_info *thread)
5285 {
5286 proceed_one_lwp (thread, except);
5287 });
5288
5289 if (debug_threads)
5290 {
5291 debug_printf ("unstop_all_lwps done\n");
5292 debug_exit ();
5293 }
5294 }
5295
5296
5297 #ifdef HAVE_LINUX_REGSETS
5298
5299 #define use_linux_regsets 1
5300
5301 /* Returns true if REGSET has been disabled. */
5302
5303 static int
5304 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5305 {
5306 return (info->disabled_regsets != NULL
5307 && info->disabled_regsets[regset - info->regsets]);
5308 }
5309
5310 /* Disable REGSET. */
5311
5312 static void
5313 disable_regset (struct regsets_info *info, struct regset_info *regset)
5314 {
5315 int dr_offset;
5316
5317 dr_offset = regset - info->regsets;
5318 if (info->disabled_regsets == NULL)
5319 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5320 info->disabled_regsets[dr_offset] = 1;
5321 }
5322
5323 static int
5324 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5325 struct regcache *regcache)
5326 {
5327 struct regset_info *regset;
5328 int saw_general_regs = 0;
5329 int pid;
5330 struct iovec iov;
5331
5332 pid = lwpid_of (current_thread);
5333 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5334 {
5335 void *buf, *data;
5336 int nt_type, res;
5337
5338 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5339 continue;
5340
5341 buf = xmalloc (regset->size);
5342
5343 nt_type = regset->nt_type;
5344 if (nt_type)
5345 {
5346 iov.iov_base = buf;
5347 iov.iov_len = regset->size;
5348 data = (void *) &iov;
5349 }
5350 else
5351 data = buf;
5352
5353 #ifndef __sparc__
5354 res = ptrace (regset->get_request, pid,
5355 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5356 #else
5357 res = ptrace (regset->get_request, pid, data, nt_type);
5358 #endif
5359 if (res < 0)
5360 {
5361 if (errno == EIO)
5362 {
5363 /* If we get EIO on a regset, do not try it again for
5364 this process mode. */
5365 disable_regset (regsets_info, regset);
5366 }
5367 else if (errno == ENODATA)
5368 {
5369 /* ENODATA may be returned if the regset is currently
5370 not "active". This can happen in normal operation,
5371 so suppress the warning in this case. */
5372 }
5373 else if (errno == ESRCH)
5374 {
5375 /* At this point, ESRCH should mean the process is
5376 already gone, in which case we simply ignore attempts
5377 to read its registers. */
5378 }
5379 else
5380 {
5381 char s[256];
5382 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5383 pid);
5384 perror (s);
5385 }
5386 }
5387 else
5388 {
5389 if (regset->type == GENERAL_REGS)
5390 saw_general_regs = 1;
5391 regset->store_function (regcache, buf);
5392 }
5393 free (buf);
5394 }
5395 if (saw_general_regs)
5396 return 0;
5397 else
5398 return 1;
5399 }
5400
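/* A minimal, self-contained sketch of the regset interface used above
   (assumptions: <sys/ptrace.h>, <sys/uio.h>, <sys/user.h> and <elf.h>
   are available, and TID is a ptrace-stopped thread). Regsets with a
   nonzero nt_type go through PTRACE_GETREGSET with an iovec, like
   so: */
#if 0
struct user_regs_struct gregs;
struct iovec iov;

iov.iov_base = &gregs;
iov.iov_len = sizeof (gregs);
if (ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov) == 0)
  {
    /* On success the kernel may shrink iov.iov_len to the number of
       bytes it actually filled in. */
  }
#endif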
5401 static int
5402 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5403 struct regcache *regcache)
5404 {
5405 struct regset_info *regset;
5406 int saw_general_regs = 0;
5407 int pid;
5408 struct iovec iov;
5409
5410 pid = lwpid_of (current_thread);
5411 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5412 {
5413 void *buf, *data;
5414 int nt_type, res;
5415
5416 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5417 || regset->fill_function == NULL)
5418 continue;
5419
5420 buf = xmalloc (regset->size);
5421
5422 /* First fill the buffer with the current register set contents,
5423 in case there are any items in the kernel's regset that are
5424 not in gdbserver's regcache. */
5425
5426 nt_type = regset->nt_type;
5427 if (nt_type)
5428 {
5429 iov.iov_base = buf;
5430 iov.iov_len = regset->size;
5431 data = (void *) &iov;
5432 }
5433 else
5434 data = buf;
5435
5436 #ifndef __sparc__
5437 res = ptrace (regset->get_request, pid,
5438 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5439 #else
5440 res = ptrace (regset->get_request, pid, data, nt_type);
5441 #endif
5442
5443 if (res == 0)
5444 {
5445 /* Then overlay our cached registers on that. */
5446 regset->fill_function (regcache, buf);
5447
5448 /* Only now do we write the register set. */
5449 #ifndef __sparc__
5450 res = ptrace (regset->set_request, pid,
5451 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5452 #else
5453 res = ptrace (regset->set_request, pid, data, nt_type);
5454 #endif
5455 }
5456
5457 if (res < 0)
5458 {
5459 if (errno == EIO)
5460 {
5461 /* If we get EIO on a regset, do not try it again for
5462 this process mode. */
5463 disable_regset (regsets_info, regset);
5464 }
5465 else if (errno == ESRCH)
5466 {
5467 /* At this point, ESRCH should mean the process is
5468 already gone, in which case we simply ignore attempts
5469 to change its registers. See also the related
5470 comment in linux_resume_one_lwp. */
5471 free (buf);
5472 return 0;
5473 }
5474 else
5475 {
5476 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5477 }
5478 }
5479 else if (regset->type == GENERAL_REGS)
5480 saw_general_regs = 1;
5481 free (buf);
5482 }
5483 if (saw_general_regs)
5484 return 0;
5485 else
5486 return 1;
5487 }
5488
5489 #else /* !HAVE_LINUX_REGSETS */
5490
5491 #define use_linux_regsets 0
5492 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5493 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5494
5495 #endif
5496
5497 /* Return 1 if register REGNO is supported by one of the regset ptrace
5498 calls or 0 if it has to be transferred individually. */
5499
5500 static int
5501 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5502 {
5503 unsigned char mask = 1 << (regno % 8);
5504 size_t index = regno / 8;
5505
5506 return (use_linux_regsets
5507 && (regs_info->regset_bitmap == NULL
5508 || (regs_info->regset_bitmap[index] & mask) != 0));
5509 }
5510
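/* Worked example of the bitmap lookup above: for regno == 10,
   index == 10 / 8 == 1 and mask == 1 << (10 % 8) == 0x04, so the
   register is regset-supported iff bit 2 of regset_bitmap[1] is set
   (or the target provides no bitmap at all, which means "all
   registers"). */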
5511 #ifdef HAVE_LINUX_USRREGS
5512
5513 static int
5514 register_addr (const struct usrregs_info *usrregs, int regnum)
5515 {
5516 int addr;
5517
5518 if (regnum < 0 || regnum >= usrregs->num_regs)
5519 error ("Invalid register number %d.", regnum);
5520
5521 addr = usrregs->regmap[regnum];
5522
5523 return addr;
5524 }
5525
5526 /* Fetch one register. */
5527 static void
5528 fetch_register (const struct usrregs_info *usrregs,
5529 struct regcache *regcache, int regno)
5530 {
5531 CORE_ADDR regaddr;
5532 int i, size;
5533 char *buf;
5534 int pid;
5535
5536 if (regno >= usrregs->num_regs)
5537 return;
5538 if ((*the_low_target.cannot_fetch_register) (regno))
5539 return;
5540
5541 regaddr = register_addr (usrregs, regno);
5542 if (regaddr == -1)
5543 return;
5544
5545 size = ((register_size (regcache->tdesc, regno)
5546 + sizeof (PTRACE_XFER_TYPE) - 1)
5547 & -sizeof (PTRACE_XFER_TYPE));
5548 buf = (char *) alloca (size);
5549
5550 pid = lwpid_of (current_thread);
5551 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5552 {
5553 errno = 0;
5554 *(PTRACE_XFER_TYPE *) (buf + i) =
5555 ptrace (PTRACE_PEEKUSER, pid,
5556 /* Coerce to a uintptr_t first to avoid potential gcc warning
5557 about coercing an 8 byte integer to a 4 byte pointer. */
5558 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5559 regaddr += sizeof (PTRACE_XFER_TYPE);
5560 if (errno != 0)
5561 {
5562 /* Mark register REGNO unavailable. */
5563 supply_register (regcache, regno, NULL);
5564 return;
5565 }
5566 }
5567
5568 if (the_low_target.supply_ptrace_register)
5569 the_low_target.supply_ptrace_register (regcache, regno, buf);
5570 else
5571 supply_register (regcache, regno, buf);
5572 }
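/* Illustrative note on the size rounding in fetch_register above: the
   expression (register_size + sizeof (PTRACE_XFER_TYPE) - 1)
   & -sizeof (PTRACE_XFER_TYPE) rounds the register size up to a whole
   number of ptrace transfer words. E.g., a 10-byte register with an
   8-byte PTRACE_XFER_TYPE gives SIZE == 16, i.e. two PTRACE_PEEKUSER
   calls.  */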
5573
5574 /* Store one register. */
5575 static void
5576 store_register (const struct usrregs_info *usrregs,
5577 struct regcache *regcache, int regno)
5578 {
5579 CORE_ADDR regaddr;
5580 int i, size;
5581 char *buf;
5582 int pid;
5583
5584 if (regno >= usrregs->num_regs)
5585 return;
5586 if ((*the_low_target.cannot_store_register) (regno))
5587 return;
5588
5589 regaddr = register_addr (usrregs, regno);
5590 if (regaddr == -1)
5591 return;
5592
5593 size = ((register_size (regcache->tdesc, regno)
5594 + sizeof (PTRACE_XFER_TYPE) - 1)
5595 & -sizeof (PTRACE_XFER_TYPE));
5596 buf = (char *) alloca (size);
5597 memset (buf, 0, size);
5598
5599 if (the_low_target.collect_ptrace_register)
5600 the_low_target.collect_ptrace_register (regcache, regno, buf);
5601 else
5602 collect_register (regcache, regno, buf);
5603
5604 pid = lwpid_of (current_thread);
5605 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5606 {
5607 errno = 0;
5608 ptrace (PTRACE_POKEUSER, pid,
5609 /* Coerce to a uintptr_t first to avoid potential gcc warning
5610 about coercing an 8 byte integer to a 4 byte pointer. */
5611 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5612 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5613 if (errno != 0)
5614 {
5615 /* At this point, ESRCH should mean the process is
5616 already gone, in which case we simply ignore attempts
5617 to change its registers. See also the related
5618 comment in linux_resume_one_lwp. */
5619 if (errno == ESRCH)
5620 return;
5621
5622 if ((*the_low_target.cannot_store_register) (regno) == 0)
5623 error ("writing register %d: %s", regno, strerror (errno));
5624 }
5625 regaddr += sizeof (PTRACE_XFER_TYPE);
5626 }
5627 }
5628
5629 /* Fetch all registers, or just one, from the child process.
5630 If REGNO is -1, do this for all registers, skipping any that are
5631 assumed to have been retrieved by regsets_fetch_inferior_registers,
5632 unless ALL is non-zero.
5633 Otherwise, REGNO specifies which register (so we can save time). */
5634 static void
5635 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5636 struct regcache *regcache, int regno, int all)
5637 {
5638 struct usrregs_info *usr = regs_info->usrregs;
5639
5640 if (regno == -1)
5641 {
5642 for (regno = 0; regno < usr->num_regs; regno++)
5643 if (all || !linux_register_in_regsets (regs_info, regno))
5644 fetch_register (usr, regcache, regno);
5645 }
5646 else
5647 fetch_register (usr, regcache, regno);
5648 }
5649
5650 /* Store our register values back into the inferior.
5651 If REGNO is -1, do this for all registers, skipping any that are
5652 assumed to have been saved by regsets_store_inferior_registers,
5653 unless ALL is non-zero.
5654 Otherwise, REGNO specifies which register (so we can save time). */
5655 static void
5656 usr_store_inferior_registers (const struct regs_info *regs_info,
5657 struct regcache *regcache, int regno, int all)
5658 {
5659 struct usrregs_info *usr = regs_info->usrregs;
5660
5661 if (regno == -1)
5662 {
5663 for (regno = 0; regno < usr->num_regs; regno++)
5664 if (all || !linux_register_in_regsets (regs_info, regno))
5665 store_register (usr, regcache, regno);
5666 }
5667 else
5668 store_register (usr, regcache, regno);
5669 }
5670
5671 #else /* !HAVE_LINUX_USRREGS */
5672
5673 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5674 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5675
5676 #endif
5677
5678
5679 static void
5680 linux_fetch_registers (struct regcache *regcache, int regno)
5681 {
5682 int use_regsets;
5683 int all = 0;
5684 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5685
5686 if (regno == -1)
5687 {
5688 if (the_low_target.fetch_register != NULL
5689 && regs_info->usrregs != NULL)
5690 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5691 (*the_low_target.fetch_register) (regcache, regno);
5692
5693 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5694 if (regs_info->usrregs != NULL)
5695 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5696 }
5697 else
5698 {
5699 if (the_low_target.fetch_register != NULL
5700 && (*the_low_target.fetch_register) (regcache, regno))
5701 return;
5702
5703 use_regsets = linux_register_in_regsets (regs_info, regno);
5704 if (use_regsets)
5705 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5706 regcache);
5707 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5708 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5709 }
5710 }
5711
5712 static void
5713 linux_store_registers (struct regcache *regcache, int regno)
5714 {
5715 int use_regsets;
5716 int all = 0;
5717 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5718
5719 if (regno == -1)
5720 {
5721 all = regsets_store_inferior_registers (regs_info->regsets_info,
5722 regcache);
5723 if (regs_info->usrregs != NULL)
5724 usr_store_inferior_registers (regs_info, regcache, regno, all);
5725 }
5726 else
5727 {
5728 use_regsets = linux_register_in_regsets (regs_info, regno);
5729 if (use_regsets)
5730 all = regsets_store_inferior_registers (regs_info->regsets_info,
5731 regcache);
5732 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5733 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5734 }
5735 }
5736
5737
5738 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5739 to debugger memory starting at MYADDR. */
5740
5741 static int
5742 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5743 {
5744 int pid = lwpid_of (current_thread);
5745 PTRACE_XFER_TYPE *buffer;
5746 CORE_ADDR addr;
5747 int count;
5748 char filename[64];
5749 int i;
5750 int ret;
5751 int fd;
5752
5753 /* Try using /proc. Don't bother for transfers under three words. */
5754 if (len >= 3 * sizeof (long))
5755 {
5756 int bytes;
5757
5758 /* We could keep this file open and cache it - possibly one per
5759 thread. That requires some juggling, but is even faster. */
5760 sprintf (filename, "/proc/%d/mem", pid);
5761 fd = open (filename, O_RDONLY | O_LARGEFILE);
5762 if (fd == -1)
5763 goto no_proc;
5764
5765 /* If pread64 is available, use it. It's faster if the kernel
5766 supports it (only one syscall), and it's 64-bit safe even on
5767 32-bit platforms (for instance, SPARC debugging a SPARC64
5768 application). */
5769 #ifdef HAVE_PREAD64
5770 bytes = pread64 (fd, myaddr, len, memaddr);
5771 #else
5772 bytes = -1;
5773 if (lseek (fd, memaddr, SEEK_SET) != -1)
5774 bytes = read (fd, myaddr, len);
5775 #endif
5776
5777 close (fd);
5778 if (bytes == len)
5779 return 0;
5780
5781 /* Some data was read; we'll try to get the rest with ptrace. */
5782 if (bytes > 0)
5783 {
5784 memaddr += bytes;
5785 myaddr += bytes;
5786 len -= bytes;
5787 }
5788 }
5789
5790 no_proc:
5791 /* Round starting address down to longword boundary. */
5792 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5793 /* Round ending address up; get number of longwords that makes. */
5794 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5795 / sizeof (PTRACE_XFER_TYPE));
5796 /* Allocate buffer of that many longwords. */
5797 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5798
5799 /* Read all the longwords. */
5800 errno = 0;
5801 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5802 {
5803 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5804 about coercing an 8 byte integer to a 4 byte pointer. */
5805 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5806 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5807 (PTRACE_TYPE_ARG4) 0);
5808 if (errno)
5809 break;
5810 }
5811 ret = errno;
5812
5813 /* Copy appropriate bytes out of the buffer. */
5814 if (i > 0)
5815 {
5816 i *= sizeof (PTRACE_XFER_TYPE);
5817 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5818 memcpy (myaddr,
5819 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5820 i < len ? i : len);
5821 }
5822
5823 return ret;
5824 }
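/* Worked example for the rounding in linux_read_memory (illustrative
   values): with MEMADDR == 0x1003, LEN == 6 and an 8-byte
   PTRACE_XFER_TYPE, ADDR rounds down to 0x1000 and
   COUNT == ((0x1009 - 0x1000) + 7) / 8 == 2 words are peeked; the
   memcpy then skips the first 0x1003 & 7 == 3 bytes of the buffer and
   copies out the 6 wanted bytes.  */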
5825
5826 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5827 memory at MEMADDR. On failure (cannot write to the inferior)
5828 returns the value of errno. Always succeeds if LEN is zero. */
5829
5830 static int
5831 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5832 {
5833 int i;
5834 /* Round starting address down to longword boundary. */
5835 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5836 /* Round ending address up; get number of longwords that makes. */
5837 int count
5838 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5839 / sizeof (PTRACE_XFER_TYPE);
5840
5841 /* Allocate buffer of that many longwords. */
5842 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5843
5844 int pid = lwpid_of (current_thread);
5845
5846 if (len == 0)
5847 {
5848 /* Zero length write always succeeds. */
5849 return 0;
5850 }
5851
5852 if (debug_threads)
5853 {
5854 /* Dump up to four bytes. */
5855 char str[4 * 2 + 1];
5856 char *p = str;
5857 int dump = len < 4 ? len : 4;
5858
5859 for (i = 0; i < dump; i++)
5860 {
5861 sprintf (p, "%02x", myaddr[i]);
5862 p += 2;
5863 }
5864 *p = '\0';
5865
5866 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5867 str, (long) memaddr, pid);
5868 }
5869
5870 /* Fill start and end extra bytes of buffer with existing memory data. */
5871
5872 errno = 0;
5873 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5874 about coercing an 8 byte integer to a 4 byte pointer. */
5875 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5876 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5877 (PTRACE_TYPE_ARG4) 0);
5878 if (errno)
5879 return errno;
5880
5881 if (count > 1)
5882 {
5883 errno = 0;
5884 buffer[count - 1]
5885 = ptrace (PTRACE_PEEKTEXT, pid,
5886 /* Coerce to a uintptr_t first to avoid potential gcc warning
5887 about coercing an 8 byte integer to a 4 byte pointer. */
5888 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5889 * sizeof (PTRACE_XFER_TYPE)),
5890 (PTRACE_TYPE_ARG4) 0);
5891 if (errno)
5892 return errno;
5893 }
5894
5895 /* Copy data to be written over corresponding part of buffer. */
5896
5897 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5898 myaddr, len);
5899
5900 /* Write the entire buffer. */
5901
5902 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5903 {
5904 errno = 0;
5905 ptrace (PTRACE_POKETEXT, pid,
5906 /* Coerce to a uintptr_t first to avoid potential gcc warning
5907 about coercing an 8 byte integer to a 4 byte pointer. */
5908 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5909 (PTRACE_TYPE_ARG4) buffer[i]);
5910 if (errno)
5911 return errno;
5912 }
5913
5914 return 0;
5915 }
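/* Illustrative note: linux_write_memory is a word-granular
   read-modify-write. E.g., writing 2 bytes at 0x1007 with an 8-byte
   PTRACE_XFER_TYPE spans the words at 0x1000 and 0x1008, so both
   boundary words are peeked first and only then poked back, preserving
   the surrounding bytes that are not part of the write.  */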
5916
5917 static void
5918 linux_look_up_symbols (void)
5919 {
5920 #ifdef USE_THREAD_DB
5921 struct process_info *proc = current_process ();
5922
5923 if (proc->priv->thread_db != NULL)
5924 return;
5925
5926 thread_db_init ();
5927 #endif
5928 }
5929
5930 static void
5931 linux_request_interrupt (void)
5932 {
5933 /* Send a SIGINT to the process group. This acts just as if the user
5934 had typed a ^C on the controlling terminal. */
5935 kill (-signal_pid, SIGINT);
5936 }
5937
5938 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5939 to debugger memory starting at MYADDR. */
5940
5941 static int
5942 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5943 {
5944 char filename[PATH_MAX];
5945 int fd, n;
5946 int pid = lwpid_of (current_thread);
5947
5948 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5949
5950 fd = open (filename, O_RDONLY);
5951 if (fd < 0)
5952 return -1;
5953
5954 if (offset != (CORE_ADDR) 0
5955 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5956 n = -1;
5957 else
5958 n = read (fd, myaddr, len);
5959
5960 close (fd);
5961
5962 return n;
5963 }
5964
5965 /* These breakpoint- and watchpoint-related wrapper functions simply
5966 pass the call on if the target has registered a corresponding
5967 function. */
5968
5969 static int
5970 linux_supports_z_point_type (char z_type)
5971 {
5972 return (the_low_target.supports_z_point_type != NULL
5973 && the_low_target.supports_z_point_type (z_type));
5974 }
5975
5976 static int
5977 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5978 int size, struct raw_breakpoint *bp)
5979 {
5980 if (type == raw_bkpt_type_sw)
5981 return insert_memory_breakpoint (bp);
5982 else if (the_low_target.insert_point != NULL)
5983 return the_low_target.insert_point (type, addr, size, bp);
5984 else
5985 /* Unsupported (see target.h). */
5986 return 1;
5987 }
5988
5989 static int
5990 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5991 int size, struct raw_breakpoint *bp)
5992 {
5993 if (type == raw_bkpt_type_sw)
5994 return remove_memory_breakpoint (bp);
5995 else if (the_low_target.remove_point != NULL)
5996 return the_low_target.remove_point (type, addr, size, bp);
5997 else
5998 /* Unsupported (see target.h). */
5999 return 1;
6000 }
6001
6002 /* Implement the to_stopped_by_sw_breakpoint target_ops
6003 method. */
6004
6005 static int
6006 linux_stopped_by_sw_breakpoint (void)
6007 {
6008 struct lwp_info *lwp = get_thread_lwp (current_thread);
6009
6010 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6011 }
6012
6013 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6014 method. */
6015
6016 static int
6017 linux_supports_stopped_by_sw_breakpoint (void)
6018 {
6019 return USE_SIGTRAP_SIGINFO;
6020 }
6021
6022 /* Implement the to_stopped_by_hw_breakpoint target_ops
6023 method. */
6024
6025 static int
6026 linux_stopped_by_hw_breakpoint (void)
6027 {
6028 struct lwp_info *lwp = get_thread_lwp (current_thread);
6029
6030 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6031 }
6032
6033 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6034 method. */
6035
6036 static int
6037 linux_supports_stopped_by_hw_breakpoint (void)
6038 {
6039 return USE_SIGTRAP_SIGINFO;
6040 }
6041
6042 /* Implement the supports_hardware_single_step target_ops method. */
6043
6044 static int
6045 linux_supports_hardware_single_step (void)
6046 {
6047 return can_hardware_single_step ();
6048 }
6049
6050 static int
6051 linux_supports_software_single_step (void)
6052 {
6053 return can_software_single_step ();
6054 }
6055
6056 static int
6057 linux_stopped_by_watchpoint (void)
6058 {
6059 struct lwp_info *lwp = get_thread_lwp (current_thread);
6060
6061 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6062 }
6063
6064 static CORE_ADDR
6065 linux_stopped_data_address (void)
6066 {
6067 struct lwp_info *lwp = get_thread_lwp (current_thread);
6068
6069 return lwp->stopped_data_address;
6070 }
6071
6072 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6073 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6074 && defined(PT_TEXT_END_ADDR)
6075
6076 /* This is only used for targets that define PT_TEXT_ADDR,
6077 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
6078 the target has other ways of acquiring this information, such as
6079 loadmaps. */
6080
6081 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6082 to tell gdb about. */
6083
6084 static int
6085 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6086 {
6087 unsigned long text, text_end, data;
6088 int pid = lwpid_of (current_thread);
6089
6090 errno = 0;
6091
6092 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6093 (PTRACE_TYPE_ARG4) 0);
6094 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6095 (PTRACE_TYPE_ARG4) 0);
6096 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6097 (PTRACE_TYPE_ARG4) 0);
6098
6099 if (errno == 0)
6100 {
6101 /* Both text and data offsets produced at compile-time (and so
6102 used by gdb) are relative to the beginning of the program,
6103 with the data segment immediately following the text segment.
6104 However, the actual runtime layout in memory may put the data
6105 somewhere else, so when we send gdb a data base-address, we
6106 use the real data base address and subtract the compile-time
6107 data base-address from it (which is just the length of the
6108 text segment). BSS immediately follows data in both
6109 cases. */
6110 *text_p = text;
6111 *data_p = data - (text_end - text);
6112
6113 return 1;
6114 }
6115 return 0;
6116 }
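/* Worked example (illustrative numbers): with TEXT == 0x8000,
   TEXT_END == 0xa000 and DATA == 0x20000, the text segment is 0x2000
   bytes long, so *DATA_P == 0x20000 - 0x2000 == 0x1e000. This
   compensates for the compile-time layout, in which data immediately
   follows text.  */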
6117 #endif
6118
6119 static int
6120 linux_qxfer_osdata (const char *annex,
6121 unsigned char *readbuf, unsigned const char *writebuf,
6122 CORE_ADDR offset, int len)
6123 {
6124 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6125 }
6126
6127 /* Convert a native/host siginfo object into/from the siginfo in the
6128 layout of the inferior's architecture. */
6129
6130 static void
6131 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6132 {
6133 int done = 0;
6134
6135 if (the_low_target.siginfo_fixup != NULL)
6136 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6137
6138 /* If there was no callback, or the callback didn't do anything,
6139 then just do a straight memcpy. */
6140 if (!done)
6141 {
6142 if (direction == 1)
6143 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6144 else
6145 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6146 }
6147 }
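/* Note on DIRECTION (derived from the memcpy fallback above and the
   call sites in linux_xfer_siginfo below): 0 converts the native
   SIGINFO into the inferior layout in INF_SIGINFO; 1 converts the
   inferior-layout INF_SIGINFO back into the native SIGINFO.  */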
6148
6149 static int
6150 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6151 unsigned const char *writebuf, CORE_ADDR offset, int len)
6152 {
6153 int pid;
6154 siginfo_t siginfo;
6155 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6156
6157 if (current_thread == NULL)
6158 return -1;
6159
6160 pid = lwpid_of (current_thread);
6161
6162 if (debug_threads)
6163 debug_printf ("%s siginfo for lwp %d.\n",
6164 readbuf != NULL ? "Reading" : "Writing",
6165 pid);
6166
6167 if (offset >= sizeof (siginfo))
6168 return -1;
6169
6170 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6171 return -1;
6172
6173 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6174 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6175 inferior with a 64-bit GDBSERVER should look the same as debugging it
6176 with a 32-bit GDBSERVER, we need to convert it. */
6177 siginfo_fixup (&siginfo, inf_siginfo, 0);
6178
6179 if (offset + len > sizeof (siginfo))
6180 len = sizeof (siginfo) - offset;
6181
6182 if (readbuf != NULL)
6183 memcpy (readbuf, inf_siginfo + offset, len);
6184 else
6185 {
6186 memcpy (inf_siginfo + offset, writebuf, len);
6187
6188 /* Convert back to ptrace layout before flushing it out. */
6189 siginfo_fixup (&siginfo, inf_siginfo, 1);
6190
6191 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6192 return -1;
6193 }
6194
6195 return len;
6196 }
6197
6198 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6199 it lets us notice when children change state, and it acts as the
6200 handler for the sigsuspend in my_waitpid. */
6201
6202 static void
6203 sigchld_handler (int signo)
6204 {
6205 int old_errno = errno;
6206
6207 if (debug_threads)
6208 {
6209 do
6210 {
6211 /* fprintf is not async-signal-safe, so call write
6212 directly. */
6213 if (write (2, "sigchld_handler\n",
6214 sizeof ("sigchld_handler\n") - 1) < 0)
6215 break; /* just ignore */
6216 } while (0);
6217 }
6218
6219 if (target_is_async_p ())
6220 async_file_mark (); /* trigger a linux_wait */
6221
6222 errno = old_errno;
6223 }
6224
6225 static int
6226 linux_supports_non_stop (void)
6227 {
6228 return 1;
6229 }
6230
6231 static int
6232 linux_async (int enable)
6233 {
6234 int previous = target_is_async_p ();
6235
6236 if (debug_threads)
6237 debug_printf ("linux_async (%d), previous=%d\n",
6238 enable, previous);
6239
6240 if (previous != enable)
6241 {
6242 sigset_t mask;
6243 sigemptyset (&mask);
6244 sigaddset (&mask, SIGCHLD);
6245
6246 sigprocmask (SIG_BLOCK, &mask, NULL);
6247
6248 if (enable)
6249 {
6250 if (pipe (linux_event_pipe) == -1)
6251 {
6252 linux_event_pipe[0] = -1;
6253 linux_event_pipe[1] = -1;
6254 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6255
6256 warning ("creating event pipe failed.");
6257 return previous;
6258 }
6259
6260 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6261 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6262
6263 /* Register the event loop handler. */
6264 add_file_handler (linux_event_pipe[0],
6265 handle_target_event, NULL);
6266
6267 /* Always trigger a linux_wait. */
6268 async_file_mark ();
6269 }
6270 else
6271 {
6272 delete_file_handler (linux_event_pipe[0]);
6273
6274 close (linux_event_pipe[0]);
6275 close (linux_event_pipe[1]);
6276 linux_event_pipe[0] = -1;
6277 linux_event_pipe[1] = -1;
6278 }
6279
6280 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6281 }
6282
6283 return previous;
6284 }
6285
6286 static int
6287 linux_start_non_stop (int nonstop)
6288 {
6289 /* Register or unregister from event-loop accordingly. */
6290 linux_async (nonstop);
6291
6292 if (target_is_async_p () != (nonstop != 0))
6293 return -1;
6294
6295 return 0;
6296 }
6297
6298 static int
6299 linux_supports_multi_process (void)
6300 {
6301 return 1;
6302 }
6303
6304 /* Check if fork events are supported. */
6305
6306 static int
6307 linux_supports_fork_events (void)
6308 {
6309 return linux_supports_tracefork ();
6310 }
6311
6312 /* Check if vfork events are supported. */
6313
6314 static int
6315 linux_supports_vfork_events (void)
6316 {
6317 return linux_supports_tracefork ();
6318 }
6319
6320 /* Check if exec events are supported. */
6321
6322 static int
6323 linux_supports_exec_events (void)
6324 {
6325 return linux_supports_traceexec ();
6326 }
6327
6328 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6329 ptrace flags for all inferiors. This is in case the new GDB connection
6330 doesn't support the same set of events that the previous one did. */
6331
6332 static void
6333 linux_handle_new_gdb_connection (void)
6334 {
6335 /* Request that all the lwps reset their ptrace options. */
6336 for_each_thread ([] (thread_info *thread)
6337 {
6338 struct lwp_info *lwp = get_thread_lwp (thread);
6339
6340 if (!lwp->stopped)
6341 {
6342 /* Stop the lwp so we can modify its ptrace options. */
6343 lwp->must_set_ptrace_flags = 1;
6344 linux_stop_lwp (lwp);
6345 }
6346 else
6347 {
6348 /* Already stopped; go ahead and set the ptrace options. */
6349 struct process_info *proc = find_process_pid (pid_of (thread));
6350 int options = linux_low_ptrace_options (proc->attached);
6351
6352 linux_enable_event_reporting (lwpid_of (thread), options);
6353 lwp->must_set_ptrace_flags = 0;
6354 }
6355 });
6356 }
6357
6358 static int
6359 linux_supports_disable_randomization (void)
6360 {
6361 #ifdef HAVE_PERSONALITY
6362 return 1;
6363 #else
6364 return 0;
6365 #endif
6366 }
6367
6368 static int
6369 linux_supports_agent (void)
6370 {
6371 return 1;
6372 }
6373
6374 static int
6375 linux_supports_range_stepping (void)
6376 {
6377 if (can_software_single_step ())
6378 return 1;
6379 if (*the_low_target.supports_range_stepping == NULL)
6380 return 0;
6381
6382 return (*the_low_target.supports_range_stepping) ();
6383 }
6384
6385 /* Enumerate spufs IDs for process PID. */
6386 static int
6387 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6388 {
6389 int pos = 0;
6390 int written = 0;
6391 char path[128];
6392 DIR *dir;
6393 struct dirent *entry;
6394
6395 sprintf (path, "/proc/%ld/fd", pid);
6396 dir = opendir (path);
6397 if (!dir)
6398 return -1;
6399
6400 rewinddir (dir);
6401 while ((entry = readdir (dir)) != NULL)
6402 {
6403 struct stat st;
6404 struct statfs stfs;
6405 int fd;
6406
6407 fd = atoi (entry->d_name);
6408 if (!fd)
6409 continue;
6410
6411 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6412 if (stat (path, &st) != 0)
6413 continue;
6414 if (!S_ISDIR (st.st_mode))
6415 continue;
6416
6417 if (statfs (path, &stfs) != 0)
6418 continue;
6419 if (stfs.f_type != SPUFS_MAGIC)
6420 continue;
6421
6422 if (pos >= offset && pos + 4 <= offset + len)
6423 {
6424 *(unsigned int *)(buf + pos - offset) = fd;
6425 written += 4;
6426 }
6427 pos += 4;
6428 }
6429
6430 closedir (dir);
6431 return written;
6432 }
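/* Illustrative note: each SPU context contributes 4 bytes (its file
   descriptor number as an unsigned int), and OFFSET/LEN select a
   window into that virtual array. E.g., OFFSET == 4 and LEN == 8
   return the second and third IDs, if present.  */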
6433
6434 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6435 object type, using the /proc file system. */
6436 static int
6437 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6438 unsigned const char *writebuf,
6439 CORE_ADDR offset, int len)
6440 {
6441 long pid = lwpid_of (current_thread);
6442 char buf[128];
6443 int fd = 0;
6444 int ret = 0;
6445
6446 if (!writebuf && !readbuf)
6447 return -1;
6448
6449 if (!*annex)
6450 {
6451 if (!readbuf)
6452 return -1;
6453 else
6454 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6455 }
6456
6457 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6458 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6459 if (fd <= 0)
6460 return -1;
6461
6462 if (offset != 0
6463 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6464 {
6465 close (fd);
6466 return 0;
6467 }
6468
6469 if (writebuf)
6470 ret = write (fd, writebuf, (size_t) len);
6471 else
6472 ret = read (fd, readbuf, (size_t) len);
6473
6474 close (fd);
6475 return ret;
6476 }
6477
6478 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6479 struct target_loadseg
6480 {
6481 /* Core address to which the segment is mapped. */
6482 Elf32_Addr addr;
6483 /* VMA recorded in the program header. */
6484 Elf32_Addr p_vaddr;
6485 /* Size of this segment in memory. */
6486 Elf32_Word p_memsz;
6487 };
6488
6489 # if defined PT_GETDSBT
6490 struct target_loadmap
6491 {
6492 /* Protocol version number, must be zero. */
6493 Elf32_Word version;
6494 /* Pointer to the DSBT table, its size, and the DSBT index. */
6495 unsigned *dsbt_table;
6496 unsigned dsbt_size, dsbt_index;
6497 /* Number of segments in this map. */
6498 Elf32_Word nsegs;
6499 /* The actual memory map. */
6500 struct target_loadseg segs[/*nsegs*/];
6501 };
6502 # define LINUX_LOADMAP PT_GETDSBT
6503 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6504 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6505 # else
6506 struct target_loadmap
6507 {
6508 /* Protocol version number, must be zero. */
6509 Elf32_Half version;
6510 /* Number of segments in this map. */
6511 Elf32_Half nsegs;
6512 /* The actual memory map. */
6513 struct target_loadseg segs[/*nsegs*/];
6514 };
6515 # define LINUX_LOADMAP PTRACE_GETFDPIC
6516 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6517 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6518 # endif
6519
6520 static int
6521 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6522 unsigned char *myaddr, unsigned int len)
6523 {
6524 int pid = lwpid_of (current_thread);
6525 int addr = -1;
6526 struct target_loadmap *data = NULL;
6527 unsigned int actual_length, copy_length;
6528
6529 if (strcmp (annex, "exec") == 0)
6530 addr = (int) LINUX_LOADMAP_EXEC;
6531 else if (strcmp (annex, "interp") == 0)
6532 addr = (int) LINUX_LOADMAP_INTERP;
6533 else
6534 return -1;
6535
6536 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6537 return -1;
6538
6539 if (data == NULL)
6540 return -1;
6541
6542 actual_length = sizeof (struct target_loadmap)
6543 + sizeof (struct target_loadseg) * data->nsegs;
6544
6545 if (offset < 0 || offset > actual_length)
6546 return -1;
6547
6548 copy_length = actual_length - offset < len ? actual_length - offset : len;
6549 memcpy (myaddr, (char *) data + offset, copy_length);
6550 return copy_length;
6551 }
6552 #else
6553 # define linux_read_loadmap NULL
6554 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6555
6556 static void
6557 linux_process_qsupported (char **features, int count)
6558 {
6559 if (the_low_target.process_qsupported != NULL)
6560 the_low_target.process_qsupported (features, count);
6561 }
6562
6563 static int
6564 linux_supports_catch_syscall (void)
6565 {
6566 return (the_low_target.get_syscall_trapinfo != NULL
6567 && linux_supports_tracesysgood ());
6568 }
6569
6570 static int
6571 linux_get_ipa_tdesc_idx (void)
6572 {
6573 if (the_low_target.get_ipa_tdesc_idx == NULL)
6574 return 0;
6575
6576 return (*the_low_target.get_ipa_tdesc_idx) ();
6577 }
6578
6579 static int
6580 linux_supports_tracepoints (void)
6581 {
6582 if (*the_low_target.supports_tracepoints == NULL)
6583 return 0;
6584
6585 return (*the_low_target.supports_tracepoints) ();
6586 }
6587
6588 static CORE_ADDR
6589 linux_read_pc (struct regcache *regcache)
6590 {
6591 if (the_low_target.get_pc == NULL)
6592 return 0;
6593
6594 return (*the_low_target.get_pc) (regcache);
6595 }
6596
6597 static void
6598 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6599 {
6600 gdb_assert (the_low_target.set_pc != NULL);
6601
6602 (*the_low_target.set_pc) (regcache, pc);
6603 }
6604
6605 static int
6606 linux_thread_stopped (struct thread_info *thread)
6607 {
6608 return get_thread_lwp (thread)->stopped;
6609 }
6610
6611 /* This exposes stop-all-threads functionality to other modules. */
6612
6613 static void
6614 linux_pause_all (int freeze)
6615 {
6616 stop_all_lwps (freeze, NULL);
6617 }
6618
6619 /* This exposes unstop-all-threads functionality to other gdbserver
6620 modules. */
6621
6622 static void
6623 linux_unpause_all (int unfreeze)
6624 {
6625 unstop_all_lwps (unfreeze, NULL);
6626 }
6627
6628 static int
6629 linux_prepare_to_access_memory (void)
6630 {
6631 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6632 running LWP. */
6633 if (non_stop)
6634 linux_pause_all (1);
6635 return 0;
6636 }
6637
6638 static void
6639 linux_done_accessing_memory (void)
6640 {
6641 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6642 running LWP. */
6643 if (non_stop)
6644 linux_unpause_all (1);
6645 }
6646
6647 static int
6648 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6649 CORE_ADDR collector,
6650 CORE_ADDR lockaddr,
6651 ULONGEST orig_size,
6652 CORE_ADDR *jump_entry,
6653 CORE_ADDR *trampoline,
6654 ULONGEST *trampoline_size,
6655 unsigned char *jjump_pad_insn,
6656 ULONGEST *jjump_pad_insn_size,
6657 CORE_ADDR *adjusted_insn_addr,
6658 CORE_ADDR *adjusted_insn_addr_end,
6659 char *err)
6660 {
6661 return (*the_low_target.install_fast_tracepoint_jump_pad)
6662 (tpoint, tpaddr, collector, lockaddr, orig_size,
6663 jump_entry, trampoline, trampoline_size,
6664 jjump_pad_insn, jjump_pad_insn_size,
6665 adjusted_insn_addr, adjusted_insn_addr_end,
6666 err);
6667 }
6668
6669 static struct emit_ops *
6670 linux_emit_ops (void)
6671 {
6672 if (the_low_target.emit_ops != NULL)
6673 return (*the_low_target.emit_ops) ();
6674 else
6675 return NULL;
6676 }
6677
6678 static int
6679 linux_get_min_fast_tracepoint_insn_len (void)
6680 {
6681 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6682 }
6683
6684 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6685
6686 static int
6687 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6688 CORE_ADDR *phdr_memaddr, int *num_phdr)
6689 {
6690 char filename[PATH_MAX];
6691 int fd;
6692 const int auxv_size = is_elf64
6693 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6694 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6695
6696 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6697
6698 fd = open (filename, O_RDONLY);
6699 if (fd < 0)
6700 return 1;
6701
6702 *phdr_memaddr = 0;
6703 *num_phdr = 0;
6704 while (read (fd, buf, auxv_size) == auxv_size
6705 && (*phdr_memaddr == 0 || *num_phdr == 0))
6706 {
6707 if (is_elf64)
6708 {
6709 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6710
6711 switch (aux->a_type)
6712 {
6713 case AT_PHDR:
6714 *phdr_memaddr = aux->a_un.a_val;
6715 break;
6716 case AT_PHNUM:
6717 *num_phdr = aux->a_un.a_val;
6718 break;
6719 }
6720 }
6721 else
6722 {
6723 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6724
6725 switch (aux->a_type)
6726 {
6727 case AT_PHDR:
6728 *phdr_memaddr = aux->a_un.a_val;
6729 break;
6730 case AT_PHNUM:
6731 *num_phdr = aux->a_un.a_val;
6732 break;
6733 }
6734 }
6735 }
6736
6737 close (fd);
6738
6739 if (*phdr_memaddr == 0 || *num_phdr == 0)
6740 {
6741 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6742 "phdr_memaddr = %ld, phdr_num = %d",
6743 (long) *phdr_memaddr, *num_phdr);
6744 return 2;
6745 }
6746
6747 return 0;
6748 }
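/* Illustrative note: /proc/PID/auxv is a packed array of auxiliary
   vector entries, each an (a_type, a_un.a_val) pair in the inferior's
   ELF word size, conventionally terminated by an AT_NULL entry; the
   loop above simply scans it for AT_PHDR and AT_PHNUM.  */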
6749
6750 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6751
6752 static CORE_ADDR
6753 get_dynamic (const int pid, const int is_elf64)
6754 {
6755 CORE_ADDR phdr_memaddr, relocation;
6756 int num_phdr, i;
6757 unsigned char *phdr_buf;
6758 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6759
6760 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6761 return 0;
6762
6763 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6764 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6765
6766 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6767 return 0;
6768
6769 /* Compute relocation: it is expected to be 0 for "regular" executables,
6770 non-zero for PIE ones. */
6771 relocation = -1;
6772 for (i = 0; relocation == -1 && i < num_phdr; i++)
6773 if (is_elf64)
6774 {
6775 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6776
6777 if (p->p_type == PT_PHDR)
6778 relocation = phdr_memaddr - p->p_vaddr;
6779 }
6780 else
6781 {
6782 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6783
6784 if (p->p_type == PT_PHDR)
6785 relocation = phdr_memaddr - p->p_vaddr;
6786 }
6787
6788 if (relocation == -1)
6789 {
6790 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6791 real-world executables, including PIE executables, always have
6792 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6793 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6794 provides DT_DEBUG anyway (fpc binaries are statically linked).
6795
6796 Therefore, if DT_DEBUG exists, PT_PHDR exists as well.
6797
6798 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6799
6800 return 0;
6801 }
6802
6803 for (i = 0; i < num_phdr; i++)
6804 {
6805 if (is_elf64)
6806 {
6807 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6808
6809 if (p->p_type == PT_DYNAMIC)
6810 return p->p_vaddr + relocation;
6811 }
6812 else
6813 {
6814 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6815
6816 if (p->p_type == PT_DYNAMIC)
6817 return p->p_vaddr + relocation;
6818 }
6819 }
6820
6821 return 0;
6822 }
6823
6824 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6825 can be 0 if the inferior does not yet have the library list initialized.
6826 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6827 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6828
6829 static CORE_ADDR
6830 get_r_debug (const int pid, const int is_elf64)
6831 {
6832 CORE_ADDR dynamic_memaddr;
6833 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6834 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6835 CORE_ADDR map = -1;
6836
6837 dynamic_memaddr = get_dynamic (pid, is_elf64);
6838 if (dynamic_memaddr == 0)
6839 return map;
6840
6841 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6842 {
6843 if (is_elf64)
6844 {
6845 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6846 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6847 union
6848 {
6849 Elf64_Xword map;
6850 unsigned char buf[sizeof (Elf64_Xword)];
6851 }
6852 rld_map;
6853 #endif
6854 #ifdef DT_MIPS_RLD_MAP
6855 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6856 {
6857 if (linux_read_memory (dyn->d_un.d_val,
6858 rld_map.buf, sizeof (rld_map.buf)) == 0)
6859 return rld_map.map;
6860 else
6861 break;
6862 }
6863 #endif /* DT_MIPS_RLD_MAP */
6864 #ifdef DT_MIPS_RLD_MAP_REL
6865 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6866 {
6867 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6868 rld_map.buf, sizeof (rld_map.buf)) == 0)
6869 return rld_map.map;
6870 else
6871 break;
6872 }
6873 #endif /* DT_MIPS_RLD_MAP_REL */
6874
6875 if (dyn->d_tag == DT_DEBUG && map == -1)
6876 map = dyn->d_un.d_val;
6877
6878 if (dyn->d_tag == DT_NULL)
6879 break;
6880 }
6881 else
6882 {
6883 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6884 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6885 union
6886 {
6887 Elf32_Word map;
6888 unsigned char buf[sizeof (Elf32_Word)];
6889 }
6890 rld_map;
6891 #endif
6892 #ifdef DT_MIPS_RLD_MAP
6893 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6894 {
6895 if (linux_read_memory (dyn->d_un.d_val,
6896 rld_map.buf, sizeof (rld_map.buf)) == 0)
6897 return rld_map.map;
6898 else
6899 break;
6900 }
6901 #endif /* DT_MIPS_RLD_MAP */
6902 #ifdef DT_MIPS_RLD_MAP_REL
6903 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6904 {
6905 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6906 rld_map.buf, sizeof (rld_map.buf)) == 0)
6907 return rld_map.map;
6908 else
6909 break;
6910 }
6911 #endif /* DT_MIPS_RLD_MAP_REL */
6912
6913 if (dyn->d_tag == DT_DEBUG && map == -1)
6914 map = dyn->d_un.d_val;
6915
6916 if (dyn->d_tag == DT_NULL)
6917 break;
6918 }
6919
6920 dynamic_memaddr += dyn_size;
6921 }
6922
6923 return map;
6924 }
6925
6926 /* Read one pointer from MEMADDR in the inferior. */
6927
6928 static int
6929 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6930 {
6931 int ret;
6932
6933 /* Go through a union so this works on either big or little endian
6934 hosts, when the inferior's pointer size is smaller than the size
6935 of CORE_ADDR. It is assumed that the inferior's endianness is the
6936 same as the superior's. */
6937 union
6938 {
6939 CORE_ADDR core_addr;
6940 unsigned int ui;
6941 unsigned char uc;
6942 } addr;
6943
6944 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6945 if (ret == 0)
6946 {
6947 if (ptr_size == sizeof (CORE_ADDR))
6948 *ptr = addr.core_addr;
6949 else if (ptr_size == sizeof (unsigned int))
6950 *ptr = addr.ui;
6951 else
6952 gdb_assert_not_reached ("unhandled pointer size");
6953 }
6954 return ret;
6955 }
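/* Illustrative note on the union trick above: on a big-endian 64-bit
   host reading a 4-byte inferior pointer, linux_read_memory fills the
   first four bytes of the union; reading them back through ADDR.UI
   yields the correct value, whereas reading PTR_SIZE bytes directly
   into a CORE_ADDR would leave them in its most significant half.  */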
6956
6957 struct link_map_offsets
6958 {
6959 /* Offset and size of r_debug.r_version. */
6960 int r_version_offset;
6961
6962 /* Offset and size of r_debug.r_map. */
6963 int r_map_offset;
6964
6965 /* Offset to l_addr field in struct link_map. */
6966 int l_addr_offset;
6967
6968 /* Offset to l_name field in struct link_map. */
6969 int l_name_offset;
6970
6971 /* Offset to l_ld field in struct link_map. */
6972 int l_ld_offset;
6973
6974 /* Offset to l_next field in struct link_map. */
6975 int l_next_offset;
6976
6977 /* Offset to l_prev field in struct link_map. */
6978 int l_prev_offset;
6979 };
6980
6981 /* Construct qXfer:libraries-svr4:read reply. */
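/* For example, a reply for an inferior with one shared library might
   look like this (addresses illustrative, line-wrapped here for
   readability; the actual document contains no newlines):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffd990">
   <library name="/lib64/libc.so.6" lm="0x7ffff7fc4000"
   l_addr="0x7ffff79e4000" l_ld="0x7ffff7dcfb40"/>
   </library-list-svr4>  */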
6982
6983 static int
6984 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6985 unsigned const char *writebuf,
6986 CORE_ADDR offset, int len)
6987 {
6988 struct process_info_private *const priv = current_process ()->priv;
6989 char filename[PATH_MAX];
6990 int pid, is_elf64;
6991
6992 static const struct link_map_offsets lmo_32bit_offsets =
6993 {
6994 0, /* r_version offset. */
6995 4, /* r_debug.r_map offset. */
6996 0, /* l_addr offset in link_map. */
6997 4, /* l_name offset in link_map. */
6998 8, /* l_ld offset in link_map. */
6999 12, /* l_next offset in link_map. */
7000 16 /* l_prev offset in link_map. */
7001 };
7002
7003 static const struct link_map_offsets lmo_64bit_offsets =
7004 {
7005 0, /* r_version offset. */
7006 8, /* r_debug.r_map offset. */
7007 0, /* l_addr offset in link_map. */
7008 8, /* l_name offset in link_map. */
7009 16, /* l_ld offset in link_map. */
7010 24, /* l_next offset in link_map. */
7011 32 /* l_prev offset in link_map. */
7012 };
7013 const struct link_map_offsets *lmo;
7014 unsigned int machine;
7015 int ptr_size;
7016 CORE_ADDR lm_addr = 0, lm_prev = 0;
7017 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7018 int header_done = 0;
7019
7020 if (writebuf != NULL)
7021 return -2;
7022 if (readbuf == NULL)
7023 return -1;
7024
7025 pid = lwpid_of (current_thread);
7026 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7027 is_elf64 = elf_64_file_p (filename, &machine);
7028 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7029 ptr_size = is_elf64 ? 8 : 4;
7030
7031 while (annex[0] != '\0')
7032 {
7033 const char *sep;
7034 CORE_ADDR *addrp;
7035 int name_len;
7036
7037 sep = strchr (annex, '=');
7038 if (sep == NULL)
7039 break;
7040
7041 name_len = sep - annex;
7042 if (name_len == 5 && startswith (annex, "start"))
7043 addrp = &lm_addr;
7044 else if (name_len == 4 && startswith (annex, "prev"))
7045 addrp = &lm_prev;
7046 else
7047 {
7048 annex = strchr (sep, ';');
7049 if (annex == NULL)
7050 break;
7051 annex++;
7052 continue;
7053 }
7054
7055 annex = decode_address_to_semicolon (addrp, sep + 1);
7056 }
7057
7058 if (lm_addr == 0)
7059 {
7060 int r_version = 0;
7061
7062 if (priv->r_debug == 0)
7063 priv->r_debug = get_r_debug (pid, is_elf64);
7064
7065 /* We failed to find DT_DEBUG. That situation will not change
7066 for this inferior - do not retry it. Report it to GDB as
7067 E01; see GDB's solib-svr4.c for the reasons. */
7068 if (priv->r_debug == (CORE_ADDR) -1)
7069 return -1;
7070
7071 if (priv->r_debug != 0)
7072 {
7073 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7074 (unsigned char *) &r_version,
7075 sizeof (r_version)) != 0
7076 || r_version != 1)
7077 {
7078 warning ("unexpected r_debug version %d", r_version);
7079 }
7080 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7081 &lm_addr, ptr_size) != 0)
7082 {
7083 warning ("unable to read r_map from 0x%lx",
7084 (long) priv->r_debug + lmo->r_map_offset);
7085 }
7086 }
7087 }
7088
7089 std::string document = "<library-list-svr4 version=\"1.0\"";
7090
7091 while (lm_addr
7092 && read_one_ptr (lm_addr + lmo->l_name_offset,
7093 &l_name, ptr_size) == 0
7094 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7095 &l_addr, ptr_size) == 0
7096 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7097 &l_ld, ptr_size) == 0
7098 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7099 &l_prev, ptr_size) == 0
7100 && read_one_ptr (lm_addr + lmo->l_next_offset,
7101 &l_next, ptr_size) == 0)
7102 {
7103 unsigned char libname[PATH_MAX];
7104
7105 if (lm_prev != l_prev)
7106 {
7107 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7108 (long) lm_prev, (long) l_prev);
7109 break;
7110 }
7111
7112 /* Ignore the first entry even if it has a valid name, as the first
7113 entry corresponds to the main executable. The first entry should not
7114 be skipped if the dynamic loader was loaded late by a static executable
7115 (see the solib-svr4.c parameter ignore_first). But in that case the
7116 main executable does not have PT_DYNAMIC present, and this function
7117 has already exited above due to a failed get_r_debug. */
7118 if (lm_prev == 0)
7119 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7120 else
7121 {
7122 /* Not checking for error because reading may stop before
7123 we've got PATH_MAX worth of characters. */
7124 libname[0] = '\0';
7125 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7126 libname[sizeof (libname) - 1] = '\0';
7127 if (libname[0] != '\0')
7128 {
7129 if (!header_done)
7130 {
7131 /* Terminate `<library-list-svr4'. */
7132 document += '>';
7133 header_done = 1;
7134 }
7135
7136 string_appendf (document, "<library name=\"");
7137 xml_escape_text_append (&document, (char *) libname);
7138 string_appendf (document, "\" lm=\"0x%lx\" "
7139 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7140 (unsigned long) lm_addr, (unsigned long) l_addr,
7141 (unsigned long) l_ld);
7142 }
7143 }
7144
7145 lm_prev = lm_addr;
7146 lm_addr = l_next;
7147 }
7148
7149 if (!header_done)
7150 {
7151 /* Empty list; terminate `<library-list-svr4'. */
7152 document += "/>";
7153 }
7154 else
7155 document += "</library-list-svr4>";
7156
7157 int document_len = document.length ();
7158 if (offset < document_len)
7159 document_len -= offset;
7160 else
7161 document_len = 0;
7162 if (len > document_len)
7163 len = document_len;
7164
7165 memcpy (readbuf, document.data () + offset, len);
7166
7167 return len;
7168 }
7169
7170 #ifdef HAVE_LINUX_BTRACE
7171
7172 /* See to_disable_btrace target method. */
7173
7174 static int
7175 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7176 {
7177 enum btrace_error err;
7178
7179 err = linux_disable_btrace (tinfo);
7180 return (err == BTRACE_ERR_NONE ? 0 : -1);
7181 }
7182
7183 /* Encode an Intel Processor Trace configuration. */
7184
7185 static void
7186 linux_low_encode_pt_config (struct buffer *buffer,
7187 const struct btrace_data_pt_config *config)
7188 {
7189 buffer_grow_str (buffer, "<pt-config>\n");
7190
7191 switch (config->cpu.vendor)
7192 {
7193 case CV_INTEL:
7194 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7195 "model=\"%u\" stepping=\"%u\"/>\n",
7196 config->cpu.family, config->cpu.model,
7197 config->cpu.stepping);
7198 break;
7199
7200 default:
7201 break;
7202 }
7203
7204 buffer_grow_str (buffer, "</pt-config>\n");
7205 }
7206
7207 /* Encode a raw buffer. */
7208
7209 static void
7210 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7211 unsigned int size)
7212 {
7213 if (size == 0)
7214 return;
7215
7216 /* We use hex encoding - see common/rsp-low.h. */
7217 buffer_grow_str (buffer, "<raw>\n");
7218
7219 while (size-- > 0)
7220 {
7221 char elem[2];
7222
7223 elem[0] = tohex ((*data >> 4) & 0xf);
7224 elem[1] = tohex (*data++ & 0xf);
7225
7226 buffer_grow (buffer, elem, 2);
7227 }
7228
7229 buffer_grow_str (buffer, "</raw>\n");
7230 }
7231
7232 /* See to_read_btrace target method. */
7233
7234 static int
7235 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7236 enum btrace_read_type type)
7237 {
7238 struct btrace_data btrace;
7239 struct btrace_block *block;
7240 enum btrace_error err;
7241 int i;
7242
7243 err = linux_read_btrace (&btrace, tinfo, type);
7244 if (err != BTRACE_ERR_NONE)
7245 {
7246 if (err == BTRACE_ERR_OVERFLOW)
7247 buffer_grow_str0 (buffer, "E.Overflow.");
7248 else
7249 buffer_grow_str0 (buffer, "E.Generic Error.");
7250
7251 return -1;
7252 }
7253
7254 switch (btrace.format)
7255 {
7256 case BTRACE_FORMAT_NONE:
7257 buffer_grow_str0 (buffer, "E.No Trace.");
7258 return -1;
7259
7260 case BTRACE_FORMAT_BTS:
7261 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7262 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7263
7264 for (i = 0;
7265 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7266 i++)
7267 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7268 paddress (block->begin), paddress (block->end));
7269
7270 buffer_grow_str0 (buffer, "</btrace>\n");
7271 break;
7272
7273 case BTRACE_FORMAT_PT:
7274 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7275 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7276 buffer_grow_str (buffer, "<pt>\n");
7277
7278 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7279
7280 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7281 btrace.variant.pt.size);
7282
7283 buffer_grow_str (buffer, "</pt>\n");
7284 buffer_grow_str0 (buffer, "</btrace>\n");
7285 break;
7286
7287 default:
7288 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7289 return -1;
7290 }
7291
7292 return 0;
7293 }
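/* For example, a BTS-format reply built above might look like this
   (addresses illustrative):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400400" end="0x400434"/>
   </btrace>  */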
7294
7295 /* See to_btrace_conf target method. */
7296
7297 static int
7298 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7299 struct buffer *buffer)
7300 {
7301 const struct btrace_config *conf;
7302
7303 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7304 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7305
7306 conf = linux_btrace_conf (tinfo);
7307 if (conf != NULL)
7308 {
7309 switch (conf->format)
7310 {
7311 case BTRACE_FORMAT_NONE:
7312 break;
7313
7314 case BTRACE_FORMAT_BTS:
7315 buffer_xml_printf (buffer, "<bts");
7316 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7317 buffer_xml_printf (buffer, " />\n");
7318 break;
7319
7320 case BTRACE_FORMAT_PT:
7321 buffer_xml_printf (buffer, "<pt");
7322 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7323 buffer_xml_printf (buffer, "/>\n");
7324 break;
7325 }
7326 }
7327
7328 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7329 return 0;
7330 }
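/* For example, a configuration reply for the BTS format might look
   like this (size illustrative):

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>  */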
7331 #endif /* HAVE_LINUX_BTRACE */
7332
7333 /* See nat/linux-nat.h. */
7334
7335 ptid_t
7336 current_lwp_ptid (void)
7337 {
7338 return ptid_of (current_thread);
7339 }
7340
7341 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7342
7343 static int
7344 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7345 {
7346 if (the_low_target.breakpoint_kind_from_pc != NULL)
7347 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7348 else
7349 return default_breakpoint_kind_from_pc (pcptr);
7350 }
7351
7352 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7353
7354 static const gdb_byte *
7355 linux_sw_breakpoint_from_kind (int kind, int *size)
7356 {
7357 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7358
7359 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7360 }
7361
7362 /* Implementation of the target_ops method
7363 "breakpoint_kind_from_current_state". */
7364
7365 static int
7366 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7367 {
7368 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7369 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7370 else
7371 return linux_breakpoint_kind_from_pc (pcptr);
7372 }
7373
7374 /* Default implementation of linux_target_ops method "set_pc" for
7375 32-bit pc register which is literally named "pc". */
7376
7377 void
7378 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7379 {
7380 uint32_t newpc = pc;
7381
7382 supply_register_by_name (regcache, "pc", &newpc);
7383 }
7384
7385 /* Default implementation of linux_target_ops method "get_pc" for
7386 32-bit pc register which is literally named "pc". */
7387
7388 CORE_ADDR
7389 linux_get_pc_32bit (struct regcache *regcache)
7390 {
7391 uint32_t pc;
7392
7393 collect_register_by_name (regcache, "pc", &pc);
7394 if (debug_threads)
7395 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7396 return pc;
7397 }
7398
7399 /* Default implementation of linux_target_ops method "set_pc" for
7400 64-bit pc register which is literally named "pc". */
7401
7402 void
7403 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7404 {
7405 uint64_t newpc = pc;
7406
7407 supply_register_by_name (regcache, "pc", &newpc);
7408 }
7409
7410 /* Default implementation of linux_target_ops method "get_pc" for
7411 64-bit pc register which is literally named "pc". */
7412
7413 CORE_ADDR
7414 linux_get_pc_64bit (struct regcache *regcache)
7415 {
7416 uint64_t pc;
7417
7418 collect_register_by_name (regcache, "pc", &pc);
7419 if (debug_threads)
7420 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7421 return pc;
7422 }
7423
7424
7425 static struct target_ops linux_target_ops = {
7426 linux_create_inferior,
7427 linux_post_create_inferior,
7428 linux_attach,
7429 linux_kill,
7430 linux_detach,
7431 linux_mourn,
7432 linux_join,
7433 linux_thread_alive,
7434 linux_resume,
7435 linux_wait,
7436 linux_fetch_registers,
7437 linux_store_registers,
7438 linux_prepare_to_access_memory,
7439 linux_done_accessing_memory,
7440 linux_read_memory,
7441 linux_write_memory,
7442 linux_look_up_symbols,
7443 linux_request_interrupt,
7444 linux_read_auxv,
7445 linux_supports_z_point_type,
7446 linux_insert_point,
7447 linux_remove_point,
7448 linux_stopped_by_sw_breakpoint,
7449 linux_supports_stopped_by_sw_breakpoint,
7450 linux_stopped_by_hw_breakpoint,
7451 linux_supports_stopped_by_hw_breakpoint,
7452 linux_supports_hardware_single_step,
7453 linux_stopped_by_watchpoint,
7454 linux_stopped_data_address,
7455 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7456 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7457 && defined(PT_TEXT_END_ADDR)
7458 linux_read_offsets,
7459 #else
7460 NULL,
7461 #endif
7462 #ifdef USE_THREAD_DB
7463 thread_db_get_tls_address,
7464 #else
7465 NULL,
7466 #endif
7467 linux_qxfer_spu,
7468 hostio_last_error_from_errno,
7469 linux_qxfer_osdata,
7470 linux_xfer_siginfo,
7471 linux_supports_non_stop,
7472 linux_async,
7473 linux_start_non_stop,
7474 linux_supports_multi_process,
7475 linux_supports_fork_events,
7476 linux_supports_vfork_events,
7477 linux_supports_exec_events,
7478 linux_handle_new_gdb_connection,
7479 #ifdef USE_THREAD_DB
7480 thread_db_handle_monitor_command,
7481 #else
7482 NULL,
7483 #endif
7484 linux_common_core_of_thread,
7485 linux_read_loadmap,
7486 linux_process_qsupported,
7487 linux_supports_tracepoints,
7488 linux_read_pc,
7489 linux_write_pc,
7490 linux_thread_stopped,
7491 NULL,
7492 linux_pause_all,
7493 linux_unpause_all,
7494 linux_stabilize_threads,
7495 linux_install_fast_tracepoint_jump_pad,
7496 linux_emit_ops,
7497 linux_supports_disable_randomization,
7498 linux_get_min_fast_tracepoint_insn_len,
7499 linux_qxfer_libraries_svr4,
7500 linux_supports_agent,
7501 #ifdef HAVE_LINUX_BTRACE
7502 linux_enable_btrace,
7503 linux_low_disable_btrace,
7504 linux_low_read_btrace,
7505 linux_low_btrace_conf,
7506 #else
7507 NULL,
7508 NULL,
7509 NULL,
7510 NULL,
7511 #endif
7512 linux_supports_range_stepping,
7513 linux_proc_pid_to_exec_file,
7514 linux_mntns_open_cloexec,
7515 linux_mntns_unlink,
7516 linux_mntns_readlink,
7517 linux_breakpoint_kind_from_pc,
7518 linux_sw_breakpoint_from_kind,
7519 linux_proc_tid_get_name,
7520 linux_breakpoint_kind_from_current_state,
7521 linux_supports_software_single_step,
7522 linux_supports_catch_syscall,
7523 linux_get_ipa_tdesc_idx,
7524 #ifdef USE_THREAD_DB
7525 thread_db_thread_handle,
7526 #else
7527 NULL,
7528 #endif
7529 };
7530
7531 #ifdef HAVE_LINUX_REGSETS
7532 void
7533 initialize_regsets_info (struct regsets_info *info)
7534 {
7535 for (info->num_regsets = 0;
7536 info->regsets[info->num_regsets].size >= 0;
7537 info->num_regsets++)
7538 ;
7539 }
7540 #endif
7541
7542 void
7543 initialize_low (void)
7544 {
7545 struct sigaction sigchld_action;
7546
7547 memset (&sigchld_action, 0, sizeof (sigchld_action));
7548 set_target_ops (&linux_target_ops);
7549
7550 linux_ptrace_init_warnings ();
7551 linux_proc_init_warnings ();
7552
7553 sigchld_action.sa_handler = sigchld_handler;
7554 sigemptyset (&sigchld_action.sa_mask);
7555 sigchld_action.sa_flags = SA_RESTART;
7556 sigaction (SIGCHLD, &sigchld_action, NULL);
7557
7558 initialize_low_arch ();
7559
7560 linux_check_ptrace_features ();
7561 }