Remove usage of find_inferior when calling kill_one_lwp_callback
deliverable/binutils-gdb.git: gdb/gdbserver/linux-low.c
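The subject line refers to a cleanup in linux_kill: the call to find_inferior with a void*-cookie callback was replaced by the for_each_thread overload that takes a lambda, which is the form visible around listing line 1397 below. A minimal before/after sketch of that pattern follows; the "before" shape is an assumption modeled on the remaining find_inferior callbacks still present in this file (e.g. linux_detach_lwp_callback), not the exact upstream diff.

  /* Before (approximate): the callback matches find_inferior's
     signature and receives the pid through a void * argument.  */
  static int
  kill_one_lwp_callback (thread_info *thread, void *args)
  {
    int pid = *(int *) args;
    /* ... kill the lwp unless it is the leader ... */
    return 0;
  }
  /* ... */
  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* After: the callback takes the pid directly, and for_each_thread
     filters threads by pid and invokes it from a lambda.  */
  static void
  kill_one_lwp_callback (thread_info *thread, int pid)
  {
    /* ... kill the lwp unless it is the leader ... */
  }
  /* ... */
  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });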
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2017 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25 #include "signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #include "common-inferior.h"
51 #include "nat/fork-inferior.h"
52 #include "environ.h"
53 #ifndef ELFMAG0
54 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
55 then ELFMAG0 will have been defined. If it didn't get included by
56 gdb_proc_service.h then including it will likely introduce a duplicate
57 definition of elf_fpregset_t. */
58 #include <elf.h>
59 #endif
60 #include "nat/linux-namespaces.h"
61
62 #ifndef SPUFS_MAGIC
63 #define SPUFS_MAGIC 0x23c9b64e
64 #endif
65
66 #ifdef HAVE_PERSONALITY
67 # include <sys/personality.h>
68 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
69 # define ADDR_NO_RANDOMIZE 0x0040000
70 # endif
71 #endif
72
73 #ifndef O_LARGEFILE
74 #define O_LARGEFILE 0
75 #endif
76
77 /* Some targets did not define these ptrace constants from the start,
78 so gdbserver defines them locally here. In the future, these may
79 be removed after they are added to asm/ptrace.h. */
80 #if !(defined(PT_TEXT_ADDR) \
81 || defined(PT_DATA_ADDR) \
82 || defined(PT_TEXT_END_ADDR))
83 #if defined(__mcoldfire__)
84 /* These are still undefined in 3.10 kernels. */
85 #define PT_TEXT_ADDR 49*4
86 #define PT_DATA_ADDR 50*4
87 #define PT_TEXT_END_ADDR 51*4
88 /* BFIN already defines these since at least 2.6.32 kernels. */
89 #elif defined(BFIN)
90 #define PT_TEXT_ADDR 220
91 #define PT_TEXT_END_ADDR 224
92 #define PT_DATA_ADDR 228
93 /* These are still undefined in 3.10 kernels. */
94 #elif defined(__TMS320C6X__)
95 #define PT_TEXT_ADDR (0x10000*4)
96 #define PT_DATA_ADDR (0x10004*4)
97 #define PT_TEXT_END_ADDR (0x10008*4)
98 #endif
99 #endif
100
101 #ifdef HAVE_LINUX_BTRACE
102 # include "nat/linux-btrace.h"
103 # include "btrace-common.h"
104 #endif
105
106 #ifndef HAVE_ELF32_AUXV_T
107 /* Copied from glibc's elf.h. */
108 typedef struct
109 {
110 uint32_t a_type; /* Entry type */
111 union
112 {
113 uint32_t a_val; /* Integer value */
114 /* We used to have pointer elements added here. We cannot do that,
115 though, since it does not work when using 32-bit definitions
116 on 64-bit platforms and vice versa. */
117 } a_un;
118 } Elf32_auxv_t;
119 #endif
120
121 #ifndef HAVE_ELF64_AUXV_T
122 /* Copied from glibc's elf.h. */
123 typedef struct
124 {
125 uint64_t a_type; /* Entry type */
126 union
127 {
128 uint64_t a_val; /* Integer value */
129 /* We used to have pointer elements added here. We cannot do that,
130 though, since it does not work when using 32-bit definitions
131 on 64-bit platforms and vice versa. */
132 } a_un;
133 } Elf64_auxv_t;
134 #endif
135
136 /* Does the current host support PTRACE_GETREGSET? */
137 int have_ptrace_getregset = -1;
138
139 /* LWP accessors. */
140
141 /* See nat/linux-nat.h. */
142
143 ptid_t
144 ptid_of_lwp (struct lwp_info *lwp)
145 {
146 return ptid_of (get_lwp_thread (lwp));
147 }
148
149 /* See nat/linux-nat.h. */
150
151 void
152 lwp_set_arch_private_info (struct lwp_info *lwp,
153 struct arch_lwp_info *info)
154 {
155 lwp->arch_private = info;
156 }
157
158 /* See nat/linux-nat.h. */
159
160 struct arch_lwp_info *
161 lwp_arch_private_info (struct lwp_info *lwp)
162 {
163 return lwp->arch_private;
164 }
165
166 /* See nat/linux-nat.h. */
167
168 int
169 lwp_is_stopped (struct lwp_info *lwp)
170 {
171 return lwp->stopped;
172 }
173
174 /* See nat/linux-nat.h. */
175
176 enum target_stop_reason
177 lwp_stop_reason (struct lwp_info *lwp)
178 {
179 return lwp->stop_reason;
180 }
181
182 /* See nat/linux-nat.h. */
183
184 int
185 lwp_is_stepping (struct lwp_info *lwp)
186 {
187 return lwp->stepping;
188 }
189
190 /* A list of all unknown processes which receive stop signals. Some
191 other process will presumably claim each of these as forked
192 children momentarily. */
193
194 struct simple_pid_list
195 {
196 /* The process ID. */
197 int pid;
198
199 /* The status as reported by waitpid. */
200 int status;
201
202 /* Next in chain. */
203 struct simple_pid_list *next;
204 };
205 struct simple_pid_list *stopped_pids;
206
207 /* Trivial list manipulation functions to keep track of a list of new
208 stopped processes. */
209
210 static void
211 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
212 {
213 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
214
215 new_pid->pid = pid;
216 new_pid->status = status;
217 new_pid->next = *listp;
218 *listp = new_pid;
219 }
220
221 static int
222 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
223 {
224 struct simple_pid_list **p;
225
226 for (p = listp; *p != NULL; p = &(*p)->next)
227 if ((*p)->pid == pid)
228 {
229 struct simple_pid_list *next = (*p)->next;
230
231 *statusp = (*p)->status;
232 xfree (*p);
233 *p = next;
234 return 1;
235 }
236 return 0;
237 }
238
239 enum stopping_threads_kind
240 {
241 /* Not stopping threads presently. */
242 NOT_STOPPING_THREADS,
243
244 /* Stopping threads. */
245 STOPPING_THREADS,
246
247 /* Stopping and suspending threads. */
248 STOPPING_AND_SUSPENDING_THREADS
249 };
250
251 /* This is set while stop_all_lwps is in effect. */
252 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
253
254 /* FIXME make into a target method? */
255 int using_threads = 1;
256
257 /* True if we're presently stabilizing threads (moving them out of
258 jump pads). */
259 static int stabilizing_threads;
260
261 static void linux_resume_one_lwp (struct lwp_info *lwp,
262 int step, int signal, siginfo_t *info);
263 static void linux_resume (struct thread_resume *resume_info, size_t n);
264 static void stop_all_lwps (int suspend, struct lwp_info *except);
265 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
266 static void unsuspend_all_lwps (struct lwp_info *except);
267 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
268 int *wstat, int options);
269 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
270 static struct lwp_info *add_lwp (ptid_t ptid);
271 static void linux_mourn (struct process_info *process);
272 static int linux_stopped_by_watchpoint (void);
273 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
274 static int lwp_is_marked_dead (struct lwp_info *lwp);
275 static void proceed_all_lwps (void);
276 static int finish_step_over (struct lwp_info *lwp);
277 static int kill_lwp (unsigned long lwpid, int signo);
278 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
279 static void complete_ongoing_step_over (void);
280 static int linux_low_ptrace_options (int attached);
281 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
282 static int proceed_one_lwp (thread_info *thread, void *except);
283
284 /* When the event-loop is doing a step-over, this points at the thread
285 being stepped. */
286 ptid_t step_over_bkpt;
287
288 /* True if the low target can hardware single-step. */
289
290 static int
291 can_hardware_single_step (void)
292 {
293 if (the_low_target.supports_hardware_single_step != NULL)
294 return the_low_target.supports_hardware_single_step ();
295 else
296 return 0;
297 }
298
299 /* True if the low target can software single-step. Such targets
300 implement the GET_NEXT_PCS callback. */
301
302 static int
303 can_software_single_step (void)
304 {
305 return (the_low_target.get_next_pcs != NULL);
306 }
307
308 /* True if the low target supports memory breakpoints. If so, we'll
309 have a GET_PC implementation. */
310
311 static int
312 supports_breakpoints (void)
313 {
314 return (the_low_target.get_pc != NULL);
315 }
316
317 /* Returns true if this target can support fast tracepoints. This
318 does not mean that the in-process agent has been loaded in the
319 inferior. */
320
321 static int
322 supports_fast_tracepoints (void)
323 {
324 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
325 }
326
327 /* True if LWP is stopped in its stepping range. */
328
329 static int
330 lwp_in_step_range (struct lwp_info *lwp)
331 {
332 CORE_ADDR pc = lwp->stop_pc;
333
334 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
335 }
336
337 struct pending_signals
338 {
339 int signal;
340 siginfo_t info;
341 struct pending_signals *prev;
342 };
343
344 /* The read/write ends of the pipe registered as waitable file in the
345 event loop. */
346 static int linux_event_pipe[2] = { -1, -1 };
347
348 /* True if we're currently in async mode. */
349 #define target_is_async_p() (linux_event_pipe[0] != -1)
350
351 static void send_sigstop (struct lwp_info *lwp);
352 static void wait_for_sigstop (void);
353
354 /* Return non-zero if HEADER is a 64-bit ELF file. */
355
356 static int
357 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
358 {
359 if (header->e_ident[EI_MAG0] == ELFMAG0
360 && header->e_ident[EI_MAG1] == ELFMAG1
361 && header->e_ident[EI_MAG2] == ELFMAG2
362 && header->e_ident[EI_MAG3] == ELFMAG3)
363 {
364 *machine = header->e_machine;
365 return header->e_ident[EI_CLASS] == ELFCLASS64;
366
367 }
368 *machine = EM_NONE;
369 return -1;
370 }
371
372 /* Return non-zero if FILE is a 64-bit ELF file,
373 zero if the file is not a 64-bit ELF file,
374 and -1 if the file is not accessible or doesn't exist. */
375
376 static int
377 elf_64_file_p (const char *file, unsigned int *machine)
378 {
379 Elf64_Ehdr header;
380 int fd;
381
382 fd = open (file, O_RDONLY);
383 if (fd < 0)
384 return -1;
385
386 if (read (fd, &header, sizeof (header)) != sizeof (header))
387 {
388 close (fd);
389 return 0;
390 }
391 close (fd);
392
393 return elf_64_header_p (&header, machine);
394 }
395
396 /* Accepts an integer PID; returns true if the executable that PID is
397 running is a 64-bit ELF file. */
398
399 int
400 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
401 {
402 char file[PATH_MAX];
403
404 sprintf (file, "/proc/%d/exe", pid);
405 return elf_64_file_p (file, machine);
406 }
407
408 static void
409 delete_lwp (struct lwp_info *lwp)
410 {
411 struct thread_info *thr = get_lwp_thread (lwp);
412
413 if (debug_threads)
414 debug_printf ("deleting %ld\n", lwpid_of (thr));
415
416 remove_thread (thr);
417
418 if (the_low_target.delete_thread != NULL)
419 the_low_target.delete_thread (lwp->arch_private);
420 else
421 gdb_assert (lwp->arch_private == NULL);
422
423 free (lwp);
424 }
425
426 /* Add a process to the common process list, and set its private
427 data. */
428
429 static struct process_info *
430 linux_add_process (int pid, int attached)
431 {
432 struct process_info *proc;
433
434 proc = add_process (pid, attached);
435 proc->priv = XCNEW (struct process_info_private);
436
437 if (the_low_target.new_process != NULL)
438 proc->priv->arch_private = the_low_target.new_process ();
439
440 return proc;
441 }
442
443 static CORE_ADDR get_pc (struct lwp_info *lwp);
444
445 /* Call the target arch_setup function on the current thread. */
446
447 static void
448 linux_arch_setup (void)
449 {
450 the_low_target.arch_setup ();
451 }
452
453 /* Call the target arch_setup function on THREAD. */
454
455 static void
456 linux_arch_setup_thread (struct thread_info *thread)
457 {
458 struct thread_info *saved_thread;
459
460 saved_thread = current_thread;
461 current_thread = thread;
462
463 linux_arch_setup ();
464
465 current_thread = saved_thread;
466 }
467
468 /* Handle a GNU/Linux extended wait response. If we see a clone,
469 fork, or vfork event, we need to add the new LWP to our list
470 (and return 0 so as not to report the trap to higher layers).
471 If we see an exec event, we will modify ORIG_EVENT_LWP to point
472 to a new LWP representing the new program. */
473
474 static int
475 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
476 {
477 struct lwp_info *event_lwp = *orig_event_lwp;
478 int event = linux_ptrace_get_extended_event (wstat);
479 struct thread_info *event_thr = get_lwp_thread (event_lwp);
480 struct lwp_info *new_lwp;
481
482 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
483
484 /* All extended events we currently use are mid-syscall. Only
485 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
486 you have to be using PTRACE_SEIZE to get that. */
487 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
488
489 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
490 || (event == PTRACE_EVENT_CLONE))
491 {
492 ptid_t ptid;
493 unsigned long new_pid;
494 int ret, status;
495
496 /* Get the pid of the new lwp. */
497 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
498 &new_pid);
499
500 /* If we haven't already seen the new PID stop, wait for it now. */
501 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
502 {
503 /* The new child has a pending SIGSTOP. We can't affect it until it
504 hits the SIGSTOP, but we're already attached. */
505
506 ret = my_waitpid (new_pid, &status, __WALL);
507
508 if (ret == -1)
509 perror_with_name ("waiting for new child");
510 else if (ret != new_pid)
511 warning ("wait returned unexpected PID %d", ret);
512 else if (!WIFSTOPPED (status))
513 warning ("wait returned unexpected status 0x%x", status);
514 }
515
516 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
517 {
518 struct process_info *parent_proc;
519 struct process_info *child_proc;
520 struct lwp_info *child_lwp;
521 struct thread_info *child_thr;
522 struct target_desc *tdesc;
523
524 ptid = ptid_build (new_pid, new_pid, 0);
525
526 if (debug_threads)
527 {
528 debug_printf ("HEW: Got fork event from LWP %ld, "
529 "new child is %d\n",
530 ptid_get_lwp (ptid_of (event_thr)),
531 ptid_get_pid (ptid));
532 }
533
534 /* Add the new process to the tables and clone the breakpoint
535 lists of the parent. We need to do this even if the new process
536 will be detached, since we will need the process object and the
537 breakpoints to remove any breakpoints from memory when we
538 detach, and the client side will access registers. */
539 child_proc = linux_add_process (new_pid, 0);
540 gdb_assert (child_proc != NULL);
541 child_lwp = add_lwp (ptid);
542 gdb_assert (child_lwp != NULL);
543 child_lwp->stopped = 1;
544 child_lwp->must_set_ptrace_flags = 1;
545 child_lwp->status_pending_p = 0;
546 child_thr = get_lwp_thread (child_lwp);
547 child_thr->last_resume_kind = resume_stop;
548 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
549
550 /* If we're suspending all threads, leave this one suspended
551 too. If the fork/clone parent is stepping over a breakpoint,
552 all other threads have been suspended already. Leave the
553 child suspended too. */
554 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
555 || event_lwp->bp_reinsert != 0)
556 {
557 if (debug_threads)
558 debug_printf ("HEW: leaving child suspended\n");
559 child_lwp->suspended = 1;
560 }
561
562 parent_proc = get_thread_process (event_thr);
563 child_proc->attached = parent_proc->attached;
564
565 if (event_lwp->bp_reinsert != 0
566 && can_software_single_step ()
567 && event == PTRACE_EVENT_VFORK)
568 {
569 /* If we leave single-step breakpoints there, the child will
570 hit them, so uninsert single-step breakpoints from the parent
571 (and the child). Once the vfork child is done, reinsert
572 them back into the parent. */
573 uninsert_single_step_breakpoints (event_thr);
574 }
575
576 clone_all_breakpoints (child_thr, event_thr);
577
578 tdesc = allocate_target_description ();
579 copy_target_description (tdesc, parent_proc->tdesc);
580 child_proc->tdesc = tdesc;
581
582 /* Clone arch-specific process data. */
583 if (the_low_target.new_fork != NULL)
584 the_low_target.new_fork (parent_proc, child_proc);
585
586 /* Save fork info in the parent thread. */
587 if (event == PTRACE_EVENT_FORK)
588 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
589 else if (event == PTRACE_EVENT_VFORK)
590 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
591
592 event_lwp->waitstatus.value.related_pid = ptid;
593
594 /* The status_pending field contains bits denoting the
595 extended event, so when the pending event is handled,
596 the handler will look at lwp->waitstatus. */
597 event_lwp->status_pending_p = 1;
598 event_lwp->status_pending = wstat;
599
600 /* Link the threads until the parent event is passed on to
601 higher layers. */
602 event_lwp->fork_relative = child_lwp;
603 child_lwp->fork_relative = event_lwp;
604
605 /* If the parent thread is doing a step-over with single-step
606 breakpoints, the list of single-step breakpoints is cloned
607 from the parent's. Remove them from the child process.
608 In case of vfork, we'll reinsert them once the vforked
609 child is done. */
610 if (event_lwp->bp_reinsert != 0
611 && can_software_single_step ())
612 {
613 /* The child process is forked and stopped, so it is safe
614 to access its memory without stopping all other threads
615 from other processes. */
616 delete_single_step_breakpoints (child_thr);
617
618 gdb_assert (has_single_step_breakpoints (event_thr));
619 gdb_assert (!has_single_step_breakpoints (child_thr));
620 }
621
622 /* Report the event. */
623 return 0;
624 }
625
626 if (debug_threads)
627 debug_printf ("HEW: Got clone event "
628 "from LWP %ld, new child is LWP %ld\n",
629 lwpid_of (event_thr), new_pid);
630
631 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
632 new_lwp = add_lwp (ptid);
633
634 /* Either we're going to immediately resume the new thread
635 or leave it stopped. linux_resume_one_lwp is a nop if it
636 thinks the thread is currently running, so set this first
637 before calling linux_resume_one_lwp. */
638 new_lwp->stopped = 1;
639
640 /* If we're suspending all threads, leave this one suspended
641 too. If the fork/clone parent is stepping over a breakpoint,
642 all other threads have been suspended already. Leave the
643 child suspended too. */
644 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
645 || event_lwp->bp_reinsert != 0)
646 new_lwp->suspended = 1;
647
648 /* Normally we will get the pending SIGSTOP. But in some cases
649 we might get another signal delivered to the group first.
650 If we do get another signal, be sure not to lose it. */
651 if (WSTOPSIG (status) != SIGSTOP)
652 {
653 new_lwp->stop_expected = 1;
654 new_lwp->status_pending_p = 1;
655 new_lwp->status_pending = status;
656 }
657 else if (report_thread_events)
658 {
659 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
660 new_lwp->status_pending_p = 1;
661 new_lwp->status_pending = status;
662 }
663
664 thread_db_notice_clone (event_thr, ptid);
665
666 /* Don't report the event. */
667 return 1;
668 }
669 else if (event == PTRACE_EVENT_VFORK_DONE)
670 {
671 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
672
673 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
674 {
675 reinsert_single_step_breakpoints (event_thr);
676
677 gdb_assert (has_single_step_breakpoints (event_thr));
678 }
679
680 /* Report the event. */
681 return 0;
682 }
683 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
684 {
685 struct process_info *proc;
686 std::vector<int> syscalls_to_catch;
687 ptid_t event_ptid;
688 pid_t event_pid;
689
690 if (debug_threads)
691 {
692 debug_printf ("HEW: Got exec event from LWP %ld\n",
693 lwpid_of (event_thr));
694 }
695
696 /* Get the event ptid. */
697 event_ptid = ptid_of (event_thr);
698 event_pid = ptid_get_pid (event_ptid);
699
700 /* Save the syscall list from the execing process. */
701 proc = get_thread_process (event_thr);
702 syscalls_to_catch = std::move (proc->syscalls_to_catch);
703
704 /* Delete the execing process and all its threads. */
705 linux_mourn (proc);
706 current_thread = NULL;
707
708 /* Create a new process/lwp/thread. */
709 proc = linux_add_process (event_pid, 0);
710 event_lwp = add_lwp (event_ptid);
711 event_thr = get_lwp_thread (event_lwp);
712 gdb_assert (current_thread == event_thr);
713 linux_arch_setup_thread (event_thr);
714
715 /* Set the event status. */
716 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
717 event_lwp->waitstatus.value.execd_pathname
718 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
719
720 /* Mark the exec status as pending. */
721 event_lwp->stopped = 1;
722 event_lwp->status_pending_p = 1;
723 event_lwp->status_pending = wstat;
724 event_thr->last_resume_kind = resume_continue;
725 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
726
727 /* Update syscall state in the new lwp, effectively mid-syscall too. */
728 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
729
730 /* Restore the list to catch. Don't rely on the client, which is free
731 to avoid sending a new list when the architecture doesn't change.
732 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
733 proc->syscalls_to_catch = std::move (syscalls_to_catch);
734
735 /* Report the event. */
736 *orig_event_lwp = event_lwp;
737 return 0;
738 }
739
740 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
741 }
742
743 /* Return the PC as read from the regcache of LWP, without any
744 adjustment. */
745
746 static CORE_ADDR
747 get_pc (struct lwp_info *lwp)
748 {
749 struct thread_info *saved_thread;
750 struct regcache *regcache;
751 CORE_ADDR pc;
752
753 if (the_low_target.get_pc == NULL)
754 return 0;
755
756 saved_thread = current_thread;
757 current_thread = get_lwp_thread (lwp);
758
759 regcache = get_thread_regcache (current_thread, 1);
760 pc = (*the_low_target.get_pc) (regcache);
761
762 if (debug_threads)
763 debug_printf ("pc is 0x%lx\n", (long) pc);
764
765 current_thread = saved_thread;
766 return pc;
767 }
768
769 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
770 Fill *SYSNO with the syscall nr trapped. */
771
772 static void
773 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
774 {
775 struct thread_info *saved_thread;
776 struct regcache *regcache;
777
778 if (the_low_target.get_syscall_trapinfo == NULL)
779 {
780 /* If we cannot get the syscall trapinfo, report an unknown
781 system call number. */
782 *sysno = UNKNOWN_SYSCALL;
783 return;
784 }
785
786 saved_thread = current_thread;
787 current_thread = get_lwp_thread (lwp);
788
789 regcache = get_thread_regcache (current_thread, 1);
790 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
791
792 if (debug_threads)
793 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
794
795 current_thread = saved_thread;
796 }
797
798 static int check_stopped_by_watchpoint (struct lwp_info *child);
799
800 /* Called when the LWP stopped for a signal/trap. If it stopped for a
801 trap check what caused it (breakpoint, watchpoint, trace, etc.),
802 and save the result in the LWP's stop_reason field. If it stopped
803 for a breakpoint, decrement the PC if necessary on the lwp's
804 architecture. Returns true if we now have the LWP's stop PC. */
805
806 static int
807 save_stop_reason (struct lwp_info *lwp)
808 {
809 CORE_ADDR pc;
810 CORE_ADDR sw_breakpoint_pc;
811 struct thread_info *saved_thread;
812 #if USE_SIGTRAP_SIGINFO
813 siginfo_t siginfo;
814 #endif
815
816 if (the_low_target.get_pc == NULL)
817 return 0;
818
819 pc = get_pc (lwp);
820 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
821
822 /* breakpoint_at reads from the current thread. */
823 saved_thread = current_thread;
824 current_thread = get_lwp_thread (lwp);
825
826 #if USE_SIGTRAP_SIGINFO
827 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
828 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
829 {
830 if (siginfo.si_signo == SIGTRAP)
831 {
832 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
833 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
834 {
835 /* The si_code is ambiguous on this arch -- check debug
836 registers. */
837 if (!check_stopped_by_watchpoint (lwp))
838 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
839 }
840 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
841 {
842 /* If we determine the LWP stopped for a SW breakpoint,
843 trust it. Particularly don't check watchpoint
844 registers, because at least on s390, we'd find
845 stopped-by-watchpoint as long as there's a watchpoint
846 set. */
847 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
848 }
849 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
850 {
851 /* This can indicate either a hardware breakpoint or
852 hardware watchpoint. Check debug registers. */
853 if (!check_stopped_by_watchpoint (lwp))
854 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
855 }
856 else if (siginfo.si_code == TRAP_TRACE)
857 {
858 /* We may have single stepped an instruction that
859 triggered a watchpoint. In that case, on some
860 architectures (such as x86), instead of TRAP_HWBKPT,
861 si_code indicates TRAP_TRACE, and we need to check
862 the debug registers separately. */
863 if (!check_stopped_by_watchpoint (lwp))
864 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
865 }
866 }
867 }
868 #else
869 /* We may have just stepped a breakpoint instruction. E.g., in
870 non-stop mode, GDB first tells the thread A to step a range, and
871 then the user inserts a breakpoint inside the range. In that
872 case we need to report the breakpoint PC. */
873 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
874 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
875 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
876
877 if (hardware_breakpoint_inserted_here (pc))
878 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
879
880 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
881 check_stopped_by_watchpoint (lwp);
882 #endif
883
884 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
885 {
886 if (debug_threads)
887 {
888 struct thread_info *thr = get_lwp_thread (lwp);
889
890 debug_printf ("CSBB: %s stopped by software breakpoint\n",
891 target_pid_to_str (ptid_of (thr)));
892 }
893
894 /* Back up the PC if necessary. */
895 if (pc != sw_breakpoint_pc)
896 {
897 struct regcache *regcache
898 = get_thread_regcache (current_thread, 1);
899 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
900 }
901
902 /* Update this so we record the correct stop PC below. */
903 pc = sw_breakpoint_pc;
904 }
905 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
906 {
907 if (debug_threads)
908 {
909 struct thread_info *thr = get_lwp_thread (lwp);
910
911 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
912 target_pid_to_str (ptid_of (thr)));
913 }
914 }
915 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
916 {
917 if (debug_threads)
918 {
919 struct thread_info *thr = get_lwp_thread (lwp);
920
921 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
922 target_pid_to_str (ptid_of (thr)));
923 }
924 }
925 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
926 {
927 if (debug_threads)
928 {
929 struct thread_info *thr = get_lwp_thread (lwp);
930
931 debug_printf ("CSBB: %s stopped by trace\n",
932 target_pid_to_str (ptid_of (thr)));
933 }
934 }
935
936 lwp->stop_pc = pc;
937 current_thread = saved_thread;
938 return 1;
939 }
940
941 static struct lwp_info *
942 add_lwp (ptid_t ptid)
943 {
944 struct lwp_info *lwp;
945
946 lwp = XCNEW (struct lwp_info);
947
948 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
949
950 if (the_low_target.new_thread != NULL)
951 the_low_target.new_thread (lwp);
952
953 lwp->thread = add_thread (ptid, lwp);
954
955 return lwp;
956 }
957
958 /* Callback to be used when calling fork_inferior, responsible for
959 actually initiating the tracing of the inferior. */
960
961 static void
962 linux_ptrace_fun ()
963 {
964 if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
965 (PTRACE_TYPE_ARG4) 0) < 0)
966 trace_start_error_with_name ("ptrace");
967
968 if (setpgid (0, 0) < 0)
969 trace_start_error_with_name ("setpgid");
970
971 /* If GDBserver is connected to gdb via stdio, redirect the inferior's
972 stdout to stderr so that inferior i/o doesn't corrupt the connection.
973 Also, redirect stdin to /dev/null. */
974 if (remote_connection_is_stdio ())
975 {
976 if (close (0) < 0)
977 trace_start_error_with_name ("close");
978 if (open ("/dev/null", O_RDONLY) < 0)
979 trace_start_error_with_name ("open");
980 if (dup2 (2, 1) < 0)
981 trace_start_error_with_name ("dup2");
982 if (write (2, "stdin/stdout redirected\n",
983 sizeof ("stdin/stdout redirected\n") - 1) < 0)
984 {
985 /* Errors ignored. */;
986 }
987 }
988 }
989
990 /* Start an inferior process and return its pid.
991 PROGRAM is the name of the program to be started, and PROGRAM_ARGS
992 are its arguments. */
993
994 static int
995 linux_create_inferior (const char *program,
996 const std::vector<char *> &program_args)
997 {
998 struct lwp_info *new_lwp;
999 int pid;
1000 ptid_t ptid;
1001 struct cleanup *restore_personality
1002 = maybe_disable_address_space_randomization (disable_randomization);
1003 std::string str_program_args = stringify_argv (program_args);
1004
1005 pid = fork_inferior (program,
1006 str_program_args.c_str (),
1007 get_environ ()->envp (), linux_ptrace_fun,
1008 NULL, NULL, NULL, NULL);
1009
1010 do_cleanups (restore_personality);
1011
1012 linux_add_process (pid, 0);
1013
1014 ptid = ptid_build (pid, pid, 0);
1015 new_lwp = add_lwp (ptid);
1016 new_lwp->must_set_ptrace_flags = 1;
1017
1018 post_fork_inferior (pid, program);
1019
1020 return pid;
1021 }
1022
1023 /* Implement the post_create_inferior target_ops method. */
1024
1025 static void
1026 linux_post_create_inferior (void)
1027 {
1028 struct lwp_info *lwp = get_thread_lwp (current_thread);
1029
1030 linux_arch_setup ();
1031
1032 if (lwp->must_set_ptrace_flags)
1033 {
1034 struct process_info *proc = current_process ();
1035 int options = linux_low_ptrace_options (proc->attached);
1036
1037 linux_enable_event_reporting (lwpid_of (current_thread), options);
1038 lwp->must_set_ptrace_flags = 0;
1039 }
1040 }
1041
1042 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1043 error. */
1044
1045 int
1046 linux_attach_lwp (ptid_t ptid)
1047 {
1048 struct lwp_info *new_lwp;
1049 int lwpid = ptid_get_lwp (ptid);
1050
1051 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1052 != 0)
1053 return errno;
1054
1055 new_lwp = add_lwp (ptid);
1056
1057 /* We need to wait for SIGSTOP before being able to make the next
1058 ptrace call on this LWP. */
1059 new_lwp->must_set_ptrace_flags = 1;
1060
1061 if (linux_proc_pid_is_stopped (lwpid))
1062 {
1063 if (debug_threads)
1064 debug_printf ("Attached to a stopped process\n");
1065
1066 /* The process is definitely stopped. It is in a job control
1067 stop, unless the kernel predates the TASK_STOPPED /
1068 TASK_TRACED distinction, in which case it might be in a
1069 ptrace stop. Make sure it is in a ptrace stop; from there we
1070 can kill it, signal it, et cetera.
1071
1072 First make sure there is a pending SIGSTOP. Since we are
1073 already attached, the process can not transition from stopped
1074 to running without a PTRACE_CONT; so we know this signal will
1075 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1076 probably already in the queue (unless this kernel is old
1077 enough to use TASK_STOPPED for ptrace stops); but since
1078 SIGSTOP is not an RT signal, it can only be queued once. */
1079 kill_lwp (lwpid, SIGSTOP);
1080
1081 /* Finally, resume the stopped process. This will deliver the
1082 SIGSTOP (or a higher priority signal, just like normal
1083 PTRACE_ATTACH), which we'll catch later on. */
1084 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1085 }
1086
1087 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1088 brings it to a halt.
1089
1090 There are several cases to consider here:
1091
1092 1) gdbserver has already attached to the process and is being notified
1093 of a new thread that is being created.
1094 In this case we should ignore that SIGSTOP and resume the
1095 process. This is handled below by setting stop_expected = 1,
1096 and the fact that add_thread sets last_resume_kind ==
1097 resume_continue.
1098
1099 2) This is the first thread (the process thread), and we're attaching
1100 to it via attach_inferior.
1101 In this case we want the process thread to stop.
1102 This is handled by having linux_attach set last_resume_kind ==
1103 resume_stop after we return.
1104
1105 If the pid we are attaching to is also the tgid, we attach to and
1106 stop all the existing threads. Otherwise, we attach to pid and
1107 ignore any other threads in the same group as this pid.
1108
1109 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1110 existing threads.
1111 In this case we want the thread to stop.
1112 FIXME: This case is currently not properly handled.
1113 We should wait for the SIGSTOP but don't. Things work apparently
1114 because enough time passes between when we ptrace (ATTACH) and when
1115 gdb makes the next ptrace call on the thread.
1116
1117 On the other hand, if we are currently trying to stop all threads, we
1118 should treat the new thread as if we had sent it a SIGSTOP. This works
1119 because we are guaranteed that the add_lwp call above added us to the
1120 end of the list, and so the new thread has not yet reached
1121 wait_for_sigstop (but will). */
1122 new_lwp->stop_expected = 1;
1123
1124 return 0;
1125 }
1126
1127 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1128 already attached. Returns true if a new LWP is found, false
1129 otherwise. */
1130
1131 static int
1132 attach_proc_task_lwp_callback (ptid_t ptid)
1133 {
1134 /* Is this a new thread? */
1135 if (find_thread_ptid (ptid) == NULL)
1136 {
1137 int lwpid = ptid_get_lwp (ptid);
1138 int err;
1139
1140 if (debug_threads)
1141 debug_printf ("Found new lwp %d\n", lwpid);
1142
1143 err = linux_attach_lwp (ptid);
1144
1145 /* Be quiet if we simply raced with the thread exiting. EPERM
1146 is returned if the thread's task still exists, and is marked
1147 as exited or zombie, as well as other conditions, so in that
1148 case, confirm the status in /proc/PID/status. */
1149 if (err == ESRCH
1150 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1151 {
1152 if (debug_threads)
1153 {
1154 debug_printf ("Cannot attach to lwp %d: "
1155 "thread is gone (%d: %s)\n",
1156 lwpid, err, strerror (err));
1157 }
1158 }
1159 else if (err != 0)
1160 {
1161 warning (_("Cannot attach to lwp %d: %s"),
1162 lwpid,
1163 linux_ptrace_attach_fail_reason_string (ptid, err));
1164 }
1165
1166 return 1;
1167 }
1168 return 0;
1169 }
1170
1171 static void async_file_mark (void);
1172
1173 /* Attach to PID. If PID is the tgid, attach to it and all
1174 of its threads. */
1175
1176 static int
1177 linux_attach (unsigned long pid)
1178 {
1179 struct process_info *proc;
1180 struct thread_info *initial_thread;
1181 ptid_t ptid = ptid_build (pid, pid, 0);
1182 int err;
1183
1184 /* Attach to PID. We will check for other threads
1185 soon. */
1186 err = linux_attach_lwp (ptid);
1187 if (err != 0)
1188 error ("Cannot attach to process %ld: %s",
1189 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1190
1191 proc = linux_add_process (pid, 1);
1192
1193 /* Don't ignore the initial SIGSTOP if we just attached to this
1194 process. It will be collected by wait shortly. */
1195 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1196 initial_thread->last_resume_kind = resume_stop;
1197
1198 /* We must attach to every LWP. If /proc is mounted, use that to
1199 find them now. On the one hand, the inferior may be using raw
1200 clone instead of using pthreads. On the other hand, even if it
1201 is using pthreads, GDB may not be connected yet (thread_db needs
1202 to do symbol lookups, through qSymbol). Also, thread_db walks
1203 structures in the inferior's address space to find the list of
1204 threads/LWPs, and those structures may well be corrupted. Note
1205 that once thread_db is loaded, we'll still use it to list threads
1206 and associate pthread info with each LWP. */
1207 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1208
1209 /* GDB will shortly read the xml target description for this
1210 process, to figure out the process' architecture. But the target
1211 description is only filled in when the first process/thread in
1212 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1213 that now, otherwise, if GDB is fast enough, it could read the
1214 target description _before_ that initial stop. */
1215 if (non_stop)
1216 {
1217 struct lwp_info *lwp;
1218 int wstat, lwpid;
1219 ptid_t pid_ptid = pid_to_ptid (pid);
1220
1221 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1222 &wstat, __WALL);
1223 gdb_assert (lwpid > 0);
1224
1225 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1226
1227 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1228 {
1229 lwp->status_pending_p = 1;
1230 lwp->status_pending = wstat;
1231 }
1232
1233 initial_thread->last_resume_kind = resume_continue;
1234
1235 async_file_mark ();
1236
1237 gdb_assert (proc->tdesc != NULL);
1238 }
1239
1240 return 0;
1241 }
1242
1243 struct counter
1244 {
1245 int pid;
1246 int count;
1247 };
1248
1249 static int
1250 second_thread_of_pid_p (thread_info *thread, void *args)
1251 {
1252 struct counter *counter = (struct counter *) args;
1253
1254 if (thread->id.pid () == counter->pid)
1255 {
1256 if (++counter->count > 1)
1257 return 1;
1258 }
1259
1260 return 0;
1261 }
1262
1263 static int
1264 last_thread_of_process_p (int pid)
1265 {
1266 struct counter counter = { pid , 0 };
1267
1268 return (find_inferior (&all_threads,
1269 second_thread_of_pid_p, &counter) == NULL);
1270 }
1271
1272 /* Kill LWP. */
1273
1274 static void
1275 linux_kill_one_lwp (struct lwp_info *lwp)
1276 {
1277 struct thread_info *thr = get_lwp_thread (lwp);
1278 int pid = lwpid_of (thr);
1279
1280 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1281 there is no signal context, and ptrace(PTRACE_KILL) (or
1282 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1283 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1284 alternative is to kill with SIGKILL. We only need one SIGKILL
1285 per process, not one for each thread. But since we still support
1286 debugging programs using raw clone without CLONE_THREAD,
1287 we send one for each thread. For years, we used PTRACE_KILL
1288 only, so we're being a bit paranoid about some old kernels where
1289 PTRACE_KILL might work better (dubious if there are any such, but
1290 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1291 second, and so we're fine everywhere. */
1292
1293 errno = 0;
1294 kill_lwp (pid, SIGKILL);
1295 if (debug_threads)
1296 {
1297 int save_errno = errno;
1298
1299 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1300 target_pid_to_str (ptid_of (thr)),
1301 save_errno ? strerror (save_errno) : "OK");
1302 }
1303
1304 errno = 0;
1305 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1306 if (debug_threads)
1307 {
1308 int save_errno = errno;
1309
1310 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1311 target_pid_to_str (ptid_of (thr)),
1312 save_errno ? strerror (save_errno) : "OK");
1313 }
1314 }
1315
1316 /* Kill LWP and wait for it to die. */
1317
1318 static void
1319 kill_wait_lwp (struct lwp_info *lwp)
1320 {
1321 struct thread_info *thr = get_lwp_thread (lwp);
1322 int pid = ptid_get_pid (ptid_of (thr));
1323 int lwpid = ptid_get_lwp (ptid_of (thr));
1324 int wstat;
1325 int res;
1326
1327 if (debug_threads)
1328 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1329
1330 do
1331 {
1332 linux_kill_one_lwp (lwp);
1333
1334 /* Make sure it died. Notes:
1335
1336 - The loop is most likely unnecessary.
1337
1338 - We don't use linux_wait_for_event as that could delete lwps
1339 while we're iterating over them. We're not interested in
1340 any pending status at this point, only in making sure all
1341 wait status on the kernel side are collected until the
1342 process is reaped.
1343
1344 - We don't use __WALL here as the __WALL emulation relies on
1345 SIGCHLD, and killing a stopped process doesn't generate
1346 one, nor an exit status.
1347 */
1348 res = my_waitpid (lwpid, &wstat, 0);
1349 if (res == -1 && errno == ECHILD)
1350 res = my_waitpid (lwpid, &wstat, __WCLONE);
1351 } while (res > 0 && WIFSTOPPED (wstat));
1352
1353 /* Even if it was stopped, the child may have already disappeared.
1354 E.g., if it was killed by SIGKILL. */
1355 if (res < 0 && errno != ECHILD)
1356 perror_with_name ("kill_wait_lwp");
1357 }
1358
1359 /* Callback for `for_each_thread'. Kills an lwp of a given process,
1360 except the leader. */
1361
1362 static void
1363 kill_one_lwp_callback (thread_info *thread, int pid)
1364 {
1365 struct lwp_info *lwp = get_thread_lwp (thread);
1366
1367 /* We avoid killing the first thread here, because of a Linux kernel (at
1368 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1369 the children get a chance to be reaped, it will remain a zombie
1370 forever. */
1371
1372 if (lwpid_of (thread) == pid)
1373 {
1374 if (debug_threads)
1375 debug_printf ("lkop: is last of process %s\n",
1376 target_pid_to_str (thread->id));
1377 return;
1378 }
1379
1380 kill_wait_lwp (lwp);
1381 }
1382
1383 static int
1384 linux_kill (int pid)
1385 {
1386 struct process_info *process;
1387 struct lwp_info *lwp;
1388
1389 process = find_process_pid (pid);
1390 if (process == NULL)
1391 return -1;
1392
1393 /* If we're killing a running inferior, make sure it is stopped
1394 first, as PTRACE_KILL will not work otherwise. */
1395 stop_all_lwps (0, NULL);
1396
1397 for_each_thread (pid, [&] (thread_info *thread)
1398 {
1399 kill_one_lwp_callback (thread, pid);
1400 });
1401
1402 /* See the comment in kill_one_lwp_callback. We did not kill the first
1403 thread in the list, so do so now. */
1404 lwp = find_lwp_pid (pid_to_ptid (pid));
1405
1406 if (lwp == NULL)
1407 {
1408 if (debug_threads)
1409 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1410 pid);
1411 }
1412 else
1413 kill_wait_lwp (lwp);
1414
1415 the_target->mourn (process);
1416
1417 /* Since we presently can only stop all lwps of all processes, we
1418 need to unstop lwps of other processes. */
1419 unstop_all_lwps (0, NULL);
1420 return 0;
1421 }
1422
1423 /* Get pending signal of THREAD, for detaching purposes. This is the
1424 signal the thread last stopped for, which we need to deliver to the
1425 thread when detaching, otherwise, it'd be suppressed/lost. */
1426
1427 static int
1428 get_detach_signal (struct thread_info *thread)
1429 {
1430 enum gdb_signal signo = GDB_SIGNAL_0;
1431 int status;
1432 struct lwp_info *lp = get_thread_lwp (thread);
1433
1434 if (lp->status_pending_p)
1435 status = lp->status_pending;
1436 else
1437 {
1438 /* If the thread had been suspended by gdbserver, and it stopped
1439 cleanly, then it'll have stopped with SIGSTOP. But we don't
1440 want to deliver that SIGSTOP. */
1441 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1442 || thread->last_status.value.sig == GDB_SIGNAL_0)
1443 return 0;
1444
1445 /* Otherwise, we may need to deliver the signal we
1446 intercepted. */
1447 status = lp->last_status;
1448 }
1449
1450 if (!WIFSTOPPED (status))
1451 {
1452 if (debug_threads)
1453 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1454 target_pid_to_str (ptid_of (thread)));
1455 return 0;
1456 }
1457
1458 /* Extended wait statuses aren't real SIGTRAPs. */
1459 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1460 {
1461 if (debug_threads)
1462 debug_printf ("GPS: lwp %s had stopped with extended "
1463 "status: no pending signal\n",
1464 target_pid_to_str (ptid_of (thread)));
1465 return 0;
1466 }
1467
1468 signo = gdb_signal_from_host (WSTOPSIG (status));
1469
1470 if (program_signals_p && !program_signals[signo])
1471 {
1472 if (debug_threads)
1473 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1474 target_pid_to_str (ptid_of (thread)),
1475 gdb_signal_to_string (signo));
1476 return 0;
1477 }
1478 else if (!program_signals_p
1479 /* If we have no way to know which signals GDB does not
1480 want to have passed to the program, assume
1481 SIGTRAP/SIGINT, which is GDB's default. */
1482 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1483 {
1484 if (debug_threads)
1485 debug_printf ("GPS: lwp %s had signal %s, "
1486 "but we don't know if we should pass it. "
1487 "Default to not.\n",
1488 target_pid_to_str (ptid_of (thread)),
1489 gdb_signal_to_string (signo));
1490 return 0;
1491 }
1492 else
1493 {
1494 if (debug_threads)
1495 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1496 target_pid_to_str (ptid_of (thread)),
1497 gdb_signal_to_string (signo));
1498
1499 return WSTOPSIG (status);
1500 }
1501 }
1502
1503 /* Detach from LWP. */
1504
1505 static void
1506 linux_detach_one_lwp (struct lwp_info *lwp)
1507 {
1508 struct thread_info *thread = get_lwp_thread (lwp);
1509 int sig;
1510 int lwpid;
1511
1512 /* If there is a pending SIGSTOP, get rid of it. */
1513 if (lwp->stop_expected)
1514 {
1515 if (debug_threads)
1516 debug_printf ("Sending SIGCONT to %s\n",
1517 target_pid_to_str (ptid_of (thread)));
1518
1519 kill_lwp (lwpid_of (thread), SIGCONT);
1520 lwp->stop_expected = 0;
1521 }
1522
1523 /* Pass on any pending signal for this thread. */
1524 sig = get_detach_signal (thread);
1525
1526 /* Preparing to resume may try to write registers, and fail if the
1527 lwp is zombie. If that happens, ignore the error. We'll handle
1528 it below, when detach fails with ESRCH. */
1529 TRY
1530 {
1531 /* Flush any pending changes to the process's registers. */
1532 regcache_invalidate_thread (thread);
1533
1534 /* Finally, let it resume. */
1535 if (the_low_target.prepare_to_resume != NULL)
1536 the_low_target.prepare_to_resume (lwp);
1537 }
1538 CATCH (ex, RETURN_MASK_ERROR)
1539 {
1540 if (!check_ptrace_stopped_lwp_gone (lwp))
1541 throw_exception (ex);
1542 }
1543 END_CATCH
1544
1545 lwpid = lwpid_of (thread);
1546 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1547 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1548 {
1549 int save_errno = errno;
1550
1551 /* We know the thread exists, so ESRCH must mean the lwp is
1552 zombie. This can happen if one of the already-detached
1553 threads exits the whole thread group. In that case we're
1554 still attached, and must reap the lwp. */
1555 if (save_errno == ESRCH)
1556 {
1557 int ret, status;
1558
1559 ret = my_waitpid (lwpid, &status, __WALL);
1560 if (ret == -1)
1561 {
1562 warning (_("Couldn't reap LWP %d while detaching: %s"),
1563 lwpid, strerror (errno));
1564 }
1565 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1566 {
1567 warning (_("Reaping LWP %d while detaching "
1568 "returned unexpected status 0x%x"),
1569 lwpid, status);
1570 }
1571 }
1572 else
1573 {
1574 error (_("Can't detach %s: %s"),
1575 target_pid_to_str (ptid_of (thread)),
1576 strerror (save_errno));
1577 }
1578 }
1579 else if (debug_threads)
1580 {
1581 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1582 target_pid_to_str (ptid_of (thread)),
1583 strsignal (sig));
1584 }
1585
1586 delete_lwp (lwp);
1587 }
1588
1589 /* Callback for find_inferior. Detaches from non-leader threads of a
1590 given process. */
1591
1592 static int
1593 linux_detach_lwp_callback (thread_info *thread, void *args)
1594 {
1595 struct lwp_info *lwp = get_thread_lwp (thread);
1596 int pid = *(int *) args;
1597 int lwpid = lwpid_of (thread);
1598
1599 /* Skip other processes. */
1600 if (thread->id.pid () != pid)
1601 return 0;
1602
1603 /* We don't actually detach from the thread group leader just yet.
1604 If the thread group exits, we must reap the zombie clone lwps
1605 before we're able to reap the leader. */
1606 if (thread->id.pid () == lwpid)
1607 return 0;
1608
1609 linux_detach_one_lwp (lwp);
1610 return 0;
1611 }
1612
1613 static int
1614 linux_detach (int pid)
1615 {
1616 struct process_info *process;
1617 struct lwp_info *main_lwp;
1618
1619 process = find_process_pid (pid);
1620 if (process == NULL)
1621 return -1;
1622
1623 /* As there's a step over already in progress, let it finish first,
1624 otherwise nesting a stabilize_threads operation on top gets real
1625 messy. */
1626 complete_ongoing_step_over ();
1627
1628 /* Stop all threads before detaching. First, ptrace requires that
1629 the thread is stopped to successfully detach. Second, thread_db
1630 may need to uninstall thread event breakpoints from memory, which
1631 only works with a stopped process anyway. */
1632 stop_all_lwps (0, NULL);
1633
1634 #ifdef USE_THREAD_DB
1635 thread_db_detach (process);
1636 #endif
1637
1638 /* Stabilize threads (move out of jump pads). */
1639 stabilize_threads ();
1640
1641 /* Detach from the clone lwps first. If the thread group exits just
1642 while we're detaching, we must reap the clone lwps before we're
1643 able to reap the leader. */
1644 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1645
1646 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1647 linux_detach_one_lwp (main_lwp);
1648
1649 the_target->mourn (process);
1650
1651 /* Since we presently can only stop all lwps of all processes, we
1652 need to unstop lwps of other processes. */
1653 unstop_all_lwps (0, NULL);
1654 return 0;
1655 }
1656
1657 /* Remove all LWPs that belong to process PROC from the lwp list. */
1658
1659 static int
1660 delete_lwp_callback (thread_info *thread, void *proc)
1661 {
1662 struct lwp_info *lwp = get_thread_lwp (thread);
1663 struct process_info *process = (struct process_info *) proc;
1664
1665 if (pid_of (thread) == pid_of (process))
1666 delete_lwp (lwp);
1667
1668 return 0;
1669 }
1670
1671 static void
1672 linux_mourn (struct process_info *process)
1673 {
1674 struct process_info_private *priv;
1675
1676 #ifdef USE_THREAD_DB
1677 thread_db_mourn (process);
1678 #endif
1679
1680 find_inferior (&all_threads, delete_lwp_callback, process);
1681
1682 /* Freeing all private data. */
1683 priv = process->priv;
1684 if (the_low_target.delete_process != NULL)
1685 the_low_target.delete_process (priv->arch_private);
1686 else
1687 gdb_assert (priv->arch_private == NULL);
1688 free (priv);
1689 process->priv = NULL;
1690
1691 remove_process (process);
1692 }
1693
1694 static void
1695 linux_join (int pid)
1696 {
1697 int status, ret;
1698
1699 do {
1700 ret = my_waitpid (pid, &status, 0);
1701 if (WIFEXITED (status) || WIFSIGNALED (status))
1702 break;
1703 } while (ret != -1 || errno != ECHILD);
1704 }
1705
1706 /* Return nonzero if the given thread is still alive. */
1707 static int
1708 linux_thread_alive (ptid_t ptid)
1709 {
1710 struct lwp_info *lwp = find_lwp_pid (ptid);
1711
1712 /* We assume we always know if a thread exits. If a whole process
1713 exited but we still haven't been able to report it to GDB, we'll
1714 hold on to the last lwp of the dead process. */
1715 if (lwp != NULL)
1716 return !lwp_is_marked_dead (lwp);
1717 else
1718 return 0;
1719 }
1720
1721 /* Return 1 if this lwp still has an interesting status pending. If
1722 not (e.g., it had stopped for a breakpoint that is gone), return
1723 false. */
1724
1725 static int
1726 thread_still_has_status_pending_p (struct thread_info *thread)
1727 {
1728 struct lwp_info *lp = get_thread_lwp (thread);
1729
1730 if (!lp->status_pending_p)
1731 return 0;
1732
1733 if (thread->last_resume_kind != resume_stop
1734 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1735 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1736 {
1737 struct thread_info *saved_thread;
1738 CORE_ADDR pc;
1739 int discard = 0;
1740
1741 gdb_assert (lp->last_status != 0);
1742
1743 pc = get_pc (lp);
1744
1745 saved_thread = current_thread;
1746 current_thread = thread;
1747
1748 if (pc != lp->stop_pc)
1749 {
1750 if (debug_threads)
1751 debug_printf ("PC of %ld changed\n",
1752 lwpid_of (thread));
1753 discard = 1;
1754 }
1755
1756 #if !USE_SIGTRAP_SIGINFO
1757 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1758 && !(*the_low_target.breakpoint_at) (pc))
1759 {
1760 if (debug_threads)
1761 debug_printf ("previous SW breakpoint of %ld gone\n",
1762 lwpid_of (thread));
1763 discard = 1;
1764 }
1765 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1766 && !hardware_breakpoint_inserted_here (pc))
1767 {
1768 if (debug_threads)
1769 debug_printf ("previous HW breakpoint of %ld gone\n",
1770 lwpid_of (thread));
1771 discard = 1;
1772 }
1773 #endif
1774
1775 current_thread = saved_thread;
1776
1777 if (discard)
1778 {
1779 if (debug_threads)
1780 debug_printf ("discarding pending breakpoint status\n");
1781 lp->status_pending_p = 0;
1782 return 0;
1783 }
1784 }
1785
1786 return 1;
1787 }
1788
1789 /* Returns true if LWP is resumed from the client's perspective. */
1790
1791 static int
1792 lwp_resumed (struct lwp_info *lwp)
1793 {
1794 struct thread_info *thread = get_lwp_thread (lwp);
1795
1796 if (thread->last_resume_kind != resume_stop)
1797 return 1;
1798
1799 /* Did gdb send us a `vCont;t', but we haven't reported the
1800 corresponding stop to gdb yet? If so, the thread is still
1801 resumed/running from gdb's perspective. */
1802 if (thread->last_resume_kind == resume_stop
1803 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1804 return 1;
1805
1806 return 0;
1807 }
1808
1809 /* Return 1 if this lwp has an interesting status pending. */
1810 static int
1811 status_pending_p_callback (thread_info *thread, void *arg)
1812 {
1813 struct lwp_info *lp = get_thread_lwp (thread);
1814 ptid_t ptid = * (ptid_t *) arg;
1815
1816 /* Check if we're only interested in events from a specific process
1817 or a specific LWP. */
1818 if (!ptid_match (ptid_of (thread), ptid))
1819 return 0;
1820
1821 if (!lwp_resumed (lp))
1822 return 0;
1823
1824 if (lp->status_pending_p
1825 && !thread_still_has_status_pending_p (thread))
1826 {
1827 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1828 return 0;
1829 }
1830
1831 return lp->status_pending_p;
1832 }
1833
1834 static int
1835 same_lwp (thread_info *thread, void *data)
1836 {
1837 ptid_t ptid = *(ptid_t *) data;
1838 int lwp;
1839
1840 if (ptid_get_lwp (ptid) != 0)
1841 lwp = ptid_get_lwp (ptid);
1842 else
1843 lwp = ptid_get_pid (ptid);
1844
1845 if (thread->id.lwp () == lwp)
1846 return 1;
1847
1848 return 0;
1849 }
1850
1851 struct lwp_info *
1852 find_lwp_pid (ptid_t ptid)
1853 {
1854 thread_info *thread = find_inferior (&all_threads, same_lwp, &ptid);
1855
1856 if (thread == NULL)
1857 return NULL;
1858
1859 return get_thread_lwp (thread);
1860 }
1861
1862 /* Return the number of known LWPs in the tgid given by PID. */
1863
1864 static int
1865 num_lwps (int pid)
1866 {
1867 int count = 0;
1868
1869 for_each_thread (pid, [&] (thread_info *thread)
1870 {
1871 count++;
1872 });
1873
1874 return count;
1875 }
1876
1877 /* See nat/linux-nat.h. */
1878
1879 struct lwp_info *
1880 iterate_over_lwps (ptid_t filter,
1881 iterate_over_lwps_ftype callback,
1882 void *data)
1883 {
1884 thread_info *thread = find_thread (filter, [&] (thread_info *thread)
1885 {
1886 lwp_info *lwp = get_thread_lwp (thread);
1887
1888 return callback (lwp, data);
1889 });
1890
1891 if (thread == NULL)
1892 return NULL;
1893
1894 return get_thread_lwp (thread);
1895 }
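/* Usage sketch (illustrative values, hypothetical callback): to visit
   every LWP of process 1234 a caller could do
     iterate_over_lwps (pid_to_ptid (1234), my_callback, NULL);
   where my_callback is an iterate_over_lwps_ftype that returns nonzero
   to stop the iteration and have that LWP returned.  */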
1896
1897 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1898 their exits until all other threads in the group have exited. */
1899
1900 static void
1901 check_zombie_leaders (void)
1902 {
1903 for_each_process ([] (process_info *proc) {
1904 pid_t leader_pid = pid_of (proc);
1905 struct lwp_info *leader_lp;
1906
1907 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1908
1909 if (debug_threads)
1910 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1911 "num_lwps=%d, zombie=%d\n",
1912 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1913 linux_proc_pid_is_zombie (leader_pid));
1914
1915 if (leader_lp != NULL && !leader_lp->stopped
1916 /* Check if there are other threads in the group, as we may
1917 have raced with the inferior simply exiting. */
1918 && !last_thread_of_process_p (leader_pid)
1919 && linux_proc_pid_is_zombie (leader_pid))
1920 {
1921 /* A leader zombie can mean one of two things:
1922
1923 - It exited, and there's an exit status pending
1924 available, or only the leader exited (not the whole
1925 program). In the latter case, we can't waitpid the
1926 leader's exit status until all other threads are gone.
1927
1928 - There are 3 or more threads in the group, and a thread
1929 other than the leader exec'd. On an exec, the Linux
1930 kernel destroys all other threads (except the execing
1931 one) in the thread group, and resets the execing thread's
1932 tid to the tgid. No exit notification is sent for the
1933 execing thread -- from the ptracer's perspective, it
1934 appears as though the execing thread just vanishes.
1935 Until we reap all other threads except the leader and the
1936 execing thread, the leader will be zombie, and the
1937 execing thread will be in `D (disc sleep)'. As soon as
1938 all other threads are reaped, the execing thread changes
1939 its tid to the tgid, and the previous (zombie) leader
1940 vanishes, giving place to the "new" leader. We could try
1941 distinguishing the exit and exec cases, by waiting once
1942 more, and seeing if something comes out, but it doesn't
1943 sound useful. The previous leader _does_ go away, and
1944 we'll re-add the new one once we see the exec event
1945 (which is just the same as what would happen if the
1946 previous leader did exit voluntarily before some other
1947 thread execs). */
1948
1949 if (debug_threads)
1950 debug_printf ("CZL: Thread group leader %d zombie "
1951 "(it exited, or another thread execd).\n",
1952 leader_pid);
1953
1954 delete_lwp (leader_lp);
1955 }
1956 });
1957 }
1958
1959 /* Callback for `find_inferior'. Returns the first LWP that is not
1960 stopped. ARG is a PTID filter. */
1961
1962 static int
1963 not_stopped_callback (thread_info *thread, void *arg)
1964 {
1965 struct lwp_info *lwp;
1966 ptid_t filter = *(ptid_t *) arg;
1967
1968 if (!ptid_match (ptid_of (thread), filter))
1969 return 0;
1970
1971 lwp = get_thread_lwp (thread);
1972 if (!lwp->stopped)
1973 return 1;
1974
1975 return 0;
1976 }
1977
1978 /* Increment LWP's suspend count. */
1979
1980 static void
1981 lwp_suspended_inc (struct lwp_info *lwp)
1982 {
1983 lwp->suspended++;
1984
1985 if (debug_threads && lwp->suspended > 4)
1986 {
1987 struct thread_info *thread = get_lwp_thread (lwp);
1988
1989 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1990 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1991 }
1992 }
1993
1994 /* Decrement LWP's suspend count. */
1995
1996 static void
1997 lwp_suspended_decr (struct lwp_info *lwp)
1998 {
1999 lwp->suspended--;
2000
2001 if (lwp->suspended < 0)
2002 {
2003 struct thread_info *thread = get_lwp_thread (lwp);
2004
2005 internal_error (__FILE__, __LINE__,
2006 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2007 lwp->suspended);
2008 }
2009 }
2010
2011 /* This function should only be called if the LWP got a SIGTRAP.
2012
2013 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2014 event was handled, 0 otherwise. */
2015
2016 static int
2017 handle_tracepoints (struct lwp_info *lwp)
2018 {
2019 struct thread_info *tinfo = get_lwp_thread (lwp);
2020 int tpoint_related_event = 0;
2021
2022 gdb_assert (lwp->suspended == 0);
2023
2024 /* If this tracepoint hit causes a tracing stop, we'll immediately
2025 uninsert tracepoints. To do this, we temporarily pause all
2026 threads, unpatch away, and then unpause threads. We need to make
2027 sure the unpausing doesn't resume LWP too. */
2028 lwp_suspended_inc (lwp);
2029
2030 /* And we need to be sure that any all-threads-stopping doesn't try
2031 to move threads out of the jump pads, as it could deadlock the
2032 inferior (LWP could be in the jump pad, maybe even holding the
2033 lock.) */
2034
2035 /* Do any necessary step collect actions. */
2036 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2037
2038 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2039
2040 /* See if we just hit a tracepoint and do its main collect
2041 actions. */
2042 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2043
2044 lwp_suspended_decr (lwp);
2045
2046 gdb_assert (lwp->suspended == 0);
2047 gdb_assert (!stabilizing_threads
2048 || (lwp->collecting_fast_tracepoint
2049 != fast_tpoint_collect_result::not_collecting));
2050
2051 if (tpoint_related_event)
2052 {
2053 if (debug_threads)
2054 debug_printf ("got a tracepoint event\n");
2055 return 1;
2056 }
2057
2058 return 0;
2059 }
2060
2061 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2062 collection status. */
2063
2064 static fast_tpoint_collect_result
2065 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2066 struct fast_tpoint_collect_status *status)
2067 {
2068 CORE_ADDR thread_area;
2069 struct thread_info *thread = get_lwp_thread (lwp);
2070
2071 if (the_low_target.get_thread_area == NULL)
2072 return fast_tpoint_collect_result::not_collecting;
2073
2074 /* Get the thread area address. This is used to recognize which
2075 thread is which when tracing with the in-process agent library.
2076 We don't read anything from the address, and treat it as opaque;
2077 it's the address itself that we assume is unique per-thread. */
2078 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2079 return fast_tpoint_collect_result::not_collecting;
2080
2081 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2082 }
2083
2084 /* The reason we resume in the caller is that we want to be able
2085 to pass lwp->status_pending as WSTAT, and we need to clear
2086 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2087 refuses to resume. */
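/* Returns 1 if the caller should defer reporting *WSTAT and let LWP
   finish moving out of the jump pad first (possibly after an
   exit-jump-pad breakpoint has been planted), and 0 if the event can
   be reported right away.  */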
2088
2089 static int
2090 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2091 {
2092 struct thread_info *saved_thread;
2093
2094 saved_thread = current_thread;
2095 current_thread = get_lwp_thread (lwp);
2096
2097 if ((wstat == NULL
2098 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2099 && supports_fast_tracepoints ()
2100 && agent_loaded_p ())
2101 {
2102 struct fast_tpoint_collect_status status;
2103
2104 if (debug_threads)
2105 debug_printf ("Checking whether LWP %ld needs to move out of the "
2106 "jump pad.\n",
2107 lwpid_of (current_thread));
2108
2109 fast_tpoint_collect_result r
2110 = linux_fast_tracepoint_collecting (lwp, &status);
2111
2112 if (wstat == NULL
2113 || (WSTOPSIG (*wstat) != SIGILL
2114 && WSTOPSIG (*wstat) != SIGFPE
2115 && WSTOPSIG (*wstat) != SIGSEGV
2116 && WSTOPSIG (*wstat) != SIGBUS))
2117 {
2118 lwp->collecting_fast_tracepoint = r;
2119
2120 if (r != fast_tpoint_collect_result::not_collecting)
2121 {
2122 if (r == fast_tpoint_collect_result::before_insn
2123 && lwp->exit_jump_pad_bkpt == NULL)
2124 {
2125 /* Haven't executed the original instruction yet.
2126 Set breakpoint there, and wait till it's hit,
2127 then single-step until exiting the jump pad. */
2128 lwp->exit_jump_pad_bkpt
2129 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2130 }
2131
2132 if (debug_threads)
2133 debug_printf ("Checking whether LWP %ld needs to move out of "
2134 "the jump pad...it does\n",
2135 lwpid_of (current_thread));
2136 current_thread = saved_thread;
2137
2138 return 1;
2139 }
2140 }
2141 else
2142 {
2143 /* If we get a synchronous signal while collecting, *and*
2144 while executing the (relocated) original instruction,
2145 reset the PC to point at the tpoint address, before
2146 reporting to GDB. Otherwise, it's an IPA lib bug: just
2147 report the signal to GDB, and pray for the best. */
2148
2149 lwp->collecting_fast_tracepoint
2150 = fast_tpoint_collect_result::not_collecting;
2151
2152 if (r != fast_tpoint_collect_result::not_collecting
2153 && (status.adjusted_insn_addr <= lwp->stop_pc
2154 && lwp->stop_pc < status.adjusted_insn_addr_end))
2155 {
2156 siginfo_t info;
2157 struct regcache *regcache;
2158
2159 /* The si_addr on a few signals references the address
2160 of the faulting instruction. Adjust that as
2161 well. */
2162 if ((WSTOPSIG (*wstat) == SIGILL
2163 || WSTOPSIG (*wstat) == SIGFPE
2164 || WSTOPSIG (*wstat) == SIGBUS
2165 || WSTOPSIG (*wstat) == SIGSEGV)
2166 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2167 (PTRACE_TYPE_ARG3) 0, &info) == 0
2168 /* Final check just to make sure we don't clobber
2169 the siginfo of non-kernel-sent signals. */
2170 && (uintptr_t) info.si_addr == lwp->stop_pc)
2171 {
2172 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2173 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2174 (PTRACE_TYPE_ARG3) 0, &info);
2175 }
2176
2177 regcache = get_thread_regcache (current_thread, 1);
2178 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2179 lwp->stop_pc = status.tpoint_addr;
2180
2181 /* Cancel any fast tracepoint lock this thread was
2182 holding. */
2183 force_unlock_trace_buffer ();
2184 }
2185
2186 if (lwp->exit_jump_pad_bkpt != NULL)
2187 {
2188 if (debug_threads)
2189 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2190 "stopping all threads momentarily.\n");
2191
2192 stop_all_lwps (1, lwp);
2193
2194 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2195 lwp->exit_jump_pad_bkpt = NULL;
2196
2197 unstop_all_lwps (1, lwp);
2198
2199 gdb_assert (lwp->suspended >= 0);
2200 }
2201 }
2202 }
2203
2204 if (debug_threads)
2205 debug_printf ("Checking whether LWP %ld needs to move out of the "
2206 "jump pad...no\n",
2207 lwpid_of (current_thread));
2208
2209 current_thread = saved_thread;
2210 return 0;
2211 }
2212
2213 /* Enqueue one signal in the "signals to report later when out of the
2214 jump pad" list. */
2215
2216 static void
2217 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2218 {
2219 struct pending_signals *p_sig;
2220 struct thread_info *thread = get_lwp_thread (lwp);
2221
2222 if (debug_threads)
2223 debug_printf ("Deferring signal %d for LWP %ld.\n",
2224 WSTOPSIG (*wstat), lwpid_of (thread));
2225
2226 if (debug_threads)
2227 {
2228 struct pending_signals *sig;
2229
2230 for (sig = lwp->pending_signals_to_report;
2231 sig != NULL;
2232 sig = sig->prev)
2233 debug_printf (" Already queued %d\n",
2234 sig->signal);
2235
2236 debug_printf (" (no more currently queued signals)\n");
2237 }
2238
2239 /* Don't enqueue non-RT signals if they are already in the deferred
2240 queue. (SIGSTOP being the easiest signal to see ending up here
2241 twice) */
2242 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2243 {
2244 struct pending_signals *sig;
2245
2246 for (sig = lwp->pending_signals_to_report;
2247 sig != NULL;
2248 sig = sig->prev)
2249 {
2250 if (sig->signal == WSTOPSIG (*wstat))
2251 {
2252 if (debug_threads)
2253 debug_printf ("Not requeuing already queued non-RT signal %d"
2254 " for LWP %ld\n",
2255 sig->signal,
2256 lwpid_of (thread));
2257 return;
2258 }
2259 }
2260 }
2261
2262 p_sig = XCNEW (struct pending_signals);
2263 p_sig->prev = lwp->pending_signals_to_report;
2264 p_sig->signal = WSTOPSIG (*wstat);
2265
2266 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2267 &p_sig->info);
2268
2269 lwp->pending_signals_to_report = p_sig;
2270 }
2271
2272 /* Dequeue one signal from the "signals to report later when out of
2273 the jump pad" list. */
2274
2275 static int
2276 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2277 {
2278 struct thread_info *thread = get_lwp_thread (lwp);
2279
2280 if (lwp->pending_signals_to_report != NULL)
2281 {
2282 struct pending_signals **p_sig;
2283
2284 p_sig = &lwp->pending_signals_to_report;
2285 while ((*p_sig)->prev != NULL)
2286 p_sig = &(*p_sig)->prev;
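/* PENDING_SIGNALS_TO_REPORT is kept with the newest entry at the
   head (see enqueue_one_deferred_signal); walking to the end of the
   PREV chain above therefore picks the oldest entry, so deferred
   signals are reported in FIFO order.  */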
2287
2288 *wstat = W_STOPCODE ((*p_sig)->signal);
2289 if ((*p_sig)->info.si_signo != 0)
2290 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2291 &(*p_sig)->info);
2292 free (*p_sig);
2293 *p_sig = NULL;
2294
2295 if (debug_threads)
2296 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2297 WSTOPSIG (*wstat), lwpid_of (thread));
2298
2299 if (debug_threads)
2300 {
2301 struct pending_signals *sig;
2302
2303 for (sig = lwp->pending_signals_to_report;
2304 sig != NULL;
2305 sig = sig->prev)
2306 debug_printf (" Still queued %d\n",
2307 sig->signal);
2308
2309 debug_printf (" (no more queued signals)\n");
2310 }
2311
2312 return 1;
2313 }
2314
2315 return 0;
2316 }
2317
2318 /* Fetch the possibly triggered data watchpoint info and store it in
2319 CHILD.
2320
2321 On some archs, like x86, that use debug registers to set
2322 watchpoints, it's possible that the way to know which watched
2323 address trapped is to check the register that is used to select
2324 which address to watch. The problem is, between setting the watchpoint
2325 and reading back which data address trapped, the user may change
2326 the set of watchpoints, and, as a consequence, GDB changes the
2327 debug registers in the inferior. To avoid reading back a stale
2328 stopped-data-address when that happens, we cache in LP the fact
2329 that a watchpoint trapped, and the corresponding data address, as
2330 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2331 registers meanwhile, we have the cached data we can rely on. */
2332
2333 static int
2334 check_stopped_by_watchpoint (struct lwp_info *child)
2335 {
2336 if (the_low_target.stopped_by_watchpoint != NULL)
2337 {
2338 struct thread_info *saved_thread;
2339
2340 saved_thread = current_thread;
2341 current_thread = get_lwp_thread (child);
2342
2343 if (the_low_target.stopped_by_watchpoint ())
2344 {
2345 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2346
2347 if (the_low_target.stopped_data_address != NULL)
2348 child->stopped_data_address
2349 = the_low_target.stopped_data_address ();
2350 else
2351 child->stopped_data_address = 0;
2352 }
2353
2354 current_thread = saved_thread;
2355 }
2356
2357 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2358 }
2359
2360 /* Return the ptrace options that we want to try to enable. */
2361
2362 static int
2363 linux_low_ptrace_options (int attached)
2364 {
2365 int options = 0;
2366
2367 if (!attached)
2368 options |= PTRACE_O_EXITKILL;
2369
2370 if (report_fork_events)
2371 options |= PTRACE_O_TRACEFORK;
2372
2373 if (report_vfork_events)
2374 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2375
2376 if (report_exec_events)
2377 options |= PTRACE_O_TRACEEXEC;
2378
2379 options |= PTRACE_O_TRACESYSGOOD;
2380
2381 return options;
2382 }
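/* For example, if gdbserver started the inferior itself (not attached)
   and GDB asked for fork, vfork and exec events, the mask returned
   above would be roughly
     PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
     | PTRACE_O_TRACEVFORKDONE | PTRACE_O_TRACEEXEC
     | PTRACE_O_TRACESYSGOOD.  */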
2383
2384 /* Do low-level handling of the event, and check if we should go on
2385 and pass it to caller code. Return the affected lwp if we are, or
2386 NULL otherwise. */
2387
2388 static struct lwp_info *
2389 linux_low_filter_event (int lwpid, int wstat)
2390 {
2391 struct lwp_info *child;
2392 struct thread_info *thread;
2393 int have_stop_pc = 0;
2394
2395 child = find_lwp_pid (pid_to_ptid (lwpid));
2396
2397 /* Check for stop events reported by a process we didn't already
2398 know about - anything not already in our LWP list.
2399
2400 If we're expecting to receive stopped processes after
2401 fork, vfork, and clone events, then we'll just add the
2402 new one to our list and go back to waiting for the event
2403 to be reported - the stopped process might be returned
2404 from waitpid before or after the event is.
2405
2406 But note the case of a non-leader thread exec'ing after the
2407 leader has exited and gone from our lists (because
2408 check_zombie_leaders deleted it). The non-leader thread
2409 changes its tid to the tgid. */
2410
2411 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2412 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2413 {
2414 ptid_t child_ptid;
2415
2416 /* A multi-thread exec after we had seen the leader exiting. */
2417 if (debug_threads)
2418 {
2419 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2420 "after exec.\n", lwpid);
2421 }
2422
2423 child_ptid = ptid_build (lwpid, lwpid, 0);
2424 child = add_lwp (child_ptid);
2425 child->stopped = 1;
2426 current_thread = child->thread;
2427 }
2428
2429 /* If we didn't find a process, one of two things presumably happened:
2430 - A process we started and then detached from has exited. Ignore it.
2431 - A process we are controlling has forked and the new child's stop
2432 was reported to us by the kernel. Save its PID. */
2433 if (child == NULL && WIFSTOPPED (wstat))
2434 {
2435 add_to_pid_list (&stopped_pids, lwpid, wstat);
2436 return NULL;
2437 }
2438 else if (child == NULL)
2439 return NULL;
2440
2441 thread = get_lwp_thread (child);
2442
2443 child->stopped = 1;
2444
2445 child->last_status = wstat;
2446
2447 /* Check if the thread has exited. */
2448 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2449 {
2450 if (debug_threads)
2451 debug_printf ("LLFE: %d exited.\n", lwpid);
2452
2453 if (finish_step_over (child))
2454 {
2455 /* Unsuspend all other LWPs, and set them back running again. */
2456 unsuspend_all_lwps (child);
2457 }
2458
2459 /* If there is at least one more LWP, then the exit signal was
2460 not the end of the debugged application and should be
2461 ignored, unless GDB wants to hear about thread exits. */
2462 if (report_thread_events
2463 || last_thread_of_process_p (pid_of (thread)))
2464 {
2465 /* Since events are serialized to GDB core, we can't report
2466 this one right now. Leave the status pending for
2467 the next time we're able to report it. */
2468 mark_lwp_dead (child, wstat);
2469 return child;
2470 }
2471 else
2472 {
2473 delete_lwp (child);
2474 return NULL;
2475 }
2476 }
2477
2478 gdb_assert (WIFSTOPPED (wstat));
2479
2480 if (WIFSTOPPED (wstat))
2481 {
2482 struct process_info *proc;
2483
2484 /* Architecture-specific setup after inferior is running. */
2485 proc = find_process_pid (pid_of (thread));
2486 if (proc->tdesc == NULL)
2487 {
2488 if (proc->attached)
2489 {
2490 /* This needs to happen after we have attached to the
2491 inferior and it is stopped for the first time, but
2492 before we access any inferior registers. */
2493 linux_arch_setup_thread (thread);
2494 }
2495 else
2496 {
2497 /* The process is started, but GDBserver will do
2498 architecture-specific setup after the program stops at
2499 the first instruction. */
2500 child->status_pending_p = 1;
2501 child->status_pending = wstat;
2502 return child;
2503 }
2504 }
2505 }
2506
2507 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2508 {
2509 struct process_info *proc = find_process_pid (pid_of (thread));
2510 int options = linux_low_ptrace_options (proc->attached);
2511
2512 linux_enable_event_reporting (lwpid, options);
2513 child->must_set_ptrace_flags = 0;
2514 }
2515
2516 /* Always update syscall_state, even if it will be filtered later. */
2517 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2518 {
2519 child->syscall_state
2520 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2521 ? TARGET_WAITKIND_SYSCALL_RETURN
2522 : TARGET_WAITKIND_SYSCALL_ENTRY);
2523 }
2524 else
2525 {
2526 /* Almost all other ptrace-stops are known to be outside of system
2527 calls, with further exceptions in handle_extended_wait. */
2528 child->syscall_state = TARGET_WAITKIND_IGNORE;
2529 }
2530
2531 /* Be careful to not overwrite stop_pc until save_stop_reason is
2532 called. */
2533 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2534 && linux_is_extended_waitstatus (wstat))
2535 {
2536 child->stop_pc = get_pc (child);
2537 if (handle_extended_wait (&child, wstat))
2538 {
2539 /* The event has been handled, so just return without
2540 reporting it. */
2541 return NULL;
2542 }
2543 }
2544
2545 if (linux_wstatus_maybe_breakpoint (wstat))
2546 {
2547 if (save_stop_reason (child))
2548 have_stop_pc = 1;
2549 }
2550
2551 if (!have_stop_pc)
2552 child->stop_pc = get_pc (child);
2553
2554 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2555 && child->stop_expected)
2556 {
2557 if (debug_threads)
2558 debug_printf ("Expected stop.\n");
2559 child->stop_expected = 0;
2560
2561 if (thread->last_resume_kind == resume_stop)
2562 {
2563 /* We want to report the stop to the core. Treat the
2564 SIGSTOP as a normal event. */
2565 if (debug_threads)
2566 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2567 target_pid_to_str (ptid_of (thread)));
2568 }
2569 else if (stopping_threads != NOT_STOPPING_THREADS)
2570 {
2571 /* Stopping threads. We don't want this SIGSTOP to end up
2572 pending. */
2573 if (debug_threads)
2574 debug_printf ("LLW: SIGSTOP caught for %s "
2575 "while stopping threads.\n",
2576 target_pid_to_str (ptid_of (thread)));
2577 return NULL;
2578 }
2579 else
2580 {
2581 /* This is a delayed SIGSTOP. Filter out the event. */
2582 if (debug_threads)
2583 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2584 child->stepping ? "step" : "continue",
2585 target_pid_to_str (ptid_of (thread)));
2586
2587 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2588 return NULL;
2589 }
2590 }
2591
2592 child->status_pending_p = 1;
2593 child->status_pending = wstat;
2594 return child;
2595 }
2596
2597 /* Return true if THREAD is doing hardware single step. */
2598
2599 static int
2600 maybe_hw_step (struct thread_info *thread)
2601 {
2602 if (can_hardware_single_step ())
2603 return 1;
2604 else
2605 {
2606 /* GDBserver must insert single-step breakpoint for software
2607 single step. */
2608 gdb_assert (has_single_step_breakpoints (thread));
2609 return 0;
2610 }
2611 }
2612
2613 /* Resume LWPs that are currently stopped without any pending status
2614 to report, but are resumed from the core's perspective. */
2615
2616 static void
2617 resume_stopped_resumed_lwps (thread_info *thread)
2618 {
2619 struct lwp_info *lp = get_thread_lwp (thread);
2620
2621 if (lp->stopped
2622 && !lp->suspended
2623 && !lp->status_pending_p
2624 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2625 {
2626 int step = 0;
2627
2628 if (thread->last_resume_kind == resume_step)
2629 step = maybe_hw_step (thread);
2630
2631 if (debug_threads)
2632 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2633 target_pid_to_str (ptid_of (thread)),
2634 paddress (lp->stop_pc),
2635 step);
2636
2637 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2638 }
2639 }
2640
2641 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2642 match FILTER_PTID (leaving others pending). The PTIDs can be:
2643 minus_one_ptid, to specify any child; a pid PTID, specifying all
2644 lwps of a thread group; or a PTID representing a single lwp. Store
2645 the stop status through the status pointer WSTAT. OPTIONS is
2646 passed to the waitpid call. Return 0 if no event was found and
2647 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2648 were found. Return the PID of the stopped child otherwise. */
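/* Illustration of the PTID filters (values made up): minus_one_ptid
   accepts an event from any LWP; a pid-only PTID for process 1234
   accepts events from every LWP in that thread group; a PTID naming a
   single LWP accepts events from that LWP only, leaving everything
   else pending.  */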
2649
2650 static int
2651 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2652 int *wstatp, int options)
2653 {
2654 struct thread_info *event_thread;
2655 struct lwp_info *event_child, *requested_child;
2656 sigset_t block_mask, prev_mask;
2657
2658 retry:
2659 /* N.B. event_thread points to the thread_info struct that contains
2660 event_child. Keep them in sync. */
2661 event_thread = NULL;
2662 event_child = NULL;
2663 requested_child = NULL;
2664
2665 /* Check for a lwp with a pending status. */
2666
2667 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2668 {
2669 event_thread = (struct thread_info *)
2670 find_inferior_in_random (&all_threads, status_pending_p_callback,
2671 &filter_ptid);
2672 if (event_thread != NULL)
2673 event_child = get_thread_lwp (event_thread);
2674 if (debug_threads && event_thread)
2675 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2676 }
2677 else if (!ptid_equal (filter_ptid, null_ptid))
2678 {
2679 requested_child = find_lwp_pid (filter_ptid);
2680
2681 if (stopping_threads == NOT_STOPPING_THREADS
2682 && requested_child->status_pending_p
2683 && (requested_child->collecting_fast_tracepoint
2684 != fast_tpoint_collect_result::not_collecting))
2685 {
2686 enqueue_one_deferred_signal (requested_child,
2687 &requested_child->status_pending);
2688 requested_child->status_pending_p = 0;
2689 requested_child->status_pending = 0;
2690 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2691 }
2692
2693 if (requested_child->suspended
2694 && requested_child->status_pending_p)
2695 {
2696 internal_error (__FILE__, __LINE__,
2697 "requesting an event out of a"
2698 " suspended child?");
2699 }
2700
2701 if (requested_child->status_pending_p)
2702 {
2703 event_child = requested_child;
2704 event_thread = get_lwp_thread (event_child);
2705 }
2706 }
2707
2708 if (event_child != NULL)
2709 {
2710 if (debug_threads)
2711 debug_printf ("Got an event from pending child %ld (%04x)\n",
2712 lwpid_of (event_thread), event_child->status_pending);
2713 *wstatp = event_child->status_pending;
2714 event_child->status_pending_p = 0;
2715 event_child->status_pending = 0;
2716 current_thread = event_thread;
2717 return lwpid_of (event_thread);
2718 }
2719
2720 /* But if we don't find a pending event, we'll have to wait.
2721
2722 We only enter this loop if no process has a pending wait status.
2723 Thus any action taken in response to a wait status inside this
2724 loop is responding as soon as we detect the status, not after any
2725 pending events. */
2726
2727 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2728 all signals while here. */
2729 sigfillset (&block_mask);
2730 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2731
2732 /* Always pull all events out of the kernel. We'll randomly select
2733 an event LWP out of all that have events, to prevent
2734 starvation. */
2735 while (event_child == NULL)
2736 {
2737 pid_t ret = 0;
2738
2739 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2740 quirks:
2741
2742 - If the thread group leader exits while other threads in the
2743 thread group still exist, waitpid(TGID, ...) hangs. That
2744 waitpid won't return an exit status until the other threads
2745 in the group are reaped.
2746
2747 - When a non-leader thread execs, that thread just vanishes
2748 without reporting an exit (so we'd hang if we waited for it
2749 explicitly in that case). The exec event is reported to
2750 the TGID pid. */
2751 errno = 0;
2752 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2753
2754 if (debug_threads)
2755 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2756 ret, errno ? strerror (errno) : "ERRNO-OK");
2757
2758 if (ret > 0)
2759 {
2760 if (debug_threads)
2761 {
2762 debug_printf ("LLW: waitpid %ld received %s\n",
2763 (long) ret, status_to_str (*wstatp));
2764 }
2765
2766 /* Filter all events. IOW, leave all events pending. We'll
2767 randomly select an event LWP out of all that have events
2768 below. */
2769 linux_low_filter_event (ret, *wstatp);
2770 /* Retry until nothing comes out of waitpid. A single
2771 SIGCHLD can indicate more than one child stopped. */
2772 continue;
2773 }
2774
2775 /* Now that we've pulled all events out of the kernel, resume
2776 LWPs that don't have an interesting event to report. */
2777 if (stopping_threads == NOT_STOPPING_THREADS)
2778 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2779
2780 /* ... and find an LWP with a status to report to the core, if
2781 any. */
2782 event_thread = (struct thread_info *)
2783 find_inferior_in_random (&all_threads, status_pending_p_callback,
2784 &filter_ptid);
2785 if (event_thread != NULL)
2786 {
2787 event_child = get_thread_lwp (event_thread);
2788 *wstatp = event_child->status_pending;
2789 event_child->status_pending_p = 0;
2790 event_child->status_pending = 0;
2791 break;
2792 }
2793
2794 /* Check for zombie thread group leaders. Those can't be reaped
2795 until all other threads in the thread group are. */
2796 check_zombie_leaders ();
2797
2798 /* If there are no resumed children left in the set of LWPs we
2799 want to wait for, bail. We can't just block in
2800 waitpid/sigsuspend, because lwps might have been left stopped
2801 in trace-stop state, and we'd be stuck forever waiting for
2802 their status to change (which would only happen if we resumed
2803 them). Even if WNOHANG is set, this return code is preferred
2804 over 0 (below), as it is more detailed. */
2805 if ((find_inferior (&all_threads,
2806 not_stopped_callback,
2807 &wait_ptid) == NULL))
2808 {
2809 if (debug_threads)
2810 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2811 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2812 return -1;
2813 }
2814
2815 /* No interesting event to report to the caller. */
2816 if ((options & WNOHANG))
2817 {
2818 if (debug_threads)
2819 debug_printf ("WNOHANG set, no event found\n");
2820
2821 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2822 return 0;
2823 }
2824
2825 /* Block until we get an event reported with SIGCHLD. */
2826 if (debug_threads)
2827 debug_printf ("sigsuspend'ing\n");
2828
2829 sigsuspend (&prev_mask);
2830 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2831 goto retry;
2832 }
2833
2834 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2835
2836 current_thread = event_thread;
2837
2838 return lwpid_of (event_thread);
2839 }
2840
2841 /* Wait for an event from child(ren) PTID. PTIDs can be:
2842 minus_one_ptid, to specify any child; a pid PTID, specifying all
2843 lwps of a thread group; or a PTID representing a single lwp. Store
2844 the stop status through the status pointer WSTAT. OPTIONS is
2845 passed to the waitpid call. Return 0 if no event was found and
2846 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2847 were found. Return the PID of the stopped child otherwise. */
2848
2849 static int
2850 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2851 {
2852 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2853 }
2854
2855 /* Count the LWP's that have had events. */
2856
2857 static int
2858 count_events_callback (thread_info *thread, void *data)
2859 {
2860 struct lwp_info *lp = get_thread_lwp (thread);
2861 int *count = (int *) data;
2862
2863 gdb_assert (count != NULL);
2864
2865 /* Count only resumed LWPs that have an event pending. */
2866 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2867 && lp->status_pending_p)
2868 (*count)++;
2869
2870 return 0;
2871 }
2872
2873 /* Select the LWP (if any) that is currently being single-stepped. */
2874
2875 static int
2876 select_singlestep_lwp_callback (thread_info *thread, void *data)
2877 {
2878 struct lwp_info *lp = get_thread_lwp (thread);
2879
2880 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2881 && thread->last_resume_kind == resume_step
2882 && lp->status_pending_p)
2883 return 1;
2884 else
2885 return 0;
2886 }
2887
2888 /* Select the Nth LWP that has had an event. */
2889
2890 static int
2891 select_event_lwp_callback (thread_info *thread, void *data)
2892 {
2893 struct lwp_info *lp = get_thread_lwp (thread);
2894 int *selector = (int *) data;
2895
2896 gdb_assert (selector != NULL);
2897
2898 /* Select only resumed LWPs that have an event pending. */
2899 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2900 && lp->status_pending_p)
2901 if ((*selector)-- == 0)
2902 return 1;
2903
2904 return 0;
2905 }
2906
2907 /* Select one LWP out of those that have events pending. */
2908
2909 static void
2910 select_event_lwp (struct lwp_info **orig_lp)
2911 {
2912 int num_events = 0;
2913 int random_selector;
2914 struct thread_info *event_thread = NULL;
2915
2916 /* In all-stop, give preference to the LWP that is being
2917 single-stepped. There will be at most one, and it's the LWP that
2918 the core is most interested in. If we didn't do this, then we'd
2919 have to handle pending step SIGTRAPs somehow in case the core
2920 later continues the previously-stepped thread, otherwise we'd
2921 report the pending SIGTRAP, and the core, not having stepped the
2922 thread, wouldn't understand what the trap was for, and therefore
2923 would report it to the user as a random signal. */
2924 if (!non_stop)
2925 {
2926 event_thread
2927 = (struct thread_info *) find_inferior (&all_threads,
2928 select_singlestep_lwp_callback,
2929 NULL);
2930 if (event_thread != NULL)
2931 {
2932 if (debug_threads)
2933 debug_printf ("SEL: Select single-step %s\n",
2934 target_pid_to_str (ptid_of (event_thread)));
2935 }
2936 }
2937 if (event_thread == NULL)
2938 {
2939 /* No single-stepping LWP. Select one at random, out of those
2940 which have had events. */
2941
2942 /* First see how many events we have. */
2943 find_inferior (&all_threads, count_events_callback, &num_events);
2944 gdb_assert (num_events > 0);
2945
2946 /* Now randomly pick a LWP out of those that have had
2947 events. */
2948 random_selector = (int)
2949 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
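/* The expression above maps rand ()'s range [0, RAND_MAX] onto
   [0, num_events), so each LWP with a pending event is picked with
   roughly equal probability; e.g. with num_events == 3 the selector
   is 0, 1 or 2, each about a third of the time.  */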
2950
2951 if (debug_threads && num_events > 1)
2952 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2953 num_events, random_selector);
2954
2955 event_thread
2956 = (struct thread_info *) find_inferior (&all_threads,
2957 select_event_lwp_callback,
2958 &random_selector);
2959 }
2960
2961 if (event_thread != NULL)
2962 {
2963 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2964
2965 /* Switch the event LWP. */
2966 *orig_lp = event_lp;
2967 }
2968 }
2969
2970 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2971 non-NULL. */
2972
2973 static void
2974 unsuspend_all_lwps (struct lwp_info *except)
2975 {
2976 for_each_thread ([&] (thread_info *thread)
2977 {
2978 lwp_info *lwp = get_thread_lwp (thread);
2979
2980 if (lwp != except)
2981 lwp_suspended_decr (lwp);
2982 });
2983 }
2984
2985 static void move_out_of_jump_pad_callback (thread_info *thread);
2986 static bool stuck_in_jump_pad_callback (thread_info *thread);
2987 static int lwp_running (thread_info *thread, void *data);
2988 static ptid_t linux_wait_1 (ptid_t ptid,
2989 struct target_waitstatus *ourstatus,
2990 int target_options);
2991
2992 /* Stabilize threads (move out of jump pads).
2993
2994 If a thread is midway collecting a fast tracepoint, we need to
2995 finish the collection and move it out of the jump pad before
2996 reporting the signal.
2997
2998 This avoids recursion while collecting (when a signal arrives
2999 midway, and the signal handler itself collects), which would trash
3000 the trace buffer. In case the user set a breakpoint in a signal
3001 handler, this avoids the backtrace showing the jump pad, etc..
3002 Most importantly, there are certain things we can't do safely if
3003 threads are stopped in a jump pad (or in its callees). For
3004 example:
3005
3006 - starting a new trace run. A thread still collecting the
3007 previous run, could trash the trace buffer when resumed. The trace
3008 buffer control structures would have been reset but the thread had
3009 no way to tell. The thread could even be midway memcpy'ing to the
3010 buffer, which would mean that when resumed, it would clobber the
3011 trace buffer that had been set for a new run.
3012
3013 - we can't rewrite/reuse the jump pads for new tracepoints
3014 safely. Say you do tstart while a thread is stopped midway while
3015 collecting. When the thread is later resumed, it finishes the
3016 collection, and returns to the jump pad, to execute the original
3017 instruction that was under the tracepoint jump at the time the
3018 older run had been started. If the jump pad had been rewritten
3019 since for something else in the new run, the thread would now
3020 execute the wrong / random instructions. */
3021
3022 static void
3023 linux_stabilize_threads (void)
3024 {
3025 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
3026
3027 if (thread_stuck != NULL)
3028 {
3029 if (debug_threads)
3030 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3031 lwpid_of (thread_stuck));
3032 return;
3033 }
3034
3035 thread_info *saved_thread = current_thread;
3036
3037 stabilizing_threads = 1;
3038
3039 /* Kick 'em all. */
3040 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3041
3042 /* Loop until all are stopped out of the jump pads. */
3043 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3044 {
3045 struct target_waitstatus ourstatus;
3046 struct lwp_info *lwp;
3047 int wstat;
3048
3049 /* Note that we go through the full wait event loop. While
3050 moving threads out of jump pad, we need to be able to step
3051 over internal breakpoints and such. */
3052 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3053
3054 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3055 {
3056 lwp = get_thread_lwp (current_thread);
3057
3058 /* Lock it. */
3059 lwp_suspended_inc (lwp);
3060
3061 if (ourstatus.value.sig != GDB_SIGNAL_0
3062 || current_thread->last_resume_kind == resume_stop)
3063 {
3064 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3065 enqueue_one_deferred_signal (lwp, &wstat);
3066 }
3067 }
3068 }
3069
3070 unsuspend_all_lwps (NULL);
3071
3072 stabilizing_threads = 0;
3073
3074 current_thread = saved_thread;
3075
3076 if (debug_threads)
3077 {
3078 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3079
3080 if (thread_stuck != NULL)
3081 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3082 lwpid_of (thread_stuck));
3083 }
3084 }
3085
3086 /* Convenience function that is called when the kernel reports an
3087 event that is not passed out to GDB. */
3088
3089 static ptid_t
3090 ignore_event (struct target_waitstatus *ourstatus)
3091 {
3092 /* If we got an event, there may still be others, as a single
3093 SIGCHLD can indicate more than one child stopped. This forces
3094 another target_wait call. */
3095 async_file_mark ();
3096
3097 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3098 return null_ptid;
3099 }
3100
3101 /* Convenience function that is called when the kernel reports an exit
3102 event. This decides whether to report the event to GDB as a
3103 process exit event, a thread exit event, or to suppress the
3104 event. */
3105
3106 static ptid_t
3107 filter_exit_event (struct lwp_info *event_child,
3108 struct target_waitstatus *ourstatus)
3109 {
3110 struct thread_info *thread = get_lwp_thread (event_child);
3111 ptid_t ptid = ptid_of (thread);
3112
3113 if (!last_thread_of_process_p (pid_of (thread)))
3114 {
3115 if (report_thread_events)
3116 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3117 else
3118 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3119
3120 delete_lwp (event_child);
3121 }
3122 return ptid;
3123 }
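/* For example, if a non-leader LWP exits and GDB has asked for thread
   exit events, the result above is TARGET_WAITKIND_THREAD_EXITED for
   that thread's ptid; without that request the event becomes
   TARGET_WAITKIND_IGNORE and the LWP is simply deleted.  Only the last
   thread of the process keeps the process exit status set by the
   caller.  */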
3124
3125 /* Returns 1 if GDB is interested in any event_child syscalls. */
3126
3127 static int
3128 gdb_catching_syscalls_p (struct lwp_info *event_child)
3129 {
3130 struct thread_info *thread = get_lwp_thread (event_child);
3131 struct process_info *proc = get_thread_process (thread);
3132
3133 return !proc->syscalls_to_catch.empty ();
3134 }
3135
3136 /* Returns 1 if GDB is interested in the event_child syscall.
3137 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3138
3139 static int
3140 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3141 {
3142 int sysno;
3143 struct thread_info *thread = get_lwp_thread (event_child);
3144 struct process_info *proc = get_thread_process (thread);
3145
3146 if (proc->syscalls_to_catch.empty ())
3147 return 0;
3148
3149 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3150 return 1;
3151
3152 get_syscall_trapinfo (event_child, &sysno);
3153
3154 for (int iter : proc->syscalls_to_catch)
3155 if (iter == sysno)
3156 return 1;
3157
3158 return 0;
3159 }
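/* For instance, if GDB asked to catch all syscalls, syscalls_to_catch
   holds ANY_SYSCALL and every syscall stop is reported; if it asked
   for specific syscalls, only stops whose syscall number matches one
   of the stored entries are considered interesting.  */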
3160
3161 /* Wait for process, returns status. */
3162
3163 static ptid_t
3164 linux_wait_1 (ptid_t ptid,
3165 struct target_waitstatus *ourstatus, int target_options)
3166 {
3167 int w;
3168 struct lwp_info *event_child;
3169 int options;
3170 int pid;
3171 int step_over_finished;
3172 int bp_explains_trap;
3173 int maybe_internal_trap;
3174 int report_to_gdb;
3175 int trace_event;
3176 int in_step_range;
3177 int any_resumed;
3178
3179 if (debug_threads)
3180 {
3181 debug_enter ();
3182 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3183 }
3184
3185 /* Translate generic target options into linux options. */
3186 options = __WALL;
3187 if (target_options & TARGET_WNOHANG)
3188 options |= WNOHANG;
3189
3190 bp_explains_trap = 0;
3191 trace_event = 0;
3192 in_step_range = 0;
3193 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3194
3195 /* Find a resumed LWP, if any. */
3196 if (find_inferior (&all_threads,
3197 status_pending_p_callback,
3198 &minus_one_ptid) != NULL)
3199 any_resumed = 1;
3200 else if ((find_inferior (&all_threads,
3201 not_stopped_callback,
3202 &minus_one_ptid) != NULL))
3203 any_resumed = 1;
3204 else
3205 any_resumed = 0;
3206
3207 if (ptid_equal (step_over_bkpt, null_ptid))
3208 pid = linux_wait_for_event (ptid, &w, options);
3209 else
3210 {
3211 if (debug_threads)
3212 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3213 target_pid_to_str (step_over_bkpt));
3214 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3215 }
3216
3217 if (pid == 0 || (pid == -1 && !any_resumed))
3218 {
3219 gdb_assert (target_options & TARGET_WNOHANG);
3220
3221 if (debug_threads)
3222 {
3223 debug_printf ("linux_wait_1 ret = null_ptid, "
3224 "TARGET_WAITKIND_IGNORE\n");
3225 debug_exit ();
3226 }
3227
3228 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3229 return null_ptid;
3230 }
3231 else if (pid == -1)
3232 {
3233 if (debug_threads)
3234 {
3235 debug_printf ("linux_wait_1 ret = null_ptid, "
3236 "TARGET_WAITKIND_NO_RESUMED\n");
3237 debug_exit ();
3238 }
3239
3240 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3241 return null_ptid;
3242 }
3243
3244 event_child = get_thread_lwp (current_thread);
3245
3246 /* linux_wait_for_event only returns an exit status for the last
3247 child of a process. Report it. */
3248 if (WIFEXITED (w) || WIFSIGNALED (w))
3249 {
3250 if (WIFEXITED (w))
3251 {
3252 ourstatus->kind = TARGET_WAITKIND_EXITED;
3253 ourstatus->value.integer = WEXITSTATUS (w);
3254
3255 if (debug_threads)
3256 {
3257 debug_printf ("linux_wait_1 ret = %s, exited with "
3258 "retcode %d\n",
3259 target_pid_to_str (ptid_of (current_thread)),
3260 WEXITSTATUS (w));
3261 debug_exit ();
3262 }
3263 }
3264 else
3265 {
3266 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3267 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3268
3269 if (debug_threads)
3270 {
3271 debug_printf ("linux_wait_1 ret = %s, terminated with "
3272 "signal %d\n",
3273 target_pid_to_str (ptid_of (current_thread)),
3274 WTERMSIG (w));
3275 debug_exit ();
3276 }
3277 }
3278
3279 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3280 return filter_exit_event (event_child, ourstatus);
3281
3282 return ptid_of (current_thread);
3283 }
3284
3285 /* If step-over executes a breakpoint instruction, in the case of a
3286 hardware single step it means a gdb/gdbserver breakpoint had been
3287 planted on top of a permanent breakpoint, in the case of a software
3288 single step it may just mean that gdbserver hit the reinsert breakpoint.
3289 The PC has been adjusted by save_stop_reason to point at
3290 the breakpoint address.
3291 So in the case of hardware single step, advance the PC manually
3292 past the breakpoint, and in the case of software single step, advance
3293 only if it's not the single_step_breakpoint we are hitting.
3294 This prevents a program from trapping a permanent breakpoint
3295 forever. */
3296 if (!ptid_equal (step_over_bkpt, null_ptid)
3297 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3298 && (event_child->stepping
3299 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3300 {
3301 int increment_pc = 0;
3302 int breakpoint_kind = 0;
3303 CORE_ADDR stop_pc = event_child->stop_pc;
3304
3305 breakpoint_kind =
3306 the_target->breakpoint_kind_from_current_state (&stop_pc);
3307 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3308
3309 if (debug_threads)
3310 {
3311 debug_printf ("step-over for %s executed software breakpoint\n",
3312 target_pid_to_str (ptid_of (current_thread)));
3313 }
3314
3315 if (increment_pc != 0)
3316 {
3317 struct regcache *regcache
3318 = get_thread_regcache (current_thread, 1);
3319
3320 event_child->stop_pc += increment_pc;
3321 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3322
3323 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3324 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3325 }
3326 }
3327
3328 /* If this event was not handled before, and is not a SIGTRAP, we
3329 report it. SIGILL and SIGSEGV are also treated as traps in case
3330 a breakpoint is inserted at the current PC. If this target does
3331 not support internal breakpoints at all, we also report the
3332 SIGTRAP without further processing; it's of no concern to us. */
3333 maybe_internal_trap
3334 = (supports_breakpoints ()
3335 && (WSTOPSIG (w) == SIGTRAP
3336 || ((WSTOPSIG (w) == SIGILL
3337 || WSTOPSIG (w) == SIGSEGV)
3338 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3339
3340 if (maybe_internal_trap)
3341 {
3342 /* Handle anything that requires bookkeeping before deciding to
3343 report the event or continue waiting. */
3344
3345 /* First check if we can explain the SIGTRAP with an internal
3346 breakpoint, or if we should possibly report the event to GDB.
3347 Do this before anything that may remove or insert a
3348 breakpoint. */
3349 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3350
3351 /* We have a SIGTRAP, possibly a step-over dance has just
3352 finished. If so, tweak the state machine accordingly,
3353 reinsert breakpoints and delete any single-step
3354 breakpoints. */
3355 step_over_finished = finish_step_over (event_child);
3356
3357 /* Now invoke the callbacks of any internal breakpoints there. */
3358 check_breakpoints (event_child->stop_pc);
3359
3360 /* Handle tracepoint data collecting. This may overflow the
3361 trace buffer, and cause a tracing stop, removing
3362 breakpoints. */
3363 trace_event = handle_tracepoints (event_child);
3364
3365 if (bp_explains_trap)
3366 {
3367 if (debug_threads)
3368 debug_printf ("Hit a gdbserver breakpoint.\n");
3369 }
3370 }
3371 else
3372 {
3373 /* We have some other signal, possibly a step-over dance was in
3374 progress, and it should be cancelled too. */
3375 step_over_finished = finish_step_over (event_child);
3376 }
3377
3378 /* We have all the data we need. Either report the event to GDB, or
3379 resume threads and keep waiting for more. */
3380
3381 /* If we're collecting a fast tracepoint, finish the collection and
3382 move out of the jump pad before delivering a signal. See
3383 linux_stabilize_threads. */
3384
3385 if (WIFSTOPPED (w)
3386 && WSTOPSIG (w) != SIGTRAP
3387 && supports_fast_tracepoints ()
3388 && agent_loaded_p ())
3389 {
3390 if (debug_threads)
3391 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3392 "to defer or adjust it.\n",
3393 WSTOPSIG (w), lwpid_of (current_thread));
3394
3395 /* Allow debugging the jump pad itself. */
3396 if (current_thread->last_resume_kind != resume_step
3397 && maybe_move_out_of_jump_pad (event_child, &w))
3398 {
3399 enqueue_one_deferred_signal (event_child, &w);
3400
3401 if (debug_threads)
3402 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3403 WSTOPSIG (w), lwpid_of (current_thread));
3404
3405 linux_resume_one_lwp (event_child, 0, 0, NULL);
3406
3407 if (debug_threads)
3408 debug_exit ();
3409 return ignore_event (ourstatus);
3410 }
3411 }
3412
3413 if (event_child->collecting_fast_tracepoint
3414 != fast_tpoint_collect_result::not_collecting)
3415 {
3416 if (debug_threads)
3417 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3418 "Check if we're already there.\n",
3419 lwpid_of (current_thread),
3420 (int) event_child->collecting_fast_tracepoint);
3421
3422 trace_event = 1;
3423
3424 event_child->collecting_fast_tracepoint
3425 = linux_fast_tracepoint_collecting (event_child, NULL);
3426
3427 if (event_child->collecting_fast_tracepoint
3428 != fast_tpoint_collect_result::before_insn)
3429 {
3430 /* No longer need this breakpoint. */
3431 if (event_child->exit_jump_pad_bkpt != NULL)
3432 {
3433 if (debug_threads)
3434 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3435 "stopping all threads momentarily.\n");
3436
3437 /* Other running threads could hit this breakpoint.
3438 We don't handle moribund locations like GDB does,
3439 instead we always pause all threads when removing
3440 breakpoints, so that any step-over or
3441 decr_pc_after_break adjustment is always taken
3442 care of while the breakpoint is still
3443 inserted. */
3444 stop_all_lwps (1, event_child);
3445
3446 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3447 event_child->exit_jump_pad_bkpt = NULL;
3448
3449 unstop_all_lwps (1, event_child);
3450
3451 gdb_assert (event_child->suspended >= 0);
3452 }
3453 }
3454
3455 if (event_child->collecting_fast_tracepoint
3456 == fast_tpoint_collect_result::not_collecting)
3457 {
3458 if (debug_threads)
3459 debug_printf ("fast tracepoint finished "
3460 "collecting successfully.\n");
3461
3462 /* We may have a deferred signal to report. */
3463 if (dequeue_one_deferred_signal (event_child, &w))
3464 {
3465 if (debug_threads)
3466 debug_printf ("dequeued one signal.\n");
3467 }
3468 else
3469 {
3470 if (debug_threads)
3471 debug_printf ("no deferred signals.\n");
3472
3473 if (stabilizing_threads)
3474 {
3475 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3476 ourstatus->value.sig = GDB_SIGNAL_0;
3477
3478 if (debug_threads)
3479 {
3480 debug_printf ("linux_wait_1 ret = %s, stopped "
3481 "while stabilizing threads\n",
3482 target_pid_to_str (ptid_of (current_thread)));
3483 debug_exit ();
3484 }
3485
3486 return ptid_of (current_thread);
3487 }
3488 }
3489 }
3490 }
3491
3492 /* Check whether GDB would be interested in this event. */
3493
3494 /* Check if GDB is interested in this syscall. */
3495 if (WIFSTOPPED (w)
3496 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3497 && !gdb_catch_this_syscall_p (event_child))
3498 {
3499 if (debug_threads)
3500 {
3501 debug_printf ("Ignored syscall for LWP %ld.\n",
3502 lwpid_of (current_thread));
3503 }
3504
3505 linux_resume_one_lwp (event_child, event_child->stepping,
3506 0, NULL);
3507
3508 if (debug_threads)
3509 debug_exit ();
3510 return ignore_event (ourstatus);
3511 }
3512
3513 /* If GDB is not interested in this signal, don't stop other
3514 threads, and don't report it to GDB. Just resume the inferior
3515 right away. We do this for threading-related signals as well as
3516 any that GDB specifically requested we ignore. But never ignore
3517 SIGSTOP if we sent it ourselves, and do not ignore signals when
3518 stepping - they may require special handling to skip the signal
3519 handler. Also never ignore signals that could be caused by a
3520 breakpoint. */
3521 if (WIFSTOPPED (w)
3522 && current_thread->last_resume_kind != resume_step
3523 && (
3524 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3525 (current_process ()->priv->thread_db != NULL
3526 && (WSTOPSIG (w) == __SIGRTMIN
3527 || WSTOPSIG (w) == __SIGRTMIN + 1))
3528 ||
3529 #endif
3530 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3531 && !(WSTOPSIG (w) == SIGSTOP
3532 && current_thread->last_resume_kind == resume_stop)
3533 && !linux_wstatus_maybe_breakpoint (w))))
3534 {
3535 siginfo_t info, *info_p;
3536
3537 if (debug_threads)
3538 debug_printf ("Ignored signal %d for LWP %ld.\n",
3539 WSTOPSIG (w), lwpid_of (current_thread));
3540
3541 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3542 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3543 info_p = &info;
3544 else
3545 info_p = NULL;
3546
3547 if (step_over_finished)
3548 {
3549 /* We cancelled this thread's step-over above. We still
3550 need to unsuspend all other LWPs, and set them back
3551 running again while the signal handler runs. */
3552 unsuspend_all_lwps (event_child);
3553
3554 /* Enqueue the pending signal info so that proceed_all_lwps
3555 doesn't lose it. */
3556 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3557
3558 proceed_all_lwps ();
3559 }
3560 else
3561 {
3562 linux_resume_one_lwp (event_child, event_child->stepping,
3563 WSTOPSIG (w), info_p);
3564 }
3565
3566 if (debug_threads)
3567 debug_exit ();
3568
3569 return ignore_event (ourstatus);
3570 }
3571
3572 /* Note that all addresses are always "out of the step range" when
3573 there's no range to begin with. */
3574 in_step_range = lwp_in_step_range (event_child);
3575
3576 /* If GDB wanted this thread to single step, and the thread is out
3577 of the step range, we always want to report the SIGTRAP, and let
3578 GDB handle it. Watchpoints should always be reported. So should
3579 signals we can't explain. A SIGTRAP we can't explain could be a
3580 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3581 do, we'll be able to handle GDB breakpoints on top of internal
3582 breakpoints, by handling the internal breakpoint and still
3583 reporting the event to GDB. If we don't, we're out of luck; GDB
3584 won't see the breakpoint hit. If we see a single-step event but
3585 the thread should be continuing, don't pass the trap to gdb.
3586 That indicates that we had previously finished a single-step but
3587 left the single-step pending -- see
3588 complete_ongoing_step_over. */
3589 report_to_gdb = (!maybe_internal_trap
3590 || (current_thread->last_resume_kind == resume_step
3591 && !in_step_range)
3592 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3593 || (!in_step_range
3594 && !bp_explains_trap
3595 && !trace_event
3596 && !step_over_finished
3597 && !(current_thread->last_resume_kind == resume_continue
3598 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3599 || (gdb_breakpoint_here (event_child->stop_pc)
3600 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3601 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3602 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3603
3604 run_breakpoint_commands (event_child->stop_pc);
3605
3606 /* We found no reason GDB would want us to stop. We either hit one
3607 of our own breakpoints, or finished an internal step GDB
3608 shouldn't know about. */
3609 if (!report_to_gdb)
3610 {
3611 if (debug_threads)
3612 {
3613 if (bp_explains_trap)
3614 debug_printf ("Hit a gdbserver breakpoint.\n");
3615 if (step_over_finished)
3616 debug_printf ("Step-over finished.\n");
3617 if (trace_event)
3618 debug_printf ("Tracepoint event.\n");
3619 if (lwp_in_step_range (event_child))
3620 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3621 paddress (event_child->stop_pc),
3622 paddress (event_child->step_range_start),
3623 paddress (event_child->step_range_end));
3624 }
3625
3626 /* We're not reporting this breakpoint to GDB, so apply the
3627 decr_pc_after_break adjustment to the inferior's regcache
3628 ourselves. */
3629
3630 if (the_low_target.set_pc != NULL)
3631 {
3632 struct regcache *regcache
3633 = get_thread_regcache (current_thread, 1);
3634 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3635 }
3636
3637 if (step_over_finished)
3638 {
3639 /* If we have finished stepping over a breakpoint, we've
3640 stopped and suspended all LWPs momentarily except the
3641 stepping one. This is where we resume them all again.
3642 We're going to keep waiting, so use proceed, which
3643 handles stepping over the next breakpoint. */
3644 unsuspend_all_lwps (event_child);
3645 }
3646 else
3647 {
3648 /* Remove the single-step breakpoints if any. Note that
3649 there isn't single-step breakpoint if we finished stepping
3650 over. */
3651 if (can_software_single_step ()
3652 && has_single_step_breakpoints (current_thread))
3653 {
3654 stop_all_lwps (0, event_child);
3655 delete_single_step_breakpoints (current_thread);
3656 unstop_all_lwps (0, event_child);
3657 }
3658 }
3659
3660 if (debug_threads)
3661 debug_printf ("proceeding all threads.\n");
3662 proceed_all_lwps ();
3663
3664 if (debug_threads)
3665 debug_exit ();
3666
3667 return ignore_event (ourstatus);
3668 }
3669
3670 if (debug_threads)
3671 {
3672 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3673 {
3674 std::string str
3675 = target_waitstatus_to_string (&event_child->waitstatus);
3676
3677 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3678 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3679 }
3680 if (current_thread->last_resume_kind == resume_step)
3681 {
3682 if (event_child->step_range_start == event_child->step_range_end)
3683 debug_printf ("GDB wanted to single-step, reporting event.\n");
3684 else if (!lwp_in_step_range (event_child))
3685 debug_printf ("Out of step range, reporting event.\n");
3686 }
3687 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3688 debug_printf ("Stopped by watchpoint.\n");
3689 else if (gdb_breakpoint_here (event_child->stop_pc))
3690 debug_printf ("Stopped by GDB breakpoint.\n");
3691       debug_printf ("Hit a non-gdbserver trap event.\n");
3693 }
3694
3695 /* Alright, we're going to report a stop. */
3696
3697 /* Remove single-step breakpoints. */
3698 if (can_software_single_step ())
3699 {
3700       /* Whether to remove single-step breakpoints.  If true, stop all
3701 	 lwps first, so that other threads won't hit a breakpoint in
3702 	 stale memory.  */
3703 int remove_single_step_breakpoints_p = 0;
3704
3705 if (non_stop)
3706 {
3707 remove_single_step_breakpoints_p
3708 = has_single_step_breakpoints (current_thread);
3709 }
3710 else
3711 {
3712 /* In all-stop, a stop reply cancels all previous resume
3713 requests. Delete all single-step breakpoints. */
3714
3715 find_thread ([&] (thread_info *thread) {
3716 if (has_single_step_breakpoints (thread))
3717 {
3718 remove_single_step_breakpoints_p = 1;
3719 return true;
3720 }
3721
3722 return false;
3723 });
3724 }
3725
3726 if (remove_single_step_breakpoints_p)
3727 {
3728 /* If we remove single-step breakpoints from memory, stop all lwps,
3729 	     so that other threads won't hit a breakpoint in stale
3730 	     memory.  */
3731 stop_all_lwps (0, event_child);
3732
3733 if (non_stop)
3734 {
3735 gdb_assert (has_single_step_breakpoints (current_thread));
3736 delete_single_step_breakpoints (current_thread);
3737 }
3738 else
3739 {
3740 for_each_thread ([] (thread_info *thread){
3741 if (has_single_step_breakpoints (thread))
3742 delete_single_step_breakpoints (thread);
3743 });
3744 }
3745
3746 unstop_all_lwps (0, event_child);
3747 }
3748 }
3749
3750 if (!stabilizing_threads)
3751 {
3752 /* In all-stop, stop all threads. */
3753 if (!non_stop)
3754 stop_all_lwps (0, NULL);
3755
3756 if (step_over_finished)
3757 {
3758 if (!non_stop)
3759 {
3760 /* If we were doing a step-over, all other threads but
3761 the stepping one had been paused in start_step_over,
3762 with their suspend counts incremented. We don't want
3763 to do a full unstop/unpause, because we're in
3764 all-stop mode (so we want threads stopped), but we
3765 still need to unsuspend the other threads, to
3766 decrement their `suspended' count back. */
3767 unsuspend_all_lwps (event_child);
3768 }
3769 else
3770 {
3771 /* If we just finished a step-over, then all threads had
3772 been momentarily paused. In all-stop, that's fine,
3773 we want threads stopped by now anyway. In non-stop,
3774 we need to re-resume threads that GDB wanted to be
3775 running. */
3776 unstop_all_lwps (1, event_child);
3777 }
3778 }
3779
3780 /* If we're not waiting for a specific LWP, choose an event LWP
3781 from among those that have had events. Giving equal priority
3782 to all LWPs that have had events helps prevent
3783 starvation. */
3784 if (ptid_equal (ptid, minus_one_ptid))
3785 {
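	  /* Temporarily mark this event as pending so that select_event_lwp
	     considers it on an equal footing with other LWPs' pending
	     events; the flag is cleared again once the final event LWP has
	     been chosen below.  */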
3786 event_child->status_pending_p = 1;
3787 event_child->status_pending = w;
3788
3789 select_event_lwp (&event_child);
3790
3791 /* current_thread and event_child must stay in sync. */
3792 current_thread = get_lwp_thread (event_child);
3793
3794 event_child->status_pending_p = 0;
3795 w = event_child->status_pending;
3796 }
3797
3798
3799 /* Stabilize threads (move out of jump pads). */
3800 if (!non_stop)
3801 stabilize_threads ();
3802 }
3803 else
3804 {
3805 /* If we just finished a step-over, then all threads had been
3806 momentarily paused. In all-stop, that's fine, we want
3807 threads stopped by now anyway. In non-stop, we need to
3808 re-resume threads that GDB wanted to be running. */
3809 if (step_over_finished)
3810 unstop_all_lwps (1, event_child);
3811 }
3812
3813 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3814 {
3815 /* If the reported event is an exit, fork, vfork or exec, let
3816 GDB know. */
3817
3818 /* Break the unreported fork relationship chain. */
3819 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3820 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3821 {
3822 event_child->fork_relative->fork_relative = NULL;
3823 event_child->fork_relative = NULL;
3824 }
3825
3826 *ourstatus = event_child->waitstatus;
3827 /* Clear the event lwp's waitstatus since we handled it already. */
3828 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3829 }
3830 else
3831 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3832
3833 /* Now that we've selected our final event LWP, un-adjust its PC if
3834 it was a software breakpoint, and the client doesn't know we can
3835 adjust the breakpoint ourselves. */
3836 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3837 && !swbreak_feature)
3838 {
3839 int decr_pc = the_low_target.decr_pc_after_break;
3840
3841 if (decr_pc != 0)
3842 {
3843 struct regcache *regcache
3844 = get_thread_regcache (current_thread, 1);
3845 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3846 }
3847 }
3848
3849 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3850 {
3851 get_syscall_trapinfo (event_child,
3852 &ourstatus->value.syscall_number);
3853 ourstatus->kind = event_child->syscall_state;
3854 }
3855 else if (current_thread->last_resume_kind == resume_stop
3856 && WSTOPSIG (w) == SIGSTOP)
3857 {
3858       /* A thread that GDB requested to stop with vCont;t has stopped
3859 	 cleanly, so report it as SIG0.  The use of
3860 SIGSTOP is an implementation detail. */
3861 ourstatus->value.sig = GDB_SIGNAL_0;
3862 }
3863 else if (current_thread->last_resume_kind == resume_stop
3864 && WSTOPSIG (w) != SIGSTOP)
3865 {
3866       /* A thread that GDB requested to stop with vCont;t, but which
3867 	 stopped for some other reason.  */
3868 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3869 }
3870 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3871 {
3872 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3873 }
3874
3875 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3876
3877 if (debug_threads)
3878 {
3879 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3880 target_pid_to_str (ptid_of (current_thread)),
3881 ourstatus->kind, ourstatus->value.sig);
3882 debug_exit ();
3883 }
3884
3885 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3886 return filter_exit_event (event_child, ourstatus);
3887
3888 return ptid_of (current_thread);
3889 }
3890
3891 /* Get rid of any pending event in the pipe. */
3892 static void
3893 async_file_flush (void)
3894 {
3895 int ret;
3896 char buf;
3897
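  /* The event pipe is non-blocking, so keep reading until read fails
     (EAGAIN once the pipe is drained), retrying on EINTR.  */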
3898 do
3899 ret = read (linux_event_pipe[0], &buf, 1);
3900 while (ret >= 0 || (ret == -1 && errno == EINTR));
3901 }
3902
3903 /* Put something in the pipe, so the event loop wakes up. */
3904 static void
3905 async_file_mark (void)
3906 {
3907 int ret;
3908
3909 async_file_flush ();
3910
3911 do
3912 ret = write (linux_event_pipe[1], "+", 1);
3913 while (ret == 0 || (ret == -1 && errno == EINTR));
3914
3915 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3916 be awakened anyway. */
3917 }
3918
3919 static ptid_t
3920 linux_wait (ptid_t ptid,
3921 struct target_waitstatus *ourstatus, int target_options)
3922 {
3923 ptid_t event_ptid;
3924
3925 /* Flush the async file first. */
3926 if (target_is_async_p ())
3927 async_file_flush ();
3928
3929 do
3930 {
3931 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3932 }
3933 while ((target_options & TARGET_WNOHANG) == 0
3934 && ptid_equal (event_ptid, null_ptid)
3935 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3936
3937 /* If at least one stop was reported, there may be more. A single
3938 SIGCHLD can signal more than one child stop. */
3939 if (target_is_async_p ()
3940 && (target_options & TARGET_WNOHANG) != 0
3941 && !ptid_equal (event_ptid, null_ptid))
3942 async_file_mark ();
3943
3944 return event_ptid;
3945 }
3946
3947 /* Send a signal to an LWP. */
3948
3949 static int
3950 kill_lwp (unsigned long lwpid, int signo)
3951 {
3952 int ret;
3953
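  /* Use tkill so the signal is delivered to this specific thread;
     kill would signal the whole thread group instead.  */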
3954 errno = 0;
3955 ret = syscall (__NR_tkill, lwpid, signo);
3956 if (errno == ENOSYS)
3957 {
3958 /* If tkill fails, then we are not using nptl threads, a
3959 configuration we no longer support. */
3960 perror_with_name (("tkill"));
3961 }
3962 return ret;
3963 }
3964
3965 void
3966 linux_stop_lwp (struct lwp_info *lwp)
3967 {
3968 send_sigstop (lwp);
3969 }
3970
3971 static void
3972 send_sigstop (struct lwp_info *lwp)
3973 {
3974 int pid;
3975
3976 pid = lwpid_of (get_lwp_thread (lwp));
3977
3978 /* If we already have a pending stop signal for this process, don't
3979 send another. */
3980 if (lwp->stop_expected)
3981 {
3982 if (debug_threads)
3983 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3984
3985 return;
3986 }
3987
3988 if (debug_threads)
3989 debug_printf ("Sending sigstop to lwp %d\n", pid);
3990
3991 lwp->stop_expected = 1;
3992 kill_lwp (pid, SIGSTOP);
3993 }
3994
3995 static int
3996 send_sigstop_callback (thread_info *thread, void *except)
3997 {
3998 struct lwp_info *lwp = get_thread_lwp (thread);
3999
4000 /* Ignore EXCEPT. */
4001 if (lwp == except)
4002 return 0;
4003
4004 if (lwp->stopped)
4005 return 0;
4006
4007 send_sigstop (lwp);
4008 return 0;
4009 }
4010
4011 /* Increment the suspend count of an LWP, and stop it, if not stopped
4012 yet. */
4013 static int
4014 suspend_and_send_sigstop_callback (thread_info *thread, void *except)
4015 {
4016 struct lwp_info *lwp = get_thread_lwp (thread);
4017
4018 /* Ignore EXCEPT. */
4019 if (lwp == except)
4020 return 0;
4021
4022 lwp_suspended_inc (lwp);
4023
4024 return send_sigstop_callback (thread, except);
4025 }
4026
4027 static void
4028 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4029 {
4030 /* Store the exit status for later. */
4031 lwp->status_pending_p = 1;
4032 lwp->status_pending = wstat;
4033
4034 /* Store in waitstatus as well, as there's nothing else to process
4035 for this event. */
4036 if (WIFEXITED (wstat))
4037 {
4038 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4039 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4040 }
4041 else if (WIFSIGNALED (wstat))
4042 {
4043 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4044 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4045 }
4046
4047 /* Prevent trying to stop it. */
4048 lwp->stopped = 1;
4049
4050 /* No further stops are expected from a dead lwp. */
4051 lwp->stop_expected = 0;
4052 }
4053
4054 /* Return true if LWP has exited already, and has a pending exit event
4055 to report to GDB. */
4056
4057 static int
4058 lwp_is_marked_dead (struct lwp_info *lwp)
4059 {
4060 return (lwp->status_pending_p
4061 && (WIFEXITED (lwp->status_pending)
4062 || WIFSIGNALED (lwp->status_pending)));
4063 }
4064
4065 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4066
4067 static void
4068 wait_for_sigstop (void)
4069 {
4070 struct thread_info *saved_thread;
4071 ptid_t saved_tid;
4072 int wstat;
4073 int ret;
4074
4075 saved_thread = current_thread;
4076 if (saved_thread != NULL)
4077 saved_tid = saved_thread->id;
4078 else
4079 saved_tid = null_ptid; /* avoid bogus unused warning */
4080
4081 if (debug_threads)
4082 debug_printf ("wait_for_sigstop: pulling events\n");
4083
4084 /* Passing NULL_PTID as filter indicates we want all events to be
4085 left pending. Eventually this returns when there are no
4086 unwaited-for children left. */
4087 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4088 &wstat, __WALL);
4089 gdb_assert (ret == -1);
4090
4091 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4092 current_thread = saved_thread;
4093 else
4094 {
4095 if (debug_threads)
4096 debug_printf ("Previously current thread died.\n");
4097
4098 /* We can't change the current inferior behind GDB's back,
4099 otherwise, a subsequent command may apply to the wrong
4100 process. */
4101 current_thread = NULL;
4102 }
4103 }
4104
4105 /* Returns true if THREAD is stopped in a jump pad, and we can't
4106 move it out, because we need to report the stop event to GDB. For
4107 example, if the user puts a breakpoint in the jump pad, it's
4108 because she wants to debug it. */
4109
4110 static bool
4111 stuck_in_jump_pad_callback (thread_info *thread)
4112 {
4113 struct lwp_info *lwp = get_thread_lwp (thread);
4114
4115 if (lwp->suspended != 0)
4116 {
4117 internal_error (__FILE__, __LINE__,
4118 "LWP %ld is suspended, suspended=%d\n",
4119 lwpid_of (thread), lwp->suspended);
4120 }
4121 gdb_assert (lwp->stopped);
4122
4123   /* Allow debugging the jump pad, gdb_collect, etc.  */
4124 return (supports_fast_tracepoints ()
4125 && agent_loaded_p ()
4126 && (gdb_breakpoint_here (lwp->stop_pc)
4127 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4128 || thread->last_resume_kind == resume_step)
4129 && (linux_fast_tracepoint_collecting (lwp, NULL)
4130 != fast_tpoint_collect_result::not_collecting));
4131 }
4132
4133 static void
4134 move_out_of_jump_pad_callback (thread_info *thread)
4135 {
4136 struct thread_info *saved_thread;
4137 struct lwp_info *lwp = get_thread_lwp (thread);
4138 int *wstat;
4139
4140 if (lwp->suspended != 0)
4141 {
4142 internal_error (__FILE__, __LINE__,
4143 "LWP %ld is suspended, suspended=%d\n",
4144 lwpid_of (thread), lwp->suspended);
4145 }
4146 gdb_assert (lwp->stopped);
4147
4148 /* For gdb_breakpoint_here. */
4149 saved_thread = current_thread;
4150 current_thread = thread;
4151
4152 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4153
4154 /* Allow debugging the jump pad, gdb_collect, etc. */
4155 if (!gdb_breakpoint_here (lwp->stop_pc)
4156 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4157 && thread->last_resume_kind != resume_step
4158 && maybe_move_out_of_jump_pad (lwp, wstat))
4159 {
4160 if (debug_threads)
4161 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4162 lwpid_of (thread));
4163
4164 if (wstat)
4165 {
4166 lwp->status_pending_p = 0;
4167 enqueue_one_deferred_signal (lwp, wstat);
4168
4169 if (debug_threads)
4170 debug_printf ("Signal %d for LWP %ld deferred "
4171 "(in jump pad)\n",
4172 WSTOPSIG (*wstat), lwpid_of (thread));
4173 }
4174
4175 linux_resume_one_lwp (lwp, 0, 0, NULL);
4176 }
4177 else
4178 lwp_suspended_inc (lwp);
4179
4180 current_thread = saved_thread;
4181 }
4182
4183 static int
4184 lwp_running (thread_info *thread, void *data)
4185 {
4186 struct lwp_info *lwp = get_thread_lwp (thread);
4187
4188 if (lwp_is_marked_dead (lwp))
4189 return 0;
4190 if (lwp->stopped)
4191 return 0;
4192 return 1;
4193 }
4194
4195 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4196 If SUSPEND, then also increase the suspend count of every LWP,
4197 except EXCEPT. */
4198
4199 static void
4200 stop_all_lwps (int suspend, struct lwp_info *except)
4201 {
4202 /* Should not be called recursively. */
4203 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4204
4205 if (debug_threads)
4206 {
4207 debug_enter ();
4208 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4209 suspend ? "stop-and-suspend" : "stop",
4210 except != NULL
4211 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4212 : "none");
4213 }
4214
4215 stopping_threads = (suspend
4216 ? STOPPING_AND_SUSPENDING_THREADS
4217 : STOPPING_THREADS);
4218
4219 if (suspend)
4220 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4221 else
4222 find_inferior (&all_threads, send_sigstop_callback, except);
4223 wait_for_sigstop ();
4224 stopping_threads = NOT_STOPPING_THREADS;
4225
4226 if (debug_threads)
4227 {
4228 debug_printf ("stop_all_lwps done, setting stopping_threads "
4229 "back to !stopping\n");
4230 debug_exit ();
4231 }
4232 }
4233
4234 /* Enqueue one signal in the chain of signals which need to be
4235 delivered to this process on next resume. */
4236
4237 static void
4238 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4239 {
4240 struct pending_signals *p_sig = XNEW (struct pending_signals);
4241
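  /* Push onto the head of the chain; the oldest entry ends up at the
     far end and is dequeued first when the LWP is next resumed.  */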
4242 p_sig->prev = lwp->pending_signals;
4243 p_sig->signal = signal;
4244 if (info == NULL)
4245 memset (&p_sig->info, 0, sizeof (siginfo_t));
4246 else
4247 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4248 lwp->pending_signals = p_sig;
4249 }
4250
4251 /* Install breakpoints for software single stepping. */
4252
4253 static void
4254 install_software_single_step_breakpoints (struct lwp_info *lwp)
4255 {
4256 struct thread_info *thread = get_lwp_thread (lwp);
4257 struct regcache *regcache = get_thread_regcache (thread, 1);
4258 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4259
4260 current_thread = thread;
4261 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4262
4263 for (CORE_ADDR pc : next_pcs)
4264 set_single_step_breakpoint (pc, current_ptid);
4265
4266 do_cleanups (old_chain);
4267 }
4268
4269 /* Single step via hardware or software single step.
4270 Return 1 if hardware single stepping, 0 if software single stepping
4271 or can't single step. */
4272
4273 static int
4274 single_step (struct lwp_info* lwp)
4275 {
4276 int step = 0;
4277
4278 if (can_hardware_single_step ())
4279 {
4280 step = 1;
4281 }
4282 else if (can_software_single_step ())
4283 {
4284 install_software_single_step_breakpoints (lwp);
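      /* The single-step breakpoints do the stepping; resume the LWP
	 with a plain continue.  */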
4285 step = 0;
4286 }
4287 else
4288 {
4289 if (debug_threads)
4290 debug_printf ("stepping is not implemented on this target");
4291 }
4292
4293 return step;
4294 }
4295
4296 /* The signal can be delivered to the inferior if we are not trying to
4297    finish a fast tracepoint collect.  Since a signal can be delivered
4298    during a step-over, the program may enter the signal handler and
4299    trap again after returning from it.  We can live with the spurious
4300    double traps.  */
4301
4302 static int
4303 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4304 {
4305 return (lwp->collecting_fast_tracepoint
4306 == fast_tpoint_collect_result::not_collecting);
4307 }
4308
4309 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4310 SIGNAL is nonzero, give it that signal. */
4311
4312 static void
4313 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4314 int step, int signal, siginfo_t *info)
4315 {
4316 struct thread_info *thread = get_lwp_thread (lwp);
4317 struct thread_info *saved_thread;
4318 int ptrace_request;
4319 struct process_info *proc = get_thread_process (thread);
4320
4321   /* Note that the target description may not be initialised
4322      (proc->tdesc == NULL) at this point because the program hasn't
4323      stopped at its first instruction yet.  This means GDBserver is
4324      still skipping the extra traps from the wrapper program (see
4325      option --wrapper).  Code in this function that requires register
4326      access should be guarded by a check on proc->tdesc.  */
4327
4328 if (lwp->stopped == 0)
4329 return;
4330
4331 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4332
4333 fast_tpoint_collect_result fast_tp_collecting
4334 = lwp->collecting_fast_tracepoint;
4335
4336 gdb_assert (!stabilizing_threads
4337 || (fast_tp_collecting
4338 != fast_tpoint_collect_result::not_collecting));
4339
4340 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4341 user used the "jump" command, or "set $pc = foo"). */
4342 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4343 {
4344 /* Collecting 'while-stepping' actions doesn't make sense
4345 anymore. */
4346 release_while_stepping_state_list (thread);
4347 }
4348
4349 /* If we have pending signals or status, and a new signal, enqueue the
4350 signal. Also enqueue the signal if it can't be delivered to the
4351 inferior right now. */
4352 if (signal != 0
4353 && (lwp->status_pending_p
4354 || lwp->pending_signals != NULL
4355 || !lwp_signal_can_be_delivered (lwp)))
4356 {
4357 enqueue_pending_signal (lwp, signal, info);
4358
4359 /* Postpone any pending signal. It was enqueued above. */
4360 signal = 0;
4361 }
4362
4363 if (lwp->status_pending_p)
4364 {
4365 if (debug_threads)
4366 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4367 " has pending status\n",
4368 lwpid_of (thread), step ? "step" : "continue",
4369 lwp->stop_expected ? "expected" : "not expected");
4370 return;
4371 }
4372
4373 saved_thread = current_thread;
4374 current_thread = thread;
4375
4376 /* This bit needs some thinking about. If we get a signal that
4377 we must report while a single-step reinsert is still pending,
4378 we often end up resuming the thread. It might be better to
4379 (ew) allow a stack of pending events; then we could be sure that
4380 the reinsert happened right away and not lose any signals.
4381
4382 Making this stack would also shrink the window in which breakpoints are
4383 uninserted (see comment in linux_wait_for_lwp) but not enough for
4384 complete correctness, so it won't solve that problem. It may be
4385 worthwhile just to solve this one, however. */
4386 if (lwp->bp_reinsert != 0)
4387 {
4388 if (debug_threads)
4389 debug_printf (" pending reinsert at 0x%s\n",
4390 paddress (lwp->bp_reinsert));
4391
4392 if (can_hardware_single_step ())
4393 {
4394 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4395 {
4396 if (step == 0)
4397 warning ("BAD - reinserting but not stepping.");
4398 if (lwp->suspended)
4399 warning ("BAD - reinserting and suspended(%d).",
4400 lwp->suspended);
4401 }
4402 }
4403
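      /* Step over the reinsert with hardware single-step if available;
	 otherwise the single-step breakpoints installed earlier do the
	 stepping and we continue instead.  */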
4404 step = maybe_hw_step (thread);
4405 }
4406
4407 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4408 {
4409 if (debug_threads)
4410 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4411 " (exit-jump-pad-bkpt)\n",
4412 lwpid_of (thread));
4413 }
4414 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4415 {
4416 if (debug_threads)
4417 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4418 " single-stepping\n",
4419 lwpid_of (thread));
4420
4421 if (can_hardware_single_step ())
4422 step = 1;
4423 else
4424 {
4425 internal_error (__FILE__, __LINE__,
4426 "moving out of jump pad single-stepping"
4427 " not implemented on this target");
4428 }
4429 }
4430
4431 /* If we have while-stepping actions in this thread set it stepping.
4432 If we have a signal to deliver, it may or may not be set to
4433 SIG_IGN, we don't know. Assume so, and allow collecting
4434 while-stepping into a signal handler. A possible smart thing to
4435 do would be to set an internal breakpoint at the signal return
4436 address, continue, and carry on catching this while-stepping
4437 action only when that breakpoint is hit. A future
4438 enhancement. */
4439 if (thread->while_stepping != NULL)
4440 {
4441 if (debug_threads)
4442 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4443 lwpid_of (thread));
4444
4445 step = single_step (lwp);
4446 }
4447
4448 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4449 {
4450 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4451
4452 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4453
4454 if (debug_threads)
4455 {
4456 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4457 (long) lwp->stop_pc);
4458 }
4459 }
4460
4461 /* If we have pending signals, consume one if it can be delivered to
4462 the inferior. */
4463 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4464 {
4465 struct pending_signals **p_sig;
4466
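      /* Walk to the oldest pending signal (the far end of the chain)
	 so signals are delivered in the order they arrived.  */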
4467 p_sig = &lwp->pending_signals;
4468 while ((*p_sig)->prev != NULL)
4469 p_sig = &(*p_sig)->prev;
4470
4471 signal = (*p_sig)->signal;
4472 if ((*p_sig)->info.si_signo != 0)
4473 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4474 &(*p_sig)->info);
4475
4476 free (*p_sig);
4477 *p_sig = NULL;
4478 }
4479
4480 if (debug_threads)
4481 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4482 lwpid_of (thread), step ? "step" : "continue", signal,
4483 lwp->stop_expected ? "expected" : "not expected");
4484
4485 if (the_low_target.prepare_to_resume != NULL)
4486 the_low_target.prepare_to_resume (lwp);
4487
4488 regcache_invalidate_thread (thread);
4489 errno = 0;
4490 lwp->stepping = step;
4491 if (step)
4492 ptrace_request = PTRACE_SINGLESTEP;
4493 else if (gdb_catching_syscalls_p (lwp))
4494 ptrace_request = PTRACE_SYSCALL;
4495 else
4496 ptrace_request = PTRACE_CONT;
4497 ptrace (ptrace_request,
4498 lwpid_of (thread),
4499 (PTRACE_TYPE_ARG3) 0,
4500 /* Coerce to a uintptr_t first to avoid potential gcc warning
4501 of coercing an 8 byte integer to a 4 byte pointer. */
4502 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4503
4504 current_thread = saved_thread;
4505 if (errno)
4506 perror_with_name ("resuming thread");
4507
4508 /* Successfully resumed. Clear state that no longer makes sense,
4509 and mark the LWP as running. Must not do this before resuming
4510 otherwise if that fails other code will be confused. E.g., we'd
4511 later try to stop the LWP and hang forever waiting for a stop
4512 status. Note that we must not throw after this is cleared,
4513 otherwise handle_zombie_lwp_error would get confused. */
4514 lwp->stopped = 0;
4515 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4516 }
4517
4518 /* Called when we try to resume a stopped LWP and that errors out. If
4519 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4520 or about to become), discard the error, clear any pending status
4521 the LWP may have, and return true (we'll collect the exit status
4522 soon enough). Otherwise, return false. */
4523
4524 static int
4525 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4526 {
4527 struct thread_info *thread = get_lwp_thread (lp);
4528
4529 /* If we get an error after resuming the LWP successfully, we'd
4530 confuse !T state for the LWP being gone. */
4531 gdb_assert (lp->stopped);
4532
4533 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4534 because even if ptrace failed with ESRCH, the tracee may be "not
4535 yet fully dead", but already refusing ptrace requests. In that
4536 case the tracee has 'R (Running)' state for a little bit
4537 (observed in Linux 3.18). See also the note on ESRCH in the
4538 ptrace(2) man page. Instead, check whether the LWP has any state
4539 other than ptrace-stopped. */
4540
4541 /* Don't assume anything if /proc/PID/status can't be read. */
4542 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4543 {
4544 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4545 lp->status_pending_p = 0;
4546 return 1;
4547 }
4548 return 0;
4549 }
4550
4551 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4552 disappears while we try to resume it. */
4553
4554 static void
4555 linux_resume_one_lwp (struct lwp_info *lwp,
4556 int step, int signal, siginfo_t *info)
4557 {
4558 TRY
4559 {
4560 linux_resume_one_lwp_throw (lwp, step, signal, info);
4561 }
4562 CATCH (ex, RETURN_MASK_ERROR)
4563 {
4564 if (!check_ptrace_stopped_lwp_gone (lwp))
4565 throw_exception (ex);
4566 }
4567 END_CATCH
4568 }
4569
4570 /* This function is called once per thread via for_each_thread.
4571 We look up which resume request applies to THREAD and mark it with a
4572 pointer to the appropriate resume request.
4573
4574 This algorithm is O(threads * resume elements), but resume elements
4575 is small (and will remain small at least until GDB supports thread
4576 suspension). */
4577
4578 static void
4579 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4580 {
4581 struct lwp_info *lwp = get_thread_lwp (thread);
4582
4583 for (int ndx = 0; ndx < n; ndx++)
4584 {
4585 ptid_t ptid = resume[ndx].thread;
4586 if (ptid_equal (ptid, minus_one_ptid)
4587 || ptid == thread->id
4588 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4589 of PID'. */
4590 || (ptid_get_pid (ptid) == pid_of (thread)
4591 && (ptid_is_pid (ptid)
4592 || ptid_get_lwp (ptid) == -1)))
4593 {
4594 if (resume[ndx].kind == resume_stop
4595 && thread->last_resume_kind == resume_stop)
4596 {
4597 if (debug_threads)
4598 debug_printf ("already %s LWP %ld at GDB's request\n",
4599 (thread->last_status.kind
4600 == TARGET_WAITKIND_STOPPED)
4601 ? "stopped"
4602 : "stopping",
4603 lwpid_of (thread));
4604
4605 continue;
4606 }
4607
4608 /* Ignore (wildcard) resume requests for already-resumed
4609 threads. */
4610 if (resume[ndx].kind != resume_stop
4611 && thread->last_resume_kind != resume_stop)
4612 {
4613 if (debug_threads)
4614 debug_printf ("already %s LWP %ld at GDB's request\n",
4615 (thread->last_resume_kind
4616 == resume_step)
4617 ? "stepping"
4618 : "continuing",
4619 lwpid_of (thread));
4620 continue;
4621 }
4622
4623 /* Don't let wildcard resumes resume fork children that GDB
4624 does not yet know are new fork children. */
4625 if (lwp->fork_relative != NULL)
4626 {
4627 struct lwp_info *rel = lwp->fork_relative;
4628
4629 if (rel->status_pending_p
4630 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4631 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4632 {
4633 if (debug_threads)
4634 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4635 lwpid_of (thread));
4636 continue;
4637 }
4638 }
4639
4640 /* If the thread has a pending event that has already been
4641 reported to GDBserver core, but GDB has not pulled the
4642 event out of the vStopped queue yet, likewise, ignore the
4643 (wildcard) resume request. */
4644 if (in_queued_stop_replies (thread->id))
4645 {
4646 if (debug_threads)
4647 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4648 lwpid_of (thread));
4649 continue;
4650 }
4651
4652 lwp->resume = &resume[ndx];
4653 thread->last_resume_kind = lwp->resume->kind;
4654
4655 lwp->step_range_start = lwp->resume->step_range_start;
4656 lwp->step_range_end = lwp->resume->step_range_end;
4657
4658 /* If we had a deferred signal to report, dequeue one now.
4659 This can happen if LWP gets more than one signal while
4660 trying to get out of a jump pad. */
4661 if (lwp->stopped
4662 && !lwp->status_pending_p
4663 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4664 {
4665 lwp->status_pending_p = 1;
4666
4667 if (debug_threads)
4668 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4669 "leaving status pending.\n",
4670 WSTOPSIG (lwp->status_pending),
4671 lwpid_of (thread));
4672 }
4673
4674 return;
4675 }
4676 }
4677
4678 /* No resume action for this thread. */
4679 lwp->resume = NULL;
4680 }
4681
4682 /* find_thread callback for linux_resume.
4683    Return true if this lwp has an interesting status pending.  */
4684
4685 static bool
4686 resume_status_pending_p (thread_info *thread)
4687 {
4688 struct lwp_info *lwp = get_thread_lwp (thread);
4689
4690 /* LWPs which will not be resumed are not interesting, because
4691 we might not wait for them next time through linux_wait. */
4692 if (lwp->resume == NULL)
4693 return false;
4694
4695 return thread_still_has_status_pending_p (thread);
4696 }
4697
4698 /* Return true if this lwp that GDB wants running is stopped at an
4699 internal breakpoint that we need to step over. It assumes that any
4700 required STOP_PC adjustment has already been propagated to the
4701 inferior's regcache. */
4702
4703 static bool
4704 need_step_over_p (thread_info *thread)
4705 {
4706 struct lwp_info *lwp = get_thread_lwp (thread);
4707 struct thread_info *saved_thread;
4708 CORE_ADDR pc;
4709 struct process_info *proc = get_thread_process (thread);
4710
4711 /* GDBserver is skipping the extra traps from the wrapper program,
4712    so we don't have to do a step-over.  */
4713 if (proc->tdesc == NULL)
4714 return false;
4715
4716 /* LWPs which will not be resumed are not interesting, because we
4717 might not wait for them next time through linux_wait. */
4718
4719 if (!lwp->stopped)
4720 {
4721 if (debug_threads)
4722 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4723 lwpid_of (thread));
4724 return false;
4725 }
4726
4727 if (thread->last_resume_kind == resume_stop)
4728 {
4729 if (debug_threads)
4730 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4731 " stopped\n",
4732 lwpid_of (thread));
4733 return false;
4734 }
4735
4736 gdb_assert (lwp->suspended >= 0);
4737
4738 if (lwp->suspended)
4739 {
4740 if (debug_threads)
4741 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4742 lwpid_of (thread));
4743 return false;
4744 }
4745
4746 if (lwp->status_pending_p)
4747 {
4748 if (debug_threads)
4749 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4750 " status.\n",
4751 lwpid_of (thread));
4752 return false;
4753 }
4754
4755 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4756 or we have. */
4757 pc = get_pc (lwp);
4758
4759 /* If the PC has changed since we stopped, then don't do anything,
4760 and let the breakpoint/tracepoint be hit. This happens if, for
4761 instance, GDB handled the decr_pc_after_break subtraction itself,
4762 GDB is OOL stepping this thread, or the user has issued a "jump"
4763 command, or poked thread's registers herself. */
4764 if (pc != lwp->stop_pc)
4765 {
4766 if (debug_threads)
4767 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4768 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4769 lwpid_of (thread),
4770 paddress (lwp->stop_pc), paddress (pc));
4771 return false;
4772 }
4773
4774 /* On software single step target, resume the inferior with signal
4775 rather than stepping over. */
4776 if (can_software_single_step ()
4777 && lwp->pending_signals != NULL
4778 && lwp_signal_can_be_delivered (lwp))
4779 {
4780 if (debug_threads)
4781 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4782 " signals.\n",
4783 lwpid_of (thread));
4784
4785 return false;
4786 }
4787
4788 saved_thread = current_thread;
4789 current_thread = thread;
4790
4791 /* We can only step over breakpoints we know about. */
4792 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4793 {
4794 /* Don't step over a breakpoint that GDB expects to hit
4795 though. If the condition is being evaluated on the target's side
4796 	 and it evaluates to false, step over this breakpoint as well.  */
4797 if (gdb_breakpoint_here (pc)
4798 && gdb_condition_true_at_breakpoint (pc)
4799 && gdb_no_commands_at_breakpoint (pc))
4800 {
4801 if (debug_threads)
4802 debug_printf ("Need step over [LWP %ld]? yes, but found"
4803 " GDB breakpoint at 0x%s; skipping step over\n",
4804 lwpid_of (thread), paddress (pc));
4805
4806 current_thread = saved_thread;
4807 return false;
4808 }
4809 else
4810 {
4811 if (debug_threads)
4812 debug_printf ("Need step over [LWP %ld]? yes, "
4813 "found breakpoint at 0x%s\n",
4814 lwpid_of (thread), paddress (pc));
4815
4816 	  /* We've found an lwp that needs stepping over --- return true so
4817 	     that find_thread stops looking.  */
4818 current_thread = saved_thread;
4819
4820 return true;
4821 }
4822 }
4823
4824 current_thread = saved_thread;
4825
4826 if (debug_threads)
4827 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4828 " at 0x%s\n",
4829 lwpid_of (thread), paddress (pc));
4830
4831 return false;
4832 }
4833
4834 /* Start a step-over operation on LWP.  When LWP is stopped at a
4835    breakpoint, to make progress, we need to move the breakpoint out
4836 of the way. If we let other threads run while we do that, they may
4837 pass by the breakpoint location and miss hitting it. To avoid
4838 that, a step-over momentarily stops all threads while LWP is
4839 single-stepped by either hardware or software while the breakpoint
4840 is temporarily uninserted from the inferior. When the single-step
4841 finishes, we reinsert the breakpoint, and let all threads that are
4842 supposed to be running, run again. */
4843
4844 static int
4845 start_step_over (struct lwp_info *lwp)
4846 {
4847 struct thread_info *thread = get_lwp_thread (lwp);
4848 struct thread_info *saved_thread;
4849 CORE_ADDR pc;
4850 int step;
4851
4852 if (debug_threads)
4853 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4854 lwpid_of (thread));
4855
4856 stop_all_lwps (1, lwp);
4857
4858 if (lwp->suspended != 0)
4859 {
4860 internal_error (__FILE__, __LINE__,
4861 "LWP %ld suspended=%d\n", lwpid_of (thread),
4862 lwp->suspended);
4863 }
4864
4865 if (debug_threads)
4866 debug_printf ("Done stopping all threads for step-over.\n");
4867
4868 /* Note, we should always reach here with an already adjusted PC,
4869 either by GDB (if we're resuming due to GDB's request), or by our
4870 caller, if we just finished handling an internal breakpoint GDB
4871 shouldn't care about. */
4872 pc = get_pc (lwp);
4873
4874 saved_thread = current_thread;
4875 current_thread = thread;
4876
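  /* Remember where to reinsert the breakpoint once the step finishes,
     then lift any breakpoints and fast tracepoint jumps at PC so the
     stepping LWP can make progress.  */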
4877 lwp->bp_reinsert = pc;
4878 uninsert_breakpoints_at (pc);
4879 uninsert_fast_tracepoint_jumps_at (pc);
4880
4881 step = single_step (lwp);
4882
4883 current_thread = saved_thread;
4884
4885 linux_resume_one_lwp (lwp, step, 0, NULL);
4886
4887 /* Require next event from this LWP. */
4888 step_over_bkpt = thread->id;
4889 return 1;
4890 }
4891
4892 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4893 start_step_over, if still there, and delete any single-step
4894    breakpoints we've set, on non-hardware single-step targets.  */
4895
4896 static int
4897 finish_step_over (struct lwp_info *lwp)
4898 {
4899 if (lwp->bp_reinsert != 0)
4900 {
4901 struct thread_info *saved_thread = current_thread;
4902
4903 if (debug_threads)
4904 debug_printf ("Finished step over.\n");
4905
4906 current_thread = get_lwp_thread (lwp);
4907
4908 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4909 may be no breakpoint to reinsert there by now. */
4910 reinsert_breakpoints_at (lwp->bp_reinsert);
4911 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4912
4913 lwp->bp_reinsert = 0;
4914
4915 /* Delete any single-step breakpoints. No longer needed. We
4916 don't have to worry about other threads hitting this trap,
4917 and later not being able to explain it, because we were
4918 stepping over a breakpoint, and we hold all threads but
4919 LWP stopped while doing that. */
4920 if (!can_hardware_single_step ())
4921 {
4922 gdb_assert (has_single_step_breakpoints (current_thread));
4923 delete_single_step_breakpoints (current_thread);
4924 }
4925
4926 step_over_bkpt = null_ptid;
4927 current_thread = saved_thread;
4928 return 1;
4929 }
4930 else
4931 return 0;
4932 }
4933
4934 /* If there's a step over in progress, wait until all threads stop
4935 (that is, until the stepping thread finishes its step), and
4936 unsuspend all lwps. The stepping thread ends with its status
4937 pending, which is processed later when we get back to processing
4938 events. */
4939
4940 static void
4941 complete_ongoing_step_over (void)
4942 {
4943 if (!ptid_equal (step_over_bkpt, null_ptid))
4944 {
4945 struct lwp_info *lwp;
4946 int wstat;
4947 int ret;
4948
4949 if (debug_threads)
4950 debug_printf ("detach: step over in progress, finish it first\n");
4951
4952 /* Passing NULL_PTID as filter indicates we want all events to
4953 be left pending. Eventually this returns when there are no
4954 unwaited-for children left. */
4955 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4956 &wstat, __WALL);
4957 gdb_assert (ret == -1);
4958
4959 lwp = find_lwp_pid (step_over_bkpt);
4960 if (lwp != NULL)
4961 finish_step_over (lwp);
4962 step_over_bkpt = null_ptid;
4963 unsuspend_all_lwps (lwp);
4964 }
4965 }
4966
4967 /* This function is called once per thread. We check the thread's resume
4968 request, which will tell us whether to resume, step, or leave the thread
4969 stopped; and what signal, if any, it should be sent.
4970
4971 For threads which we aren't explicitly told otherwise, we preserve
4972 the stepping flag; this is used for stepping over gdbserver-placed
4973 breakpoints.
4974
4975 If pending_flags was set in any thread, we queue any needed
4976 signals, since we won't actually resume. We already have a pending
4977 event to report, so we don't need to preserve any step requests;
4978 they should be re-issued if necessary. */
4979
4980 static int
4981 linux_resume_one_thread (thread_info *thread, void *arg)
4982 {
4983 struct lwp_info *lwp = get_thread_lwp (thread);
4984 int leave_all_stopped = * (int *) arg;
4985 int leave_pending;
4986
4987 if (lwp->resume == NULL)
4988 return 0;
4989
4990 if (lwp->resume->kind == resume_stop)
4991 {
4992 if (debug_threads)
4993 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4994
4995 if (!lwp->stopped)
4996 {
4997 if (debug_threads)
4998 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4999
5000 /* Stop the thread, and wait for the event asynchronously,
5001 through the event loop. */
5002 send_sigstop (lwp);
5003 }
5004 else
5005 {
5006 if (debug_threads)
5007 debug_printf ("already stopped LWP %ld\n",
5008 lwpid_of (thread));
5009
5010 /* The LWP may have been stopped in an internal event that
5011 was not meant to be notified back to GDB (e.g., gdbserver
5012 breakpoint), so we should be reporting a stop event in
5013 this case too. */
5014
5015 /* If the thread already has a pending SIGSTOP, this is a
5016 no-op. Otherwise, something later will presumably resume
5017 the thread and this will cause it to cancel any pending
5018 operation, due to last_resume_kind == resume_stop. If
5019 the thread already has a pending status to report, we
5020 will still report it the next time we wait - see
5021 status_pending_p_callback. */
5022
5023 /* If we already have a pending signal to report, then
5024 there's no need to queue a SIGSTOP, as this means we're
5025 midway through moving the LWP out of the jumppad, and we
5026 will report the pending signal as soon as that is
5027 finished. */
5028 if (lwp->pending_signals_to_report == NULL)
5029 send_sigstop (lwp);
5030 }
5031
5032 /* For stop requests, we're done. */
5033 lwp->resume = NULL;
5034 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5035 return 0;
5036 }
5037
5038 /* If this thread which is about to be resumed has a pending status,
5039 then don't resume it - we can just report the pending status.
5040 Likewise if it is suspended, because e.g., another thread is
5041 stepping past a breakpoint. Make sure to queue any signals that
5042      would otherwise be sent.  In all-stop mode, we make this decision
5043      based on whether *any* thread has a pending status.  If there's a
5044 thread that needs the step-over-breakpoint dance, then don't
5045 resume any other thread but that particular one. */
5046 leave_pending = (lwp->suspended
5047 || lwp->status_pending_p
5048 || leave_all_stopped);
5049
5050 /* If we have a new signal, enqueue the signal. */
5051 if (lwp->resume->sig != 0)
5052 {
5053 siginfo_t info, *info_p;
5054
5055 /* If this is the same signal we were previously stopped by,
5056 make sure to queue its siginfo. */
5057 if (WIFSTOPPED (lwp->last_status)
5058 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5059 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5060 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5061 info_p = &info;
5062 else
5063 info_p = NULL;
5064
5065 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5066 }
5067
5068 if (!leave_pending)
5069 {
5070 if (debug_threads)
5071 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5072
5073 proceed_one_lwp (thread, NULL);
5074 }
5075 else
5076 {
5077 if (debug_threads)
5078 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5079 }
5080
5081 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5082 lwp->resume = NULL;
5083 return 0;
5084 }
5085
5086 static void
5087 linux_resume (struct thread_resume *resume_info, size_t n)
5088 {
5089 struct thread_info *need_step_over = NULL;
5090 int leave_all_stopped;
5091
5092 if (debug_threads)
5093 {
5094 debug_enter ();
5095 debug_printf ("linux_resume:\n");
5096 }
5097
5098 for_each_thread ([&] (thread_info *thread)
5099 {
5100 linux_set_resume_request (thread, resume_info, n);
5101 });
5102
5103 /* If there is a thread which would otherwise be resumed, which has
5104 a pending status, then don't resume any threads - we can just
5105 report the pending status. Make sure to queue any signals that
5106 would otherwise be sent. In non-stop mode, we'll apply this
5107 logic to each thread individually. We consume all pending events
5108 before considering to start a step-over (in all-stop). */
5109 bool any_pending = false;
5110 if (!non_stop)
5111 any_pending = find_thread (resume_status_pending_p) != NULL;
5112
5113 /* If there is a thread which would otherwise be resumed, which is
5114 stopped at a breakpoint that needs stepping over, then don't
5115 resume any threads - have it step over the breakpoint with all
5116 other threads stopped, then resume all threads again. Make sure
5117 to queue any signals that would otherwise be delivered or
5118 queued. */
5119 if (!any_pending && supports_breakpoints ())
5120 need_step_over = find_thread (need_step_over_p);
5121
5122 leave_all_stopped = (need_step_over != NULL || any_pending);
5123
5124 if (debug_threads)
5125 {
5126 if (need_step_over != NULL)
5127 debug_printf ("Not resuming all, need step over\n");
5128 else if (any_pending)
5129 debug_printf ("Not resuming, all-stop and found "
5130 "an LWP with pending status\n");
5131 else
5132 debug_printf ("Resuming, no pending status or step over needed\n");
5133 }
5134
5135 /* Even if we're leaving threads stopped, queue all signals we'd
5136 otherwise deliver. */
5137 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5138
5139 if (need_step_over)
5140 start_step_over (get_thread_lwp (need_step_over));
5141
5142 if (debug_threads)
5143 {
5144 debug_printf ("linux_resume done\n");
5145 debug_exit ();
5146 }
5147
5148 /* We may have events that were pending that can/should be sent to
5149 the client now. Trigger a linux_wait call. */
5150 if (target_is_async_p ())
5151 async_file_mark ();
5152 }
5153
5154 /* This function is called once per thread. We check the thread's
5155 last resume request, which will tell us whether to resume, step, or
5156 leave the thread stopped. Any signal the client requested to be
5157 delivered has already been enqueued at this point.
5158
5159 If any thread that GDB wants running is stopped at an internal
5160 breakpoint that needs stepping over, we start a step-over operation
5161 on that particular thread, and leave all others stopped. */
5162
5163 static int
5164 proceed_one_lwp (thread_info *thread, void *except)
5165 {
5166 struct lwp_info *lwp = get_thread_lwp (thread);
5167 int step;
5168
5169 if (lwp == except)
5170 return 0;
5171
5172 if (debug_threads)
5173 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5174
5175 if (!lwp->stopped)
5176 {
5177 if (debug_threads)
5178 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5179 return 0;
5180 }
5181
5182 if (thread->last_resume_kind == resume_stop
5183 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5184 {
5185 if (debug_threads)
5186 	debug_printf ("   client wants LWP %ld to remain stopped\n",
5187 lwpid_of (thread));
5188 return 0;
5189 }
5190
5191 if (lwp->status_pending_p)
5192 {
5193 if (debug_threads)
5194 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5195 lwpid_of (thread));
5196 return 0;
5197 }
5198
5199 gdb_assert (lwp->suspended >= 0);
5200
5201 if (lwp->suspended)
5202 {
5203 if (debug_threads)
5204 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5205 return 0;
5206 }
5207
5208 if (thread->last_resume_kind == resume_stop
5209 && lwp->pending_signals_to_report == NULL
5210 && (lwp->collecting_fast_tracepoint
5211 == fast_tpoint_collect_result::not_collecting))
5212 {
5213 /* We haven't reported this LWP as stopped yet (otherwise, the
5214 last_status.kind check above would catch it, and we wouldn't
5215 	 reach here).  This LWP may have been momentarily paused by a
5216 stop_all_lwps call while handling for example, another LWP's
5217 step-over. In that case, the pending expected SIGSTOP signal
5218 that was queued at vCont;t handling time will have already
5219 been consumed by wait_for_sigstop, and so we need to requeue
5220 another one here. Note that if the LWP already has a SIGSTOP
5221 pending, this is a no-op. */
5222
5223 if (debug_threads)
5224 debug_printf ("Client wants LWP %ld to stop. "
5225 "Making sure it has a SIGSTOP pending\n",
5226 lwpid_of (thread));
5227
5228 send_sigstop (lwp);
5229 }
5230
5231 if (thread->last_resume_kind == resume_step)
5232 {
5233 if (debug_threads)
5234 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5235 lwpid_of (thread));
5236
5237 /* If resume_step is requested by GDB, install single-step
5238 breakpoints when the thread is about to be actually resumed if
5239 the single-step breakpoints weren't removed. */
5240 if (can_software_single_step ()
5241 && !has_single_step_breakpoints (thread))
5242 install_software_single_step_breakpoints (lwp);
5243
5244 step = maybe_hw_step (thread);
5245 }
5246 else if (lwp->bp_reinsert != 0)
5247 {
5248 if (debug_threads)
5249 debug_printf (" stepping LWP %ld, reinsert set\n",
5250 lwpid_of (thread));
5251
5252 step = maybe_hw_step (thread);
5253 }
5254 else
5255 step = 0;
5256
5257 linux_resume_one_lwp (lwp, step, 0, NULL);
5258 return 0;
5259 }
5260
5261 static int
5262 unsuspend_and_proceed_one_lwp (thread_info *thread, void *except)
5263 {
5264 struct lwp_info *lwp = get_thread_lwp (thread);
5265
5266 if (lwp == except)
5267 return 0;
5268
5269 lwp_suspended_decr (lwp);
5270
5271 return proceed_one_lwp (thread, except);
5272 }
5273
5274 /* When we finish a step-over, set threads running again. If there's
5275 another thread that may need a step-over, now's the time to start
5276 it. Eventually, we'll move all threads past their breakpoints. */
5277
5278 static void
5279 proceed_all_lwps (void)
5280 {
5281 struct thread_info *need_step_over;
5282
5283 /* If there is a thread which would otherwise be resumed, which is
5284 stopped at a breakpoint that needs stepping over, then don't
5285 resume any threads - have it step over the breakpoint with all
5286 other threads stopped, then resume all threads again. */
5287
5288 if (supports_breakpoints ())
5289 {
5290 need_step_over = find_thread (need_step_over_p);
5291
5292 if (need_step_over != NULL)
5293 {
5294 if (debug_threads)
5295 debug_printf ("proceed_all_lwps: found "
5296 "thread %ld needing a step-over\n",
5297 lwpid_of (need_step_over));
5298
5299 start_step_over (get_thread_lwp (need_step_over));
5300 return;
5301 }
5302 }
5303
5304 if (debug_threads)
5305 debug_printf ("Proceeding, no step-over needed\n");
5306
5307 find_inferior (&all_threads, proceed_one_lwp, NULL);
5308 }
5309
5310 /* Stopped LWPs that the client wanted to be running and that don't
5311    have pending statuses are set to run again, except for EXCEPT, if not
5312 NULL. This undoes a stop_all_lwps call. */
5313
5314 static void
5315 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5316 {
5317 if (debug_threads)
5318 {
5319 debug_enter ();
5320 if (except)
5321 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5322 lwpid_of (get_lwp_thread (except)));
5323 else
5324 debug_printf ("unstopping all lwps\n");
5325 }
5326
5327 if (unsuspend)
5328 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5329 else
5330 find_inferior (&all_threads, proceed_one_lwp, except);
5331
5332 if (debug_threads)
5333 {
5334 debug_printf ("unstop_all_lwps done\n");
5335 debug_exit ();
5336 }
5337 }
5338
5339
5340 #ifdef HAVE_LINUX_REGSETS
5341
5342 #define use_linux_regsets 1
5343
5344 /* Returns true if REGSET has been disabled. */
5345
5346 static int
5347 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5348 {
5349 return (info->disabled_regsets != NULL
5350 && info->disabled_regsets[regset - info->regsets]);
5351 }
5352
5353 /* Disable REGSET. */
5354
5355 static void
5356 disable_regset (struct regsets_info *info, struct regset_info *regset)
5357 {
5358 int dr_offset;
5359
5360 dr_offset = regset - info->regsets;
5361 if (info->disabled_regsets == NULL)
5362 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5363 info->disabled_regsets[dr_offset] = 1;
5364 }
5365
5366 static int
5367 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5368 struct regcache *regcache)
5369 {
5370 struct regset_info *regset;
5371 int saw_general_regs = 0;
5372 int pid;
5373 struct iovec iov;
5374
5375 pid = lwpid_of (current_thread);
5376 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5377 {
5378 void *buf, *data;
5379 int nt_type, res;
5380
5381 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5382 continue;
5383
5384 buf = xmalloc (regset->size);
5385
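      /* Regsets with an NT_* note type use the PTRACE_GETREGSET-style
	 interface, which takes an iovec; older requests take the buffer
	 directly.  */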
5386 nt_type = regset->nt_type;
5387 if (nt_type)
5388 {
5389 iov.iov_base = buf;
5390 iov.iov_len = regset->size;
5391 data = (void *) &iov;
5392 }
5393 else
5394 data = buf;
5395
5396 #ifndef __sparc__
5397 res = ptrace (regset->get_request, pid,
5398 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5399 #else
5400 res = ptrace (regset->get_request, pid, data, nt_type);
5401 #endif
5402 if (res < 0)
5403 {
5404 if (errno == EIO)
5405 {
5406 /* If we get EIO on a regset, do not try it again for
5407 this process mode. */
5408 disable_regset (regsets_info, regset);
5409 }
5410 else if (errno == ENODATA)
5411 {
5412 /* ENODATA may be returned if the regset is currently
5413 not "active". This can happen in normal operation,
5414 so suppress the warning in this case. */
5415 }
5416 else if (errno == ESRCH)
5417 {
5418 /* At this point, ESRCH should mean the process is
5419 already gone, in which case we simply ignore attempts
5420 to read its registers. */
5421 }
5422 else
5423 {
5424 char s[256];
5425 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5426 pid);
5427 perror (s);
5428 }
5429 }
5430 else
5431 {
5432 if (regset->type == GENERAL_REGS)
5433 saw_general_regs = 1;
5434 regset->store_function (regcache, buf);
5435 }
5436 free (buf);
5437 }
5438 if (saw_general_regs)
5439 return 0;
5440 else
5441 return 1;
5442 }
5443
5444 static int
5445 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5446 struct regcache *regcache)
5447 {
5448 struct regset_info *regset;
5449 int saw_general_regs = 0;
5450 int pid;
5451 struct iovec iov;
5452
5453 pid = lwpid_of (current_thread);
5454 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5455 {
5456 void *buf, *data;
5457 int nt_type, res;
5458
5459 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5460 || regset->fill_function == NULL)
5461 continue;
5462
5463 buf = xmalloc (regset->size);
5464
5465 /* First fill the buffer with the current register set contents,
5466 in case there are any items in the kernel's regset that are
5467 not in gdbserver's regcache. */
5468
5469 nt_type = regset->nt_type;
5470 if (nt_type)
5471 {
5472 iov.iov_base = buf;
5473 iov.iov_len = regset->size;
5474 data = (void *) &iov;
5475 }
5476 else
5477 data = buf;
5478
5479 #ifndef __sparc__
5480 res = ptrace (regset->get_request, pid,
5481 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5482 #else
5483 res = ptrace (regset->get_request, pid, data, nt_type);
5484 #endif
5485
5486 if (res == 0)
5487 {
5488 /* Then overlay our cached registers on that. */
5489 regset->fill_function (regcache, buf);
5490
5491 /* Only now do we write the register set. */
5492 #ifndef __sparc__
5493 res = ptrace (regset->set_request, pid,
5494 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5495 #else
5496 res = ptrace (regset->set_request, pid, data, nt_type);
5497 #endif
5498 }
5499
5500 if (res < 0)
5501 {
5502 if (errno == EIO)
5503 {
5504 /* If we get EIO on a regset, do not try it again for
5505 this process mode. */
5506 disable_regset (regsets_info, regset);
5507 }
5508 else if (errno == ESRCH)
5509 {
5510 /* At this point, ESRCH should mean the process is
5511 already gone, in which case we simply ignore attempts
5512 to change its registers. See also the related
5513 comment in linux_resume_one_lwp. */
5514 free (buf);
5515 return 0;
5516 }
5517 else
5518 {
5519 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5520 }
5521 }
5522 else if (regset->type == GENERAL_REGS)
5523 saw_general_regs = 1;
5524 free (buf);
5525 }
5526 if (saw_general_regs)
5527 return 0;
5528 else
5529 return 1;
5530 }
5531
5532 #else /* !HAVE_LINUX_REGSETS */
5533
5534 #define use_linux_regsets 0
5535 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5536 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5537
5538 #endif
5539
5540 /* Return 1 if register REGNO is supported by one of the regset ptrace
5541 calls or 0 if it has to be transferred individually. */
5542
5543 static int
5544 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5545 {
5546 unsigned char mask = 1 << (regno % 8);
5547 size_t index = regno / 8;
5548
5549 return (use_linux_regsets
5550 && (regs_info->regset_bitmap == NULL
5551 || (regs_info->regset_bitmap[index] & mask) != 0));
5552 }
5553
5554 #ifdef HAVE_LINUX_USRREGS
5555
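/* Return the PTRACE_PEEKUSER/PTRACE_POKEUSER offset of register REGNUM
as recorded in USRREGS->regmap. Callers treat an offset of -1 as "not
accessible through the USER area". */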
5556 static int
5557 register_addr (const struct usrregs_info *usrregs, int regnum)
5558 {
5559 int addr;
5560
5561 if (regnum < 0 || regnum >= usrregs->num_regs)
5562 error ("Invalid register number %d.", regnum);
5563
5564 addr = usrregs->regmap[regnum];
5565
5566 return addr;
5567 }
5568
5569 /* Fetch one register. */
5570 static void
5571 fetch_register (const struct usrregs_info *usrregs,
5572 struct regcache *regcache, int regno)
5573 {
5574 CORE_ADDR regaddr;
5575 int i, size;
5576 char *buf;
5577 int pid;
5578
5579 if (regno >= usrregs->num_regs)
5580 return;
5581 if ((*the_low_target.cannot_fetch_register) (regno))
5582 return;
5583
5584 regaddr = register_addr (usrregs, regno);
5585 if (regaddr == -1)
5586 return;
5587
5588 size = ((register_size (regcache->tdesc, regno)
5589 + sizeof (PTRACE_XFER_TYPE) - 1)
5590 & -sizeof (PTRACE_XFER_TYPE));
5591 buf = (char *) alloca (size);
5592
5593 pid = lwpid_of (current_thread);
5594 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5595 {
5596 errno = 0;
5597 *(PTRACE_XFER_TYPE *) (buf + i) =
5598 ptrace (PTRACE_PEEKUSER, pid,
5599 /* Coerce to a uintptr_t first to avoid potential gcc warning
5600 of coercing an 8 byte integer to a 4 byte pointer. */
5601 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5602 regaddr += sizeof (PTRACE_XFER_TYPE);
5603 if (errno != 0)
5604 error ("reading register %d: %s", regno, strerror (errno));
5605 }
5606
5607 if (the_low_target.supply_ptrace_register)
5608 the_low_target.supply_ptrace_register (regcache, regno, buf);
5609 else
5610 supply_register (regcache, regno, buf);
5611 }
5612
5613 /* Store one register. */
5614 static void
5615 store_register (const struct usrregs_info *usrregs,
5616 struct regcache *regcache, int regno)
5617 {
5618 CORE_ADDR regaddr;
5619 int i, size;
5620 char *buf;
5621 int pid;
5622
5623 if (regno >= usrregs->num_regs)
5624 return;
5625 if ((*the_low_target.cannot_store_register) (regno))
5626 return;
5627
5628 regaddr = register_addr (usrregs, regno);
5629 if (regaddr == -1)
5630 return;
5631
5632 size = ((register_size (regcache->tdesc, regno)
5633 + sizeof (PTRACE_XFER_TYPE) - 1)
5634 & -sizeof (PTRACE_XFER_TYPE));
5635 buf = (char *) alloca (size);
5636 memset (buf, 0, size);
5637
5638 if (the_low_target.collect_ptrace_register)
5639 the_low_target.collect_ptrace_register (regcache, regno, buf);
5640 else
5641 collect_register (regcache, regno, buf);
5642
5643 pid = lwpid_of (current_thread);
5644 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5645 {
5646 errno = 0;
5647 ptrace (PTRACE_POKEUSER, pid,
5648 /* Coerce to a uintptr_t first to avoid potential gcc warning
5649 about coercing an 8 byte integer to a 4 byte pointer. */
5650 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5651 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5652 if (errno != 0)
5653 {
5654 /* At this point, ESRCH should mean the process is
5655 already gone, in which case we simply ignore attempts
5656 to change its registers. See also the related
5657 comment in linux_resume_one_lwp. */
5658 if (errno == ESRCH)
5659 return;
5660
5661 if ((*the_low_target.cannot_store_register) (regno) == 0)
5662 error ("writing register %d: %s", regno, strerror (errno));
5663 }
5664 regaddr += sizeof (PTRACE_XFER_TYPE);
5665 }
5666 }
5667
5668 /* Fetch all registers, or just one, from the child process.
5669 If REGNO is -1, do this for all registers, skipping any that are
5670 assumed to have been retrieved by regsets_fetch_inferior_registers,
5671 unless ALL is non-zero.
5672 Otherwise, REGNO specifies which register (so we can save time). */
5673 static void
5674 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5675 struct regcache *regcache, int regno, int all)
5676 {
5677 struct usrregs_info *usr = regs_info->usrregs;
5678
5679 if (regno == -1)
5680 {
5681 for (regno = 0; regno < usr->num_regs; regno++)
5682 if (all || !linux_register_in_regsets (regs_info, regno))
5683 fetch_register (usr, regcache, regno);
5684 }
5685 else
5686 fetch_register (usr, regcache, regno);
5687 }
5688
5689 /* Store our register values back into the inferior.
5690 If REGNO is -1, do this for all registers, skipping any that are
5691 assumed to have been saved by regsets_store_inferior_registers,
5692 unless ALL is non-zero.
5693 Otherwise, REGNO specifies which register (so we can save time). */
5694 static void
5695 usr_store_inferior_registers (const struct regs_info *regs_info,
5696 struct regcache *regcache, int regno, int all)
5697 {
5698 struct usrregs_info *usr = regs_info->usrregs;
5699
5700 if (regno == -1)
5701 {
5702 for (regno = 0; regno < usr->num_regs; regno++)
5703 if (all || !linux_register_in_regsets (regs_info, regno))
5704 store_register (usr, regcache, regno);
5705 }
5706 else
5707 store_register (usr, regcache, regno);
5708 }
5709
5710 #else /* !HAVE_LINUX_USRREGS */
5711
5712 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5713 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5714
5715 #endif
5716
5717
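/* Fetch register REGNO from the inferior into REGCACHE, or all registers
if REGNO is -1. The low target's fetch_register hook and the regset
path are tried first; the USER-area path covers whatever the regsets
did not. */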
5718 static void
5719 linux_fetch_registers (struct regcache *regcache, int regno)
5720 {
5721 int use_regsets;
5722 int all = 0;
5723 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5724
5725 if (regno == -1)
5726 {
5727 if (the_low_target.fetch_register != NULL
5728 && regs_info->usrregs != NULL)
5729 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5730 (*the_low_target.fetch_register) (regcache, regno);
5731
5732 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5733 if (regs_info->usrregs != NULL)
5734 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5735 }
5736 else
5737 {
5738 if (the_low_target.fetch_register != NULL
5739 && (*the_low_target.fetch_register) (regcache, regno))
5740 return;
5741
5742 use_regsets = linux_register_in_regsets (regs_info, regno);
5743 if (use_regsets)
5744 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5745 regcache);
5746 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5747 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5748 }
5749 }
5750
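/* Store register REGNO from REGCACHE into the inferior, or all registers
if REGNO is -1. The regset path is preferred; the USER-area path is
used for anything the regsets do not cover. */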
5751 static void
5752 linux_store_registers (struct regcache *regcache, int regno)
5753 {
5754 int use_regsets;
5755 int all = 0;
5756 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5757
5758 if (regno == -1)
5759 {
5760 all = regsets_store_inferior_registers (regs_info->regsets_info,
5761 regcache);
5762 if (regs_info->usrregs != NULL)
5763 usr_store_inferior_registers (regs_info, regcache, regno, all);
5764 }
5765 else
5766 {
5767 use_regsets = linux_register_in_regsets (regs_info, regno);
5768 if (use_regsets)
5769 all = regsets_store_inferior_registers (regs_info->regsets_info,
5770 regcache);
5771 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5772 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5773 }
5774 }
5775
5776
5777 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5778 to debugger memory starting at MYADDR. Returns 0 on success, or the value of errno on failure. */
5779
5780 static int
5781 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5782 {
5783 int pid = lwpid_of (current_thread);
5784 PTRACE_XFER_TYPE *buffer;
5785 CORE_ADDR addr;
5786 int count;
5787 char filename[64];
5788 int i;
5789 int ret;
5790 int fd;
5791
5792 /* Try using /proc. Don't bother for one word. */
5793 if (len >= 3 * sizeof (long))
5794 {
5795 int bytes;
5796
5797 /* We could keep this file open and cache it - possibly one per
5798 thread. That requires some juggling, but is even faster. */
5799 sprintf (filename, "/proc/%d/mem", pid);
5800 fd = open (filename, O_RDONLY | O_LARGEFILE);
5801 if (fd == -1)
5802 goto no_proc;
5803
5804 /* If pread64 is available, use it. It's faster if the kernel
5805 supports it (only one syscall), and it's 64-bit safe even on
5806 32-bit platforms (for instance, SPARC debugging a SPARC64
5807 application). */
5808 #ifdef HAVE_PREAD64
5809 bytes = pread64 (fd, myaddr, len, memaddr);
5810 #else
5811 bytes = -1;
5812 if (lseek (fd, memaddr, SEEK_SET) != -1)
5813 bytes = read (fd, myaddr, len);
5814 #endif
5815
5816 close (fd);
5817 if (bytes == len)
5818 return 0;
5819
5820 /* Some data was read, we'll try to get the rest with ptrace. */
5821 if (bytes > 0)
5822 {
5823 memaddr += bytes;
5824 myaddr += bytes;
5825 len -= bytes;
5826 }
5827 }
5828
5829 no_proc:
5830 /* Round starting address down to longword boundary. */
5831 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5832 /* Round ending address up; get number of longwords that makes. */
5833 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5834 / sizeof (PTRACE_XFER_TYPE));
5835 /* Allocate buffer of that many longwords. */
5836 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5837
5838 /* Read all the longwords. */
5839 errno = 0;
5840 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5841 {
5842 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5843 about coercing an 8 byte integer to a 4 byte pointer. */
5844 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5845 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5846 (PTRACE_TYPE_ARG4) 0);
5847 if (errno)
5848 break;
5849 }
5850 ret = errno;
5851
5852 /* Copy appropriate bytes out of the buffer. */
5853 if (i > 0)
5854 {
5855 i *= sizeof (PTRACE_XFER_TYPE);
5856 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5857 memcpy (myaddr,
5858 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5859 i < len ? i : len);
5860 }
5861
5862 return ret;
5863 }
5864
5865 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5866 memory at MEMADDR. On failure (cannot write to the inferior)
5867 returns the value of errno. Always succeeds if LEN is zero. */
5868
5869 static int
5870 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5871 {
5872 int i;
5873 /* Round starting address down to longword boundary. */
5874 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5875 /* Round ending address up; get number of longwords that makes. */
5876 int count
5877 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5878 / sizeof (PTRACE_XFER_TYPE);
5879
5880 /* Allocate buffer of that many longwords. */
5881 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5882
5883 int pid = lwpid_of (current_thread);
5884
5885 if (len == 0)
5886 {
5887 /* Zero length write always succeeds. */
5888 return 0;
5889 }
5890
5891 if (debug_threads)
5892 {
5893 /* Dump up to four bytes. */
5894 char str[4 * 2 + 1];
5895 char *p = str;
5896 int dump = len < 4 ? len : 4;
5897
5898 for (i = 0; i < dump; i++)
5899 {
5900 sprintf (p, "%02x", myaddr[i]);
5901 p += 2;
5902 }
5903 *p = '\0';
5904
5905 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5906 str, (long) memaddr, pid);
5907 }
5908
5909 /* Fill start and end extra bytes of buffer with existing memory data. */
5910
5911 errno = 0;
5912 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5913 about coercing an 8 byte integer to a 4 byte pointer. */
5914 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5915 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5916 (PTRACE_TYPE_ARG4) 0);
5917 if (errno)
5918 return errno;
5919
5920 if (count > 1)
5921 {
5922 errno = 0;
5923 buffer[count - 1]
5924 = ptrace (PTRACE_PEEKTEXT, pid,
5925 /* Coerce to a uintptr_t first to avoid potential gcc warning
5926 about coercing an 8 byte integer to a 4 byte pointer. */
5927 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5928 * sizeof (PTRACE_XFER_TYPE)),
5929 (PTRACE_TYPE_ARG4) 0);
5930 if (errno)
5931 return errno;
5932 }
5933
5934 /* Copy data to be written over corresponding part of buffer. */
5935
5936 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5937 myaddr, len);
5938
5939 /* Write the entire buffer. */
5940
5941 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5942 {
5943 errno = 0;
5944 ptrace (PTRACE_POKETEXT, pid,
5945 /* Coerce to a uintptr_t first to avoid potential gcc warning
5946 about coercing an 8 byte integer to a 4 byte pointer. */
5947 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5948 (PTRACE_TYPE_ARG4) buffer[i]);
5949 if (errno)
5950 return errno;
5951 }
5952
5953 return 0;
5954 }
5955
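/* Target hook called when new symbols may be available. Used here to
initialize libthread_db-based thread support for the current process,
if that has not happened yet. */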
5956 static void
5957 linux_look_up_symbols (void)
5958 {
5959 #ifdef USE_THREAD_DB
5960 struct process_info *proc = current_process ();
5961
5962 if (proc->priv->thread_db != NULL)
5963 return;
5964
5965 thread_db_init ();
5966 #endif
5967 }
5968
5969 static void
5970 linux_request_interrupt (void)
5971 {
5972 /* Send a SIGINT to the process group. This acts just like the user
5973 typed a ^C on the controlling terminal. */
5974 kill (-signal_pid, SIGINT);
5975 }
5976
5977 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5978 to debugger memory starting at MYADDR. */
5979
5980 static int
5981 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5982 {
5983 char filename[PATH_MAX];
5984 int fd, n;
5985 int pid = lwpid_of (current_thread);
5986
5987 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5988
5989 fd = open (filename, O_RDONLY);
5990 if (fd < 0)
5991 return -1;
5992
5993 if (offset != (CORE_ADDR) 0
5994 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5995 n = -1;
5996 else
5997 n = read (fd, myaddr, len);
5998
5999 close (fd);
6000
6001 return n;
6002 }
6003
6004 /* These breakpoint and watchpoint related wrapper functions simply
6005 pass on the function call if the target has registered a
6006 corresponding function. */
6007
6008 static int
6009 linux_supports_z_point_type (char z_type)
6010 {
6011 return (the_low_target.supports_z_point_type != NULL
6012 && the_low_target.supports_z_point_type (z_type));
6013 }
6014
6015 static int
6016 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6017 int size, struct raw_breakpoint *bp)
6018 {
6019 if (type == raw_bkpt_type_sw)
6020 return insert_memory_breakpoint (bp);
6021 else if (the_low_target.insert_point != NULL)
6022 return the_low_target.insert_point (type, addr, size, bp);
6023 else
6024 /* Unsupported (see target.h). */
6025 return 1;
6026 }
6027
6028 static int
6029 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6030 int size, struct raw_breakpoint *bp)
6031 {
6032 if (type == raw_bkpt_type_sw)
6033 return remove_memory_breakpoint (bp);
6034 else if (the_low_target.remove_point != NULL)
6035 return the_low_target.remove_point (type, addr, size, bp);
6036 else
6037 /* Unsupported (see target.h). */
6038 return 1;
6039 }
6040
6041 /* Implement the to_stopped_by_sw_breakpoint target_ops
6042 method. */
6043
6044 static int
6045 linux_stopped_by_sw_breakpoint (void)
6046 {
6047 struct lwp_info *lwp = get_thread_lwp (current_thread);
6048
6049 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6050 }
6051
6052 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6053 method. */
6054
6055 static int
6056 linux_supports_stopped_by_sw_breakpoint (void)
6057 {
6058 return USE_SIGTRAP_SIGINFO;
6059 }
6060
6061 /* Implement the to_stopped_by_hw_breakpoint target_ops
6062 method. */
6063
6064 static int
6065 linux_stopped_by_hw_breakpoint (void)
6066 {
6067 struct lwp_info *lwp = get_thread_lwp (current_thread);
6068
6069 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6070 }
6071
6072 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6073 method. */
6074
6075 static int
6076 linux_supports_stopped_by_hw_breakpoint (void)
6077 {
6078 return USE_SIGTRAP_SIGINFO;
6079 }
6080
6081 /* Implement the supports_hardware_single_step target_ops method. */
6082
6083 static int
6084 linux_supports_hardware_single_step (void)
6085 {
6086 return can_hardware_single_step ();
6087 }
6088
6089 static int
6090 linux_supports_software_single_step (void)
6091 {
6092 return can_software_single_step ();
6093 }
6094
6095 static int
6096 linux_stopped_by_watchpoint (void)
6097 {
6098 struct lwp_info *lwp = get_thread_lwp (current_thread);
6099
6100 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6101 }
6102
6103 static CORE_ADDR
6104 linux_stopped_data_address (void)
6105 {
6106 struct lwp_info *lwp = get_thread_lwp (current_thread);
6107
6108 return lwp->stopped_data_address;
6109 }
6110
6111 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6112 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6113 && defined(PT_TEXT_END_ADDR)
6114
6115 /* This is only used for targets that define PT_TEXT_ADDR,
6116 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6117 target presumably has different ways of acquiring this information,
6118 such as loadmaps. */
6119
6120 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6121 to tell gdb about. */
6122
6123 static int
6124 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6125 {
6126 unsigned long text, text_end, data;
6127 int pid = lwpid_of (current_thread);
6128
6129 errno = 0;
6130
6131 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6132 (PTRACE_TYPE_ARG4) 0);
6133 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6134 (PTRACE_TYPE_ARG4) 0);
6135 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6136 (PTRACE_TYPE_ARG4) 0);
6137
6138 if (errno == 0)
6139 {
6140 /* Both text and data offsets produced at compile-time (and so
6141 used by gdb) are relative to the beginning of the program,
6142 with the data segment immediately following the text segment.
6143 However, the actual runtime layout in memory may put the data
6144 somewhere else, so when we send gdb a data base-address, we
6145 use the real data base address and subtract the compile-time
6146 data base-address from it (which is just the length of the
6147 text segment). BSS immediately follows data in both
6148 cases. */
6149 *text_p = text;
6150 *data_p = data - (text_end - text);
6151
6152 return 1;
6153 }
6154 return 0;
6155 }
6156 #endif
6157
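/* Serve qXfer:osdata reads by delegating to the common
linux_common_xfer_osdata helper. WRITEBUF is ignored. */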
6158 static int
6159 linux_qxfer_osdata (const char *annex,
6160 unsigned char *readbuf, unsigned const char *writebuf,
6161 CORE_ADDR offset, int len)
6162 {
6163 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6164 }
6165
6166 /* Convert a native/host siginfo object, into/from the siginfo in the
6167 layout of the inferiors' architecture. */
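/* A DIRECTION of 1 converts from the inferior layout in INF_SIGINFO to
the native layout in SIGINFO; a DIRECTION of 0 converts the other
way. */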
6168
6169 static void
6170 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6171 {
6172 int done = 0;
6173
6174 if (the_low_target.siginfo_fixup != NULL)
6175 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6176
6177 /* If there was no callback, or the callback didn't do anything,
6178 then just do a straight memcpy. */
6179 if (!done)
6180 {
6181 if (direction == 1)
6182 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6183 else
6184 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6185 }
6186 }
6187
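/* Handle qXfer:siginfo reads and writes for the current thread, using
PTRACE_GETSIGINFO/PTRACE_SETSIGINFO and converting between the native
and inferior siginfo layouts as needed. */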
6188 static int
6189 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6190 unsigned const char *writebuf, CORE_ADDR offset, int len)
6191 {
6192 int pid;
6193 siginfo_t siginfo;
6194 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6195
6196 if (current_thread == NULL)
6197 return -1;
6198
6199 pid = lwpid_of (current_thread);
6200
6201 if (debug_threads)
6202 debug_printf ("%s siginfo for lwp %d.\n",
6203 readbuf != NULL ? "Reading" : "Writing",
6204 pid);
6205
6206 if (offset >= sizeof (siginfo))
6207 return -1;
6208
6209 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6210 return -1;
6211
6212 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6213 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6214 inferior with a 64-bit GDBSERVER should look the same as debugging it
6215 with a 32-bit GDBSERVER, we need to convert it. */
6216 siginfo_fixup (&siginfo, inf_siginfo, 0);
6217
6218 if (offset + len > sizeof (siginfo))
6219 len = sizeof (siginfo) - offset;
6220
6221 if (readbuf != NULL)
6222 memcpy (readbuf, inf_siginfo + offset, len);
6223 else
6224 {
6225 memcpy (inf_siginfo + offset, writebuf, len);
6226
6227 /* Convert back to ptrace layout before flushing it out. */
6228 siginfo_fixup (&siginfo, inf_siginfo, 1);
6229
6230 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6231 return -1;
6232 }
6233
6234 return len;
6235 }
6236
6237 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6238 it lets us notice when children change state; it also acts as the
6239 handler for the sigsuspend in my_waitpid. */
6240
6241 static void
6242 sigchld_handler (int signo)
6243 {
6244 int old_errno = errno;
6245
6246 if (debug_threads)
6247 {
6248 do
6249 {
6250 /* fprintf is not async-signal-safe, so call write
6251 directly. */
6252 if (write (2, "sigchld_handler\n",
6253 sizeof ("sigchld_handler\n") - 1) < 0)
6254 break; /* just ignore */
6255 } while (0);
6256 }
6257
6258 if (target_is_async_p ())
6259 async_file_mark (); /* trigger a linux_wait */
6260
6261 errno = old_errno;
6262 }
6263
6264 static int
6265 linux_supports_non_stop (void)
6266 {
6267 return 1;
6268 }
6269
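/* Enable (ENABLE nonzero) or disable target async mode, creating or
tearing down the linux_event_pipe self-pipe that is used to wake the
event loop when SIGCHLD arrives. Returns the previous setting. */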
6270 static int
6271 linux_async (int enable)
6272 {
6273 int previous = target_is_async_p ();
6274
6275 if (debug_threads)
6276 debug_printf ("linux_async (%d), previous=%d\n",
6277 enable, previous);
6278
6279 if (previous != enable)
6280 {
6281 sigset_t mask;
6282 sigemptyset (&mask);
6283 sigaddset (&mask, SIGCHLD);
6284
6285 sigprocmask (SIG_BLOCK, &mask, NULL);
6286
6287 if (enable)
6288 {
6289 if (pipe (linux_event_pipe) == -1)
6290 {
6291 linux_event_pipe[0] = -1;
6292 linux_event_pipe[1] = -1;
6293 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6294
6295 warning ("creating event pipe failed.");
6296 return previous;
6297 }
6298
6299 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6300 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6301
6302 /* Register the event loop handler. */
6303 add_file_handler (linux_event_pipe[0],
6304 handle_target_event, NULL);
6305
6306 /* Always trigger a linux_wait. */
6307 async_file_mark ();
6308 }
6309 else
6310 {
6311 delete_file_handler (linux_event_pipe[0]);
6312
6313 close (linux_event_pipe[0]);
6314 close (linux_event_pipe[1]);
6315 linux_event_pipe[0] = -1;
6316 linux_event_pipe[1] = -1;
6317 }
6318
6319 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6320 }
6321
6322 return previous;
6323 }
6324
6325 static int
6326 linux_start_non_stop (int nonstop)
6327 {
6328 /* Register or unregister from event-loop accordingly. */
6329 linux_async (nonstop);
6330
6331 if (target_is_async_p () != (nonstop != 0))
6332 return -1;
6333
6334 return 0;
6335 }
6336
6337 static int
6338 linux_supports_multi_process (void)
6339 {
6340 return 1;
6341 }
6342
6343 /* Check if fork events are supported. */
6344
6345 static int
6346 linux_supports_fork_events (void)
6347 {
6348 return linux_supports_tracefork ();
6349 }
6350
6351 /* Check if vfork events are supported. */
6352
6353 static int
6354 linux_supports_vfork_events (void)
6355 {
6356 return linux_supports_tracefork ();
6357 }
6358
6359 /* Check if exec events are supported. */
6360
6361 static int
6362 linux_supports_exec_events (void)
6363 {
6364 return linux_supports_traceexec ();
6365 }
6366
6367 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6368 ptrace flags for all inferiors. This is in case the new GDB connection
6369 doesn't support the same set of events that the previous one did. */
6370
6371 static void
6372 linux_handle_new_gdb_connection (void)
6373 {
6374 /* Request that all the lwps reset their ptrace options. */
6375 for_each_thread ([] (thread_info *thread)
6376 {
6377 struct lwp_info *lwp = get_thread_lwp (thread);
6378
6379 if (!lwp->stopped)
6380 {
6381 /* Stop the lwp so we can modify its ptrace options. */
6382 lwp->must_set_ptrace_flags = 1;
6383 linux_stop_lwp (lwp);
6384 }
6385 else
6386 {
6387 /* Already stopped; go ahead and set the ptrace options. */
6388 struct process_info *proc = find_process_pid (pid_of (thread));
6389 int options = linux_low_ptrace_options (proc->attached);
6390
6391 linux_enable_event_reporting (lwpid_of (thread), options);
6392 lwp->must_set_ptrace_flags = 0;
6393 }
6394 });
6395 }
6396
6397 static int
6398 linux_supports_disable_randomization (void)
6399 {
6400 #ifdef HAVE_PERSONALITY
6401 return 1;
6402 #else
6403 return 0;
6404 #endif
6405 }
6406
6407 static int
6408 linux_supports_agent (void)
6409 {
6410 return 1;
6411 }
6412
6413 static int
6414 linux_supports_range_stepping (void)
6415 {
6416 if (can_software_single_step ())
6417 return 1;
6418 if (*the_low_target.supports_range_stepping == NULL)
6419 return 0;
6420
6421 return (*the_low_target.supports_range_stepping) ();
6422 }
6423
6424 /* Enumerate spufs IDs for process PID. */
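/* Descriptors under /proc/PID/fd that refer to directories on an spufs
mount are SPU contexts; each matching descriptor number is written to
BUF as a 4-byte ID, subject to the OFFSET/LEN window. Returns the
number of bytes written, or -1 if /proc/PID/fd cannot be read. */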
6425 static int
6426 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6427 {
6428 int pos = 0;
6429 int written = 0;
6430 char path[128];
6431 DIR *dir;
6432 struct dirent *entry;
6433
6434 sprintf (path, "/proc/%ld/fd", pid);
6435 dir = opendir (path);
6436 if (!dir)
6437 return -1;
6438
6439 rewinddir (dir);
6440 while ((entry = readdir (dir)) != NULL)
6441 {
6442 struct stat st;
6443 struct statfs stfs;
6444 int fd;
6445
6446 fd = atoi (entry->d_name);
6447 if (!fd)
6448 continue;
6449
6450 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6451 if (stat (path, &st) != 0)
6452 continue;
6453 if (!S_ISDIR (st.st_mode))
6454 continue;
6455
6456 if (statfs (path, &stfs) != 0)
6457 continue;
6458 if (stfs.f_type != SPUFS_MAGIC)
6459 continue;
6460
6461 if (pos >= offset && pos + 4 <= offset + len)
6462 {
6463 *(unsigned int *)(buf + pos - offset) = fd;
6464 written += 4;
6465 }
6466 pos += 4;
6467 }
6468
6469 closedir (dir);
6470 return written;
6471 }
6472
6473 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6474 object type, using the /proc file system. */
6475 static int
6476 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6477 unsigned const char *writebuf,
6478 CORE_ADDR offset, int len)
6479 {
6480 long pid = lwpid_of (current_thread);
6481 char buf[128];
6482 int fd = 0;
6483 int ret = 0;
6484
6485 if (!writebuf && !readbuf)
6486 return -1;
6487
6488 if (!*annex)
6489 {
6490 if (!readbuf)
6491 return -1;
6492 else
6493 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6494 }
6495
6496 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6497 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6498 if (fd <= 0)
6499 return -1;
6500
6501 if (offset != 0
6502 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6503 {
6504 close (fd);
6505 return 0;
6506 }
6507
6508 if (writebuf)
6509 ret = write (fd, writebuf, (size_t) len);
6510 else
6511 ret = read (fd, readbuf, (size_t) len);
6512
6513 close (fd);
6514 return ret;
6515 }
6516
6517 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6518 struct target_loadseg
6519 {
6520 /* Core address to which the segment is mapped. */
6521 Elf32_Addr addr;
6522 /* VMA recorded in the program header. */
6523 Elf32_Addr p_vaddr;
6524 /* Size of this segment in memory. */
6525 Elf32_Word p_memsz;
6526 };
6527
6528 # if defined PT_GETDSBT
6529 struct target_loadmap
6530 {
6531 /* Protocol version number, must be zero. */
6532 Elf32_Word version;
6533 /* Pointer to the DSBT table, its size, and the DSBT index. */
6534 unsigned *dsbt_table;
6535 unsigned dsbt_size, dsbt_index;
6536 /* Number of segments in this map. */
6537 Elf32_Word nsegs;
6538 /* The actual memory map. */
6539 struct target_loadseg segs[/*nsegs*/];
6540 };
6541 # define LINUX_LOADMAP PT_GETDSBT
6542 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6543 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6544 # else
6545 struct target_loadmap
6546 {
6547 /* Protocol version number, must be zero. */
6548 Elf32_Half version;
6549 /* Number of segments in this map. */
6550 Elf32_Half nsegs;
6551 /* The actual memory map. */
6552 struct target_loadseg segs[/*nsegs*/];
6553 };
6554 # define LINUX_LOADMAP PTRACE_GETFDPIC
6555 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6556 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6557 # endif
6558
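/* Read the FDPIC/DSBT load map of either the executable (ANNEX "exec")
or the dynamic loader (ANNEX "interp") via ptrace, copying up to LEN
bytes starting at OFFSET into MYADDR. Returns the number of bytes
copied, or -1 on error. */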
6559 static int
6560 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6561 unsigned char *myaddr, unsigned int len)
6562 {
6563 int pid = lwpid_of (current_thread);
6564 int addr = -1;
6565 struct target_loadmap *data = NULL;
6566 unsigned int actual_length, copy_length;
6567
6568 if (strcmp (annex, "exec") == 0)
6569 addr = (int) LINUX_LOADMAP_EXEC;
6570 else if (strcmp (annex, "interp") == 0)
6571 addr = (int) LINUX_LOADMAP_INTERP;
6572 else
6573 return -1;
6574
6575 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6576 return -1;
6577
6578 if (data == NULL)
6579 return -1;
6580
6581 actual_length = sizeof (struct target_loadmap)
6582 + sizeof (struct target_loadseg) * data->nsegs;
6583
6584 if (offset < 0 || offset > actual_length)
6585 return -1;
6586
6587 copy_length = actual_length - offset < len ? actual_length - offset : len;
6588 memcpy (myaddr, (char *) data + offset, copy_length);
6589 return copy_length;
6590 }
6591 #else
6592 # define linux_read_loadmap NULL
6593 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6594
6595 static void
6596 linux_process_qsupported (char **features, int count)
6597 {
6598 if (the_low_target.process_qsupported != NULL)
6599 the_low_target.process_qsupported (features, count);
6600 }
6601
6602 static int
6603 linux_supports_catch_syscall (void)
6604 {
6605 return (the_low_target.get_syscall_trapinfo != NULL
6606 && linux_supports_tracesysgood ());
6607 }
6608
6609 static int
6610 linux_get_ipa_tdesc_idx (void)
6611 {
6612 if (the_low_target.get_ipa_tdesc_idx == NULL)
6613 return 0;
6614
6615 return (*the_low_target.get_ipa_tdesc_idx) ();
6616 }
6617
6618 static int
6619 linux_supports_tracepoints (void)
6620 {
6621 if (*the_low_target.supports_tracepoints == NULL)
6622 return 0;
6623
6624 return (*the_low_target.supports_tracepoints) ();
6625 }
6626
6627 static CORE_ADDR
6628 linux_read_pc (struct regcache *regcache)
6629 {
6630 if (the_low_target.get_pc == NULL)
6631 return 0;
6632
6633 return (*the_low_target.get_pc) (regcache);
6634 }
6635
6636 static void
6637 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6638 {
6639 gdb_assert (the_low_target.set_pc != NULL);
6640
6641 (*the_low_target.set_pc) (regcache, pc);
6642 }
6643
6644 static int
6645 linux_thread_stopped (struct thread_info *thread)
6646 {
6647 return get_thread_lwp (thread)->stopped;
6648 }
6649
6650 /* This exposes stop-all-threads functionality to other modules. */
6651
6652 static void
6653 linux_pause_all (int freeze)
6654 {
6655 stop_all_lwps (freeze, NULL);
6656 }
6657
6658 /* This exposes unstop-all-threads functionality to other gdbserver
6659 modules. */
6660
6661 static void
6662 linux_unpause_all (int unfreeze)
6663 {
6664 unstop_all_lwps (unfreeze, NULL);
6665 }
6666
6667 static int
6668 linux_prepare_to_access_memory (void)
6669 {
6670 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6671 running LWP. */
6672 if (non_stop)
6673 linux_pause_all (1);
6674 return 0;
6675 }
6676
6677 static void
6678 linux_done_accessing_memory (void)
6679 {
6680 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6681 running LWP. */
6682 if (non_stop)
6683 linux_unpause_all (1);
6684 }
6685
6686 static int
6687 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6688 CORE_ADDR collector,
6689 CORE_ADDR lockaddr,
6690 ULONGEST orig_size,
6691 CORE_ADDR *jump_entry,
6692 CORE_ADDR *trampoline,
6693 ULONGEST *trampoline_size,
6694 unsigned char *jjump_pad_insn,
6695 ULONGEST *jjump_pad_insn_size,
6696 CORE_ADDR *adjusted_insn_addr,
6697 CORE_ADDR *adjusted_insn_addr_end,
6698 char *err)
6699 {
6700 return (*the_low_target.install_fast_tracepoint_jump_pad)
6701 (tpoint, tpaddr, collector, lockaddr, orig_size,
6702 jump_entry, trampoline, trampoline_size,
6703 jjump_pad_insn, jjump_pad_insn_size,
6704 adjusted_insn_addr, adjusted_insn_addr_end,
6705 err);
6706 }
6707
6708 static struct emit_ops *
6709 linux_emit_ops (void)
6710 {
6711 if (the_low_target.emit_ops != NULL)
6712 return (*the_low_target.emit_ops) ();
6713 else
6714 return NULL;
6715 }
6716
6717 static int
6718 linux_get_min_fast_tracepoint_insn_len (void)
6719 {
6720 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6721 }
6722
6723 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6724
6725 static int
6726 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6727 CORE_ADDR *phdr_memaddr, int *num_phdr)
6728 {
6729 char filename[PATH_MAX];
6730 int fd;
6731 const int auxv_size = is_elf64
6732 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6733 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6734
6735 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6736
6737 fd = open (filename, O_RDONLY);
6738 if (fd < 0)
6739 return 1;
6740
6741 *phdr_memaddr = 0;
6742 *num_phdr = 0;
6743 while (read (fd, buf, auxv_size) == auxv_size
6744 && (*phdr_memaddr == 0 || *num_phdr == 0))
6745 {
6746 if (is_elf64)
6747 {
6748 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6749
6750 switch (aux->a_type)
6751 {
6752 case AT_PHDR:
6753 *phdr_memaddr = aux->a_un.a_val;
6754 break;
6755 case AT_PHNUM:
6756 *num_phdr = aux->a_un.a_val;
6757 break;
6758 }
6759 }
6760 else
6761 {
6762 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6763
6764 switch (aux->a_type)
6765 {
6766 case AT_PHDR:
6767 *phdr_memaddr = aux->a_un.a_val;
6768 break;
6769 case AT_PHNUM:
6770 *num_phdr = aux->a_un.a_val;
6771 break;
6772 }
6773 }
6774 }
6775
6776 close (fd);
6777
6778 if (*phdr_memaddr == 0 || *num_phdr == 0)
6779 {
6780 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6781 "phdr_memaddr = %ld, phdr_num = %d",
6782 (long) *phdr_memaddr, *num_phdr);
6783 return 2;
6784 }
6785
6786 return 0;
6787 }
6788
6789 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6790
6791 static CORE_ADDR
6792 get_dynamic (const int pid, const int is_elf64)
6793 {
6794 CORE_ADDR phdr_memaddr, relocation;
6795 int num_phdr, i;
6796 unsigned char *phdr_buf;
6797 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6798
6799 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6800 return 0;
6801
6802 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6803 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6804
6805 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6806 return 0;
6807
6808 /* Compute relocation: it is expected to be 0 for "regular" executables,
6809 non-zero for PIE ones. */
6810 relocation = -1;
6811 for (i = 0; relocation == -1 && i < num_phdr; i++)
6812 if (is_elf64)
6813 {
6814 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6815
6816 if (p->p_type == PT_PHDR)
6817 relocation = phdr_memaddr - p->p_vaddr;
6818 }
6819 else
6820 {
6821 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6822
6823 if (p->p_type == PT_PHDR)
6824 relocation = phdr_memaddr - p->p_vaddr;
6825 }
6826
6827 if (relocation == -1)
6828 {
6829 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6830 real-world executables, including PIE executables, always have
6831 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6832 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6833 provides DT_DEBUG anyway (fpc binaries are statically linked).
6834
6835 Therefore, if DT_DEBUG exists then PT_PHDR is always present too.
6836
6837 GDB could also derive RELOCATION from AT_ENTRY - e_entry. */
6838
6839 return 0;
6840 }
6841
6842 for (i = 0; i < num_phdr; i++)
6843 {
6844 if (is_elf64)
6845 {
6846 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6847
6848 if (p->p_type == PT_DYNAMIC)
6849 return p->p_vaddr + relocation;
6850 }
6851 else
6852 {
6853 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6854
6855 if (p->p_type == PT_DYNAMIC)
6856 return p->p_vaddr + relocation;
6857 }
6858 }
6859
6860 return 0;
6861 }
6862
6863 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6864 can be 0 if the inferior does not yet have the library list initialized.
6865 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6866 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6867
6868 static CORE_ADDR
6869 get_r_debug (const int pid, const int is_elf64)
6870 {
6871 CORE_ADDR dynamic_memaddr;
6872 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6873 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6874 CORE_ADDR map = -1;
6875
6876 dynamic_memaddr = get_dynamic (pid, is_elf64);
6877 if (dynamic_memaddr == 0)
6878 return map;
6879
6880 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6881 {
6882 if (is_elf64)
6883 {
6884 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6885 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6886 union
6887 {
6888 Elf64_Xword map;
6889 unsigned char buf[sizeof (Elf64_Xword)];
6890 }
6891 rld_map;
6892 #endif
6893 #ifdef DT_MIPS_RLD_MAP
6894 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6895 {
6896 if (linux_read_memory (dyn->d_un.d_val,
6897 rld_map.buf, sizeof (rld_map.buf)) == 0)
6898 return rld_map.map;
6899 else
6900 break;
6901 }
6902 #endif /* DT_MIPS_RLD_MAP */
6903 #ifdef DT_MIPS_RLD_MAP_REL
6904 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6905 {
6906 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6907 rld_map.buf, sizeof (rld_map.buf)) == 0)
6908 return rld_map.map;
6909 else
6910 break;
6911 }
6912 #endif /* DT_MIPS_RLD_MAP_REL */
6913
6914 if (dyn->d_tag == DT_DEBUG && map == -1)
6915 map = dyn->d_un.d_val;
6916
6917 if (dyn->d_tag == DT_NULL)
6918 break;
6919 }
6920 else
6921 {
6922 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6923 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6924 union
6925 {
6926 Elf32_Word map;
6927 unsigned char buf[sizeof (Elf32_Word)];
6928 }
6929 rld_map;
6930 #endif
6931 #ifdef DT_MIPS_RLD_MAP
6932 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6933 {
6934 if (linux_read_memory (dyn->d_un.d_val,
6935 rld_map.buf, sizeof (rld_map.buf)) == 0)
6936 return rld_map.map;
6937 else
6938 break;
6939 }
6940 #endif /* DT_MIPS_RLD_MAP */
6941 #ifdef DT_MIPS_RLD_MAP_REL
6942 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6943 {
6944 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6945 rld_map.buf, sizeof (rld_map.buf)) == 0)
6946 return rld_map.map;
6947 else
6948 break;
6949 }
6950 #endif /* DT_MIPS_RLD_MAP_REL */
6951
6952 if (dyn->d_tag == DT_DEBUG && map == -1)
6953 map = dyn->d_un.d_val;
6954
6955 if (dyn->d_tag == DT_NULL)
6956 break;
6957 }
6958
6959 dynamic_memaddr += dyn_size;
6960 }
6961
6962 return map;
6963 }
6964
6965 /* Read one pointer from MEMADDR in the inferior. */
6966
6967 static int
6968 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6969 {
6970 int ret;
6971
6972 /* Go through a union so this works on either big or little endian
6973 hosts, when the inferior's pointer size is smaller than the size
6974 of CORE_ADDR. It is assumed the inferior's endianness is the
6975 same as the superior's. */
6976 union
6977 {
6978 CORE_ADDR core_addr;
6979 unsigned int ui;
6980 unsigned char uc;
6981 } addr;
6982
6983 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6984 if (ret == 0)
6985 {
6986 if (ptr_size == sizeof (CORE_ADDR))
6987 *ptr = addr.core_addr;
6988 else if (ptr_size == sizeof (unsigned int))
6989 *ptr = addr.ui;
6990 else
6991 gdb_assert_not_reached ("unhandled pointer size");
6992 }
6993 return ret;
6994 }
6995
6996 struct link_map_offsets
6997 {
6998 /* Offset and size of r_debug.r_version. */
6999 int r_version_offset;
7000
7001 /* Offset and size of r_debug.r_map. */
7002 int r_map_offset;
7003
7004 /* Offset to l_addr field in struct link_map. */
7005 int l_addr_offset;
7006
7007 /* Offset to l_name field in struct link_map. */
7008 int l_name_offset;
7009
7010 /* Offset to l_ld field in struct link_map. */
7011 int l_ld_offset;
7012
7013 /* Offset to l_next field in struct link_map. */
7014 int l_next_offset;
7015
7016 /* Offset to l_prev field in struct link_map. */
7017 int l_prev_offset;
7018 };
7019
7020 /* Construct qXfer:libraries-svr4:read reply. */
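/* The ANNEX may supply "start=ADDR" and "prev=ADDR" arguments (each
terminated by ';'), selecting the link-map entry to resume the walk
from and the expected previous entry; with no arguments the walk
starts from r_debug.r_map. */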
7021
7022 static int
7023 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7024 unsigned const char *writebuf,
7025 CORE_ADDR offset, int len)
7026 {
7027 char *document;
7028 unsigned document_len;
7029 struct process_info_private *const priv = current_process ()->priv;
7030 char filename[PATH_MAX];
7031 int pid, is_elf64;
7032
7033 static const struct link_map_offsets lmo_32bit_offsets =
7034 {
7035 0, /* r_version offset. */
7036 4, /* r_debug.r_map offset. */
7037 0, /* l_addr offset in link_map. */
7038 4, /* l_name offset in link_map. */
7039 8, /* l_ld offset in link_map. */
7040 12, /* l_next offset in link_map. */
7041 16 /* l_prev offset in link_map. */
7042 };
7043
7044 static const struct link_map_offsets lmo_64bit_offsets =
7045 {
7046 0, /* r_version offset. */
7047 8, /* r_debug.r_map offset. */
7048 0, /* l_addr offset in link_map. */
7049 8, /* l_name offset in link_map. */
7050 16, /* l_ld offset in link_map. */
7051 24, /* l_next offset in link_map. */
7052 32 /* l_prev offset in link_map. */
7053 };
7054 const struct link_map_offsets *lmo;
7055 unsigned int machine;
7056 int ptr_size;
7057 CORE_ADDR lm_addr = 0, lm_prev = 0;
7058 int allocated = 1024;
7059 char *p;
7060 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7061 int header_done = 0;
7062
7063 if (writebuf != NULL)
7064 return -2;
7065 if (readbuf == NULL)
7066 return -1;
7067
7068 pid = lwpid_of (current_thread);
7069 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7070 is_elf64 = elf_64_file_p (filename, &machine);
7071 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7072 ptr_size = is_elf64 ? 8 : 4;
7073
7074 while (annex[0] != '\0')
7075 {
7076 const char *sep;
7077 CORE_ADDR *addrp;
7078 int len;
7079
7080 sep = strchr (annex, '=');
7081 if (sep == NULL)
7082 break;
7083
7084 len = sep - annex;
7085 if (len == 5 && startswith (annex, "start"))
7086 addrp = &lm_addr;
7087 else if (len == 4 && startswith (annex, "prev"))
7088 addrp = &lm_prev;
7089 else
7090 {
7091 annex = strchr (sep, ';');
7092 if (annex == NULL)
7093 break;
7094 annex++;
7095 continue;
7096 }
7097
7098 annex = decode_address_to_semicolon (addrp, sep + 1);
7099 }
7100
7101 if (lm_addr == 0)
7102 {
7103 int r_version = 0;
7104
7105 if (priv->r_debug == 0)
7106 priv->r_debug = get_r_debug (pid, is_elf64);
7107
7108 /* We failed to find DT_DEBUG. This situation will not change
7109 for this inferior - do not retry it. Report it to GDB as
7110 E01; see GDB's solib-svr4.c for the reasons. */
7111 if (priv->r_debug == (CORE_ADDR) -1)
7112 return -1;
7113
7114 if (priv->r_debug != 0)
7115 {
7116 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7117 (unsigned char *) &r_version,
7118 sizeof (r_version)) != 0
7119 || r_version != 1)
7120 {
7121 warning ("unexpected r_debug version %d", r_version);
7122 }
7123 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7124 &lm_addr, ptr_size) != 0)
7125 {
7126 warning ("unable to read r_map from 0x%lx",
7127 (long) priv->r_debug + lmo->r_map_offset);
7128 }
7129 }
7130 }
7131
7132 document = (char *) xmalloc (allocated);
7133 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7134 p = document + strlen (document);
7135
7136 while (lm_addr
7137 && read_one_ptr (lm_addr + lmo->l_name_offset,
7138 &l_name, ptr_size) == 0
7139 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7140 &l_addr, ptr_size) == 0
7141 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7142 &l_ld, ptr_size) == 0
7143 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7144 &l_prev, ptr_size) == 0
7145 && read_one_ptr (lm_addr + lmo->l_next_offset,
7146 &l_next, ptr_size) == 0)
7147 {
7148 unsigned char libname[PATH_MAX];
7149
7150 if (lm_prev != l_prev)
7151 {
7152 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7153 (long) lm_prev, (long) l_prev);
7154 break;
7155 }
7156
7157 /* Ignore the first entry even if it has a valid name, as the first
7158 entry corresponds to the main executable. The first entry should
7159 not be skipped if the dynamic loader was loaded late by a static
7160 executable (see the solib-svr4.c parameter ignore_first). But in
7161 that case the main executable does not have PT_DYNAMIC present and
7162 this function has already exited above due to a failed get_r_debug. */
7163 if (lm_prev == 0)
7164 {
7165 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7166 p = p + strlen (p);
7167 }
7168 else
7169 {
7170 /* Not checking for error because reading may stop before
7171 we've got PATH_MAX worth of characters. */
7172 libname[0] = '\0';
7173 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7174 libname[sizeof (libname) - 1] = '\0';
7175 if (libname[0] != '\0')
7176 {
7177 /* 6x the size for xml_escape_text below. */
7178 size_t len = 6 * strlen ((char *) libname);
7179
7180 if (!header_done)
7181 {
7182 /* Terminate `<library-list-svr4'. */
7183 *p++ = '>';
7184 header_done = 1;
7185 }
7186
7187 while (allocated < p - document + len + 200)
7188 {
7189 /* Expand to guarantee sufficient storage. */
7190 uintptr_t document_len = p - document;
7191
7192 document = (char *) xrealloc (document, 2 * allocated);
7193 allocated *= 2;
7194 p = document + document_len;
7195 }
7196
7197 std::string name = xml_escape_text ((char *) libname);
7198 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7199 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7200 name.c_str (), (unsigned long) lm_addr,
7201 (unsigned long) l_addr, (unsigned long) l_ld);
7202 }
7203 }
7204
7205 lm_prev = lm_addr;
7206 lm_addr = l_next;
7207 }
7208
7209 if (!header_done)
7210 {
7211 /* Empty list; terminate `<library-list-svr4'. */
7212 strcpy (p, "/>");
7213 }
7214 else
7215 strcpy (p, "</library-list-svr4>");
7216
7217 document_len = strlen (document);
7218 if (offset < document_len)
7219 document_len -= offset;
7220 else
7221 document_len = 0;
7222 if (len > document_len)
7223 len = document_len;
7224
7225 memcpy (readbuf, document + offset, len);
7226 xfree (document);
7227
7228 return len;
7229 }
7230
7231 #ifdef HAVE_LINUX_BTRACE
7232
7233 /* See to_disable_btrace target method. */
7234
7235 static int
7236 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7237 {
7238 enum btrace_error err;
7239
7240 err = linux_disable_btrace (tinfo);
7241 return (err == BTRACE_ERR_NONE ? 0 : -1);
7242 }
7243
7244 /* Encode an Intel Processor Trace configuration. */
7245
7246 static void
7247 linux_low_encode_pt_config (struct buffer *buffer,
7248 const struct btrace_data_pt_config *config)
7249 {
7250 buffer_grow_str (buffer, "<pt-config>\n");
7251
7252 switch (config->cpu.vendor)
7253 {
7254 case CV_INTEL:
7255 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7256 "model=\"%u\" stepping=\"%u\"/>\n",
7257 config->cpu.family, config->cpu.model,
7258 config->cpu.stepping);
7259 break;
7260
7261 default:
7262 break;
7263 }
7264
7265 buffer_grow_str (buffer, "</pt-config>\n");
7266 }
7267
7268 /* Encode a raw buffer. */
7269
7270 static void
7271 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7272 unsigned int size)
7273 {
7274 if (size == 0)
7275 return;
7276
7277 /* We use hex encoding - see common/rsp-low.h. */
7278 buffer_grow_str (buffer, "<raw>\n");
7279
7280 while (size-- > 0)
7281 {
7282 char elem[2];
7283
7284 elem[0] = tohex ((*data >> 4) & 0xf);
7285 elem[1] = tohex (*data++ & 0xf);
7286
7287 buffer_grow (buffer, elem, 2);
7288 }
7289
7290 buffer_grow_str (buffer, "</raw>\n");
7291 }
7292
7293 /* See to_read_btrace target method. */
7294
7295 static int
7296 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7297 enum btrace_read_type type)
7298 {
7299 struct btrace_data btrace;
7300 struct btrace_block *block;
7301 enum btrace_error err;
7302 int i;
7303
7304 btrace_data_init (&btrace);
7305
7306 err = linux_read_btrace (&btrace, tinfo, type);
7307 if (err != BTRACE_ERR_NONE)
7308 {
7309 if (err == BTRACE_ERR_OVERFLOW)
7310 buffer_grow_str0 (buffer, "E.Overflow.");
7311 else
7312 buffer_grow_str0 (buffer, "E.Generic Error.");
7313
7314 goto err;
7315 }
7316
7317 switch (btrace.format)
7318 {
7319 case BTRACE_FORMAT_NONE:
7320 buffer_grow_str0 (buffer, "E.No Trace.");
7321 goto err;
7322
7323 case BTRACE_FORMAT_BTS:
7324 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7325 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7326
7327 for (i = 0;
7328 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7329 i++)
7330 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7331 paddress (block->begin), paddress (block->end));
7332
7333 buffer_grow_str0 (buffer, "</btrace>\n");
7334 break;
7335
7336 case BTRACE_FORMAT_PT:
7337 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7338 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7339 buffer_grow_str (buffer, "<pt>\n");
7340
7341 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7342
7343 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7344 btrace.variant.pt.size);
7345
7346 buffer_grow_str (buffer, "</pt>\n");
7347 buffer_grow_str0 (buffer, "</btrace>\n");
7348 break;
7349
7350 default:
7351 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7352 goto err;
7353 }
7354
7355 btrace_data_fini (&btrace);
7356 return 0;
7357
7358 err:
7359 btrace_data_fini (&btrace);
7360 return -1;
7361 }
7362
7363 /* See to_btrace_conf target method. */
7364
7365 static int
7366 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7367 struct buffer *buffer)
7368 {
7369 const struct btrace_config *conf;
7370
7371 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7372 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7373
7374 conf = linux_btrace_conf (tinfo);
7375 if (conf != NULL)
7376 {
7377 switch (conf->format)
7378 {
7379 case BTRACE_FORMAT_NONE:
7380 break;
7381
7382 case BTRACE_FORMAT_BTS:
7383 buffer_xml_printf (buffer, "<bts");
7384 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7385 buffer_xml_printf (buffer, " />\n");
7386 break;
7387
7388 case BTRACE_FORMAT_PT:
7389 buffer_xml_printf (buffer, "<pt");
7390 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7391 buffer_xml_printf (buffer, "/>\n");
7392 break;
7393 }
7394 }
7395
7396 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7397 return 0;
7398 }
7399 #endif /* HAVE_LINUX_BTRACE */
7400
7401 /* See nat/linux-nat.h. */
7402
7403 ptid_t
7404 current_lwp_ptid (void)
7405 {
7406 return ptid_of (current_thread);
7407 }
7408
7409 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7410
7411 static int
7412 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7413 {
7414 if (the_low_target.breakpoint_kind_from_pc != NULL)
7415 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7416 else
7417 return default_breakpoint_kind_from_pc (pcptr);
7418 }
7419
7420 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7421
7422 static const gdb_byte *
7423 linux_sw_breakpoint_from_kind (int kind, int *size)
7424 {
7425 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7426
7427 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7428 }
7429
7430 /* Implementation of the target_ops method
7431 "breakpoint_kind_from_current_state". */
7432
7433 static int
7434 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7435 {
7436 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7437 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7438 else
7439 return linux_breakpoint_kind_from_pc (pcptr);
7440 }
7441
7442 /* Default implementation of linux_target_ops method "set_pc" for
7443 32-bit pc register which is literally named "pc". */
7444
7445 void
7446 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7447 {
7448 uint32_t newpc = pc;
7449
7450 supply_register_by_name (regcache, "pc", &newpc);
7451 }
7452
7453 /* Default implementation of linux_target_ops method "get_pc" for
7454 32-bit pc register which is literally named "pc". */
7455
7456 CORE_ADDR
7457 linux_get_pc_32bit (struct regcache *regcache)
7458 {
7459 uint32_t pc;
7460
7461 collect_register_by_name (regcache, "pc", &pc);
7462 if (debug_threads)
7463 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7464 return pc;
7465 }
7466
7467 /* Default implementation of linux_target_ops method "set_pc" for
7468 64-bit pc register which is literally named "pc". */
7469
7470 void
7471 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7472 {
7473 uint64_t newpc = pc;
7474
7475 supply_register_by_name (regcache, "pc", &newpc);
7476 }
7477
7478 /* Default implementation of linux_target_ops method "get_pc" for
7479 64-bit pc register which is literally named "pc". */
7480
7481 CORE_ADDR
7482 linux_get_pc_64bit (struct regcache *regcache)
7483 {
7484 uint64_t pc;
7485
7486 collect_register_by_name (regcache, "pc", &pc);
7487 if (debug_threads)
7488 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7489 return pc;
7490 }
7491
7492
7493 static struct target_ops linux_target_ops = {
7494 linux_create_inferior,
7495 linux_post_create_inferior,
7496 linux_attach,
7497 linux_kill,
7498 linux_detach,
7499 linux_mourn,
7500 linux_join,
7501 linux_thread_alive,
7502 linux_resume,
7503 linux_wait,
7504 linux_fetch_registers,
7505 linux_store_registers,
7506 linux_prepare_to_access_memory,
7507 linux_done_accessing_memory,
7508 linux_read_memory,
7509 linux_write_memory,
7510 linux_look_up_symbols,
7511 linux_request_interrupt,
7512 linux_read_auxv,
7513 linux_supports_z_point_type,
7514 linux_insert_point,
7515 linux_remove_point,
7516 linux_stopped_by_sw_breakpoint,
7517 linux_supports_stopped_by_sw_breakpoint,
7518 linux_stopped_by_hw_breakpoint,
7519 linux_supports_stopped_by_hw_breakpoint,
7520 linux_supports_hardware_single_step,
7521 linux_stopped_by_watchpoint,
7522 linux_stopped_data_address,
7523 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7524 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7525 && defined(PT_TEXT_END_ADDR)
7526 linux_read_offsets,
7527 #else
7528 NULL,
7529 #endif
7530 #ifdef USE_THREAD_DB
7531 thread_db_get_tls_address,
7532 #else
7533 NULL,
7534 #endif
7535 linux_qxfer_spu,
7536 hostio_last_error_from_errno,
7537 linux_qxfer_osdata,
7538 linux_xfer_siginfo,
7539 linux_supports_non_stop,
7540 linux_async,
7541 linux_start_non_stop,
7542 linux_supports_multi_process,
7543 linux_supports_fork_events,
7544 linux_supports_vfork_events,
7545 linux_supports_exec_events,
7546 linux_handle_new_gdb_connection,
7547 #ifdef USE_THREAD_DB
7548 thread_db_handle_monitor_command,
7549 #else
7550 NULL,
7551 #endif
7552 linux_common_core_of_thread,
7553 linux_read_loadmap,
7554 linux_process_qsupported,
7555 linux_supports_tracepoints,
7556 linux_read_pc,
7557 linux_write_pc,
7558 linux_thread_stopped,
7559 NULL,
7560 linux_pause_all,
7561 linux_unpause_all,
7562 linux_stabilize_threads,
7563 linux_install_fast_tracepoint_jump_pad,
7564 linux_emit_ops,
7565 linux_supports_disable_randomization,
7566 linux_get_min_fast_tracepoint_insn_len,
7567 linux_qxfer_libraries_svr4,
7568 linux_supports_agent,
7569 #ifdef HAVE_LINUX_BTRACE
7570 linux_supports_btrace,
7571 linux_enable_btrace,
7572 linux_low_disable_btrace,
7573 linux_low_read_btrace,
7574 linux_low_btrace_conf,
7575 #else
7576 NULL,
7577 NULL,
7578 NULL,
7579 NULL,
7580 NULL,
7581 #endif
7582 linux_supports_range_stepping,
7583 linux_proc_pid_to_exec_file,
7584 linux_mntns_open_cloexec,
7585 linux_mntns_unlink,
7586 linux_mntns_readlink,
7587 linux_breakpoint_kind_from_pc,
7588 linux_sw_breakpoint_from_kind,
7589 linux_proc_tid_get_name,
7590 linux_breakpoint_kind_from_current_state,
7591 linux_supports_software_single_step,
7592 linux_supports_catch_syscall,
7593 linux_get_ipa_tdesc_idx,
7594 #if USE_THREAD_DB
7595 thread_db_thread_handle,
7596 #else
7597 NULL,
7598 #endif
7599 };
7600
7601 #ifdef HAVE_LINUX_REGSETS
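/* Count the regsets in INFO; the array is terminated by an entry with a
negative size. The result is stored in INFO->num_regsets. */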
7602 void
7603 initialize_regsets_info (struct regsets_info *info)
7604 {
7605 for (info->num_regsets = 0;
7606 info->regsets[info->num_regsets].size >= 0;
7607 info->num_regsets++)
7608 ;
7609 }
7610 #endif
7611
7612 void
7613 initialize_low (void)
7614 {
7615 struct sigaction sigchld_action;
7616
7617 memset (&sigchld_action, 0, sizeof (sigchld_action));
7618 set_target_ops (&linux_target_ops);
7619
7620 linux_ptrace_init_warnings ();
7621
7622 sigchld_action.sa_handler = sigchld_handler;
7623 sigemptyset (&sigchld_action.sa_mask);
7624 sigchld_action.sa_flags = SA_RESTART;
7625 sigaction (SIGCHLD, &sigchld_action, NULL);
7626
7627 initialize_low_arch ();
7628
7629 linux_check_ptrace_features ();
7630 }