/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2019 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "common/agent.h"
#include "tdesc.h"
#include "common/rsp-low.h"
#include "common/signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "common/gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "common/filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#include "common/common-inferior.h"
#include "nat/fork-inferior.h"
#include "common/environ.h"
#include "common/scoped_restore.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef AT_HWCAP2
#define AT_HWCAP2 26
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR     (0x10000*4)
#define PT_DATA_ADDR     (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "common/btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
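
/* Illustrative sketch only (this loop is an assumption, not code used
   by gdbserver): auxv data read from /proc/<pid>/auxv is a flat array
   of the fixed-size records defined above, and is scanned like so:

     Elf64_auxv_t ent;
     while (read (fd, &ent, sizeof (ent)) == sizeof (ent))
       if (ent.a_type == AT_HWCAP2)
         return ent.a_un.a_val;
*/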

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
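
/* Usage sketch (illustrative; this mirrors how handle_extended_wait
   below consumes the list): a fork/clone child's initial stop can be
   collected before the parent's event is processed, in which case it
   is stashed via add_to_pid_list and later claimed by pid:

     int status;
     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ... no stashed stop; wait for the new child's SIGSTOP here ...
*/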

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static void proceed_one_lwp (thread_info *thread, lwp_info *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
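
/* Sketch of how the pipe is used (an assumption based on
   async_file_mark, which is declared further down in this file):
   when an event becomes ready, a byte is written to the write end,
   waking up the event loop waiting on the read end, roughly:

     write (linux_event_pipe[1], "+", 1);     marks an event
     read (linux_event_pipe[0], &c, 1);       flushes the marker
*/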

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);

  if (the_low_target.delete_thread != NULL)
    the_low_target.delete_thread (lwp->arch_private);
  else
    gdb_assert (lwp->arch_private == NULL);

  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  client_state &cs = get_client_state ();
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_t (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_of (event_thr).lwp (),
			    ptid.pid ());
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, child will
		 hit it, so uninsert single-step breakpoints from parent
		 (and child).  Once vfork child is done, reinsert
		 them back to parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = allocate_target_description ();
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Link the threads until the parent event is passed on to
	     higher layers.  */
	  event_lwp->fork_relative = child_lwp;
	  child_lwp->fork_relative = event_lwp;

	  /* If the parent thread is doing step-over with single-step
	     breakpoints, the list of single-step breakpoints are cloned
	     from the parent's.  Remove them from the child process.
	     In case of vfork, we'll reinsert them back once vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_t (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (cs.report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

#ifdef USE_THREAD_DB
      thread_db_notice_clone (event_thr, ptid);
#endif

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && cs.report_exec_events)
    {
      struct process_info *proc;
      std::vector<int> syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = event_ptid.pid ();

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = std::move (proc->syscalls_to_catch);

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = std::move (syscalls_to_catch);

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}
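
/* For illustration only (an assumption about a low target's
   implementation, not code from this file): on x86-64, the callback
   would read the syscall number from the ORIG_RAX register slot,
   roughly:

     static void
     x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
     {
       long l_sysno;

       collect_register_by_name (regcache, "orig_rax", &l_sysno);
       *sysno = (int) l_sysno;
     }
*/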

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lwp->thread = add_thread (ptid, lwp);

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  return lwp;
}

/* Callback to be used when calling fork_inferior, responsible for
   actually initiating the tracing of the inferior.  */

static void
linux_ptrace_fun ()
{
  if (ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) < 0)
    trace_start_error_with_name ("ptrace");

  if (setpgid (0, 0) < 0)
    trace_start_error_with_name ("setpgid");

  /* If GDBserver is connected to gdb via stdio, redirect the inferior's
     stdout to stderr so that inferior i/o doesn't corrupt the connection.
     Also, redirect stdin to /dev/null.  */
  if (remote_connection_is_stdio ())
    {
      if (close (0) < 0)
	trace_start_error_with_name ("close");
      if (open ("/dev/null", O_RDONLY) < 0)
	trace_start_error_with_name ("open");
      if (dup2 (2, 1) < 0)
	trace_start_error_with_name ("dup2");
      if (write (2, "stdin/stdout redirected\n",
		 sizeof ("stdin/stdout redirected\n") - 1) < 0)
	{
	  /* Errors ignored.  */;
	}
    }
}

/* Start an inferior process and return its pid.
   PROGRAM is the name of the program to be started, and PROGRAM_ARGS
   are its arguments.  */

static int
linux_create_inferior (const char *program,
		       const std::vector<char *> &program_args)
{
  client_state &cs = get_client_state ();
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

  {
    maybe_disable_address_space_randomization restore_personality
      (cs.disable_randomization);
    std::string str_program_args = stringify_argv (program_args);

    pid = fork_inferior (program,
			 str_program_args.c_str (),
			 get_environ ()->envp (), linux_ptrace_fun,
			 NULL, NULL, NULL, NULL);
  }

  linux_add_process (pid, 0);

  ptid = ptid_t (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  post_fork_inferior (pid, program);

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid.lwp ();

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid.lwp ();
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  std::string reason
	    = linux_ptrace_attach_fail_reason_string (ptid, err);

	  warning (_("Cannot attach to lwp %d: %s"), lwpid, reason.c_str ());
	}

      return 1;
    }
  return 0;
}
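
/* Note on the return value above (an assumption based on the callback
   contract of linux_proc_attach_tgid_threads): reporting that a new
   LWP was found causes /proc/PID/task to be rescanned, and scanning
   only stops once passes stop turning up new threads.  That closes
   the race with threads being created while the attach is in
   progress.  */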

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_t (pid, pid, 0);
  int err;

  proc = linux_add_process (pid, 1);

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    {
      remove_process (proc);

      std::string reason = linux_ptrace_attach_fail_reason_string (ptid, err);
      error ("Cannot attach to process %ld: %s", pid, reason.c_str ());
    }

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_t (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = ptid_t (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (ptid_t (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  bool seen_one = false;

  thread_info *thread = find_thread (pid, [&] (thread_info *thr_arg)
    {
      if (!seen_one)
	{
	  /* This is the first thread of this process we see.  */
	  seen_one = true;
	  return false;
	}
      else
	{
	  /* This is the second thread of this process we see.  */
	  return true;
	}
    });

  return thread == NULL;
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still
     support debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_of (thr).pid ();
  int lwpid = ptid_of (thr).lwp ();
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `for_each_thread'.  Kills an lwp of a given process,
   except the leader.  */

static void
kill_one_lwp_callback (thread_info *thread, int pid)
{
  struct lwp_info *lwp = get_thread_lwp (thread);

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (thread->id));
      return;
    }

  kill_wait_lwp (lwp);
}

static int
linux_kill (process_info *process)
{
  int pid = process->pid;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  for_each_thread (pid, [&] (thread_info *thread)
    {
      kill_one_lwp_callback (thread, pid);
    });

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp_info *lwp = find_lwp_pid (ptid_t (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  client_state &cs = get_client_state ();
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (cs.program_signals_p && !cs.program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!cs.program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it. "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for for_each_thread.  Detaches from non-leader threads of a
   given process.  */

static void
linux_detach_lwp_callback (thread_info *thread)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (thread->id.pid () == thread->id.lwp ())
    return;

  lwp_info *lwp = get_thread_lwp (thread);
  linux_detach_one_lwp (lwp);
}

static int
linux_detach (process_info *process)
{
  struct lwp_info *main_lwp;

  /* If there's a step over in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  for_each_thread (process->pid, linux_detach_lwp_callback);

  main_lwp = find_lwp_pid (ptid_t (process->pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  for_each_thread (process->pid, [] (thread_info *thread)
    {
      delete_lwp (get_thread_lwp (thread));
    });

  /* Freeing all private data.  */
  priv = process->priv;
  if (the_low_target.delete_process != NULL)
    the_low_target.delete_process (priv->arch_private);
  else
    gdb_assert (priv->arch_private == NULL);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

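/* Wait for process PID to exit.  Statuses are reaped until the
   process reports an exit, or until waitpid fails with ECHILD,
   meaning there is nothing left to wait for.  */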
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return true if this lwp has an interesting status pending.  */
static bool
status_pending_p_callback (thread_info *thread, ptid_t ptid)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!thread->id.matches (ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

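/* Return the LWP whose lwp id matches PTID's lwp field, or, if PTID
   carries no lwp id, the LWP whose id equals PTID's pid field.
   Return NULL if no match is found.  */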
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  thread_info *thread = find_thread ([&] (thread_info *thr_arg)
    {
      int lwp = ptid.lwp () != 0 ? ptid.lwp () : ptid.pid ();
      return thr_arg->id.lwp () == lwp;
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for_each_thread (pid, [&] (thread_info *thread)
    {
      count++;
    });

  return count;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  thread_info *thread = find_thread (filter, [&] (thread_info *thr_arg)
    {
      lwp_info *lwp = get_thread_lwp (thr_arg);

      return callback (lwp);
    });

  if (thread == NULL)
    return NULL;

  return get_thread_lwp (thread);
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  for_each_process ([] (process_info *proc) {
    pid_t leader_pid = pid_of (proc);
    struct lwp_info *leader_lp;

    leader_lp = find_lwp_pid (ptid_t (leader_pid));

    if (debug_threads)
      debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		    "num_lwps=%d, zombie=%d\n",
		    leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
		    linux_proc_pid_is_zombie (leader_pid));

    if (leader_lp != NULL && !leader_lp->stopped
	/* Check if there are other threads in the group, as we may
	   have raced with the inferior simply exiting.  */
	&& !last_thread_of_process_p (leader_pid)
	&& linux_proc_pid_is_zombie (leader_pid))
      {
1888 /* A leader zombie can mean one of two things:
1889
1890 - It exited, and there's an exit status pending, or only
1891 the leader exited (not the whole program). In the latter
1892 case, we can't waitpid the leader's exit status until all
1893 other threads are gone.
1894
1895 - There are 3 or more threads in the group, and a thread
1896 other than the leader exec'd. On an exec, the Linux
1897 kernel destroys all other threads (except the execing
1898 one) in the thread group, and resets the execing thread's
1899 tid to the tgid. No exit notification is sent for the
1900 execing thread -- from the ptracer's perspective, it
1901 appears as though the execing thread just vanishes.
1902 Until we reap all other threads except the leader and the
1903 execing thread, the leader will be zombie, and the
1904 execing thread will be in `D (disc sleep)'. As soon as
1905 all other threads are reaped, the execing thread changes
1906 its tid to the tgid, and the previous (zombie) leader
1907 vanishes, giving way to the "new" leader. We could try
1908 distinguishing the exit and exec cases, by waiting once
1909 more, and seeing if something comes out, but it doesn't
1910 sound useful. The previous leader _does_ go away, and
1911 we'll re-add the new one once we see the exec event
1912 (which is just the same as what would happen if the
1913 previous leader did exit voluntarily before some other
1914 thread execs). */
1915
1916 if (debug_threads)
1917 debug_printf ("CZL: Thread group leader %d zombie "
1918 "(it exited, or another thread execd).\n",
1919 leader_pid);
1920
1921 delete_lwp (leader_lp);
1922 }
1923 });
1924 }
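
/* linux_proc_pid_is_zombie, used above, boils down to checking the
   "State:" line of /proc/PID/status.  A detached sketch of that check
   (illustrative only, assuming <stdio.h> and <string.h>):

     static int
     pid_is_zombie_sketch (pid_t pid)
     {
       char path[64], line[128];
       FILE *f;
       int zombie = 0;

       snprintf (path, sizeof path, "/proc/%d/status", (int) pid);
       f = fopen (path, "r");
       if (f == NULL)
         return 0;
       while (fgets (line, sizeof line, f) != NULL)
         if (strncmp (line, "State:", 6) == 0)
           {
             zombie = strchr (line, 'Z') != NULL;
             break;
           }
       fclose (f);
       return zombie;
     }  */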
1925
1926 /* Predicate for `find_thread'. Return true if THREAD matches
1927 FILTER and its LWP is not stopped. */
1928
1929 static bool
1930 not_stopped_callback (thread_info *thread, ptid_t filter)
1931 {
1932 if (!thread->id.matches (filter))
1933 return false;
1934
1935 lwp_info *lwp = get_thread_lwp (thread);
1936
1937 return !lwp->stopped;
1938 }
1939
1940 /* Increment LWP's suspend count. */
1941
1942 static void
1943 lwp_suspended_inc (struct lwp_info *lwp)
1944 {
1945 lwp->suspended++;
1946
1947 if (debug_threads && lwp->suspended > 4)
1948 {
1949 struct thread_info *thread = get_lwp_thread (lwp);
1950
1951 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1952 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1953 }
1954 }
1955
1956 /* Decrement LWP's suspend count. */
1957
1958 static void
1959 lwp_suspended_decr (struct lwp_info *lwp)
1960 {
1961 lwp->suspended--;
1962
1963 if (lwp->suspended < 0)
1964 {
1965 struct thread_info *thread = get_lwp_thread (lwp);
1966
1967 internal_error (__FILE__, __LINE__,
1968 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1969 lwp->suspended);
1970 }
1971 }
1972
1973 /* This function should only be called if the LWP got a SIGTRAP.
1974
1975 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1976 event was handled, 0 otherwise. */
1977
1978 static int
1979 handle_tracepoints (struct lwp_info *lwp)
1980 {
1981 struct thread_info *tinfo = get_lwp_thread (lwp);
1982 int tpoint_related_event = 0;
1983
1984 gdb_assert (lwp->suspended == 0);
1985
1986 /* If this tracepoint hit causes a tracing stop, we'll immediately
1987 uninsert tracepoints. To do this, we temporarily pause all
1988 threads, unpatch away, and then unpause threads. We need to make
1989 sure the unpausing doesn't resume LWP too. */
1990 lwp_suspended_inc (lwp);
1991
1992 /* And we need to be sure that any all-threads-stopping doesn't try
1993 to move threads out of the jump pads, as it could deadlock the
1994 inferior (LWP could be in the jump pad, maybe even holding the
1995 lock). */
1996
1997 /* Do any necessary step collect actions. */
1998 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1999
2000 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2001
2002 /* See if we just hit a tracepoint and do its main collect
2003 actions. */
2004 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2005
2006 lwp_suspended_decr (lwp);
2007
2008 gdb_assert (lwp->suspended == 0);
2009 gdb_assert (!stabilizing_threads
2010 || (lwp->collecting_fast_tracepoint
2011 != fast_tpoint_collect_result::not_collecting));
2012
2013 if (tpoint_related_event)
2014 {
2015 if (debug_threads)
2016 debug_printf ("got a tracepoint event\n");
2017 return 1;
2018 }
2019
2020 return 0;
2021 }
2022
2023 /* Convenience wrapper. Returns information about LWP's fast tracepoint
2024 collection status. */
2025
2026 static fast_tpoint_collect_result
2027 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2028 struct fast_tpoint_collect_status *status)
2029 {
2030 CORE_ADDR thread_area;
2031 struct thread_info *thread = get_lwp_thread (lwp);
2032
2033 if (the_low_target.get_thread_area == NULL)
2034 return fast_tpoint_collect_result::not_collecting;
2035
2036 /* Get the thread area address. This is used to recognize which
2037 thread is which when tracing with the in-process agent library.
2038 We don't read anything from the address, and treat it as opaque;
2039 it's the address itself that we assume is unique per-thread. */
2040 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2041 return fast_tpoint_collect_result::not_collecting;
2042
2043 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2044 }
2045
2046 /* The reason we resume in the caller is that we want to be able
2047 to pass lwp->status_pending as WSTAT, and we need to clear
2048 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2049 refuses to resume. */
2050
2051 static int
2052 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2053 {
2054 struct thread_info *saved_thread;
2055
2056 saved_thread = current_thread;
2057 current_thread = get_lwp_thread (lwp);
2058
2059 if ((wstat == NULL
2060 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2061 && supports_fast_tracepoints ()
2062 && agent_loaded_p ())
2063 {
2064 struct fast_tpoint_collect_status status;
2065
2066 if (debug_threads)
2067 debug_printf ("Checking whether LWP %ld needs to move out of the "
2068 "jump pad.\n",
2069 lwpid_of (current_thread));
2070
2071 fast_tpoint_collect_result r
2072 = linux_fast_tracepoint_collecting (lwp, &status);
2073
2074 if (wstat == NULL
2075 || (WSTOPSIG (*wstat) != SIGILL
2076 && WSTOPSIG (*wstat) != SIGFPE
2077 && WSTOPSIG (*wstat) != SIGSEGV
2078 && WSTOPSIG (*wstat) != SIGBUS))
2079 {
2080 lwp->collecting_fast_tracepoint = r;
2081
2082 if (r != fast_tpoint_collect_result::not_collecting)
2083 {
2084 if (r == fast_tpoint_collect_result::before_insn
2085 && lwp->exit_jump_pad_bkpt == NULL)
2086 {
2087 /* Haven't executed the original instruction yet.
2088 Set breakpoint there, and wait till it's hit,
2089 then single-step until exiting the jump pad. */
2090 lwp->exit_jump_pad_bkpt
2091 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2092 }
2093
2094 if (debug_threads)
2095 debug_printf ("Checking whether LWP %ld needs to move out of "
2096 "the jump pad...it does\n",
2097 lwpid_of (current_thread));
2098 current_thread = saved_thread;
2099
2100 return 1;
2101 }
2102 }
2103 else
2104 {
2105 /* If we get a synchronous signal while collecting, *and*
2106 while executing the (relocated) original instruction,
2107 reset the PC to point at the tpoint address, before
2108 reporting to GDB. Otherwise, it's an IPA lib bug: just
2109 report the signal to GDB, and pray for the best. */
2110
2111 lwp->collecting_fast_tracepoint
2112 = fast_tpoint_collect_result::not_collecting;
2113
2114 if (r != fast_tpoint_collect_result::not_collecting
2115 && (status.adjusted_insn_addr <= lwp->stop_pc
2116 && lwp->stop_pc < status.adjusted_insn_addr_end))
2117 {
2118 siginfo_t info;
2119 struct regcache *regcache;
2120
2121 /* The si_addr on a few signals references the address
2122 of the faulting instruction. Adjust that as
2123 well. */
2124 if ((WSTOPSIG (*wstat) == SIGILL
2125 || WSTOPSIG (*wstat) == SIGFPE
2126 || WSTOPSIG (*wstat) == SIGBUS
2127 || WSTOPSIG (*wstat) == SIGSEGV)
2128 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2129 (PTRACE_TYPE_ARG3) 0, &info) == 0
2130 /* Final check just to make sure we don't clobber
2131 the siginfo of non-kernel-sent signals. */
2132 && (uintptr_t) info.si_addr == lwp->stop_pc)
2133 {
2134 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2135 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2136 (PTRACE_TYPE_ARG3) 0, &info);
2137 }
2138
2139 regcache = get_thread_regcache (current_thread, 1);
2140 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2141 lwp->stop_pc = status.tpoint_addr;
2142
2143 /* Cancel any fast tracepoint lock this thread was
2144 holding. */
2145 force_unlock_trace_buffer ();
2146 }
2147
2148 if (lwp->exit_jump_pad_bkpt != NULL)
2149 {
2150 if (debug_threads)
2151 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2152 "stopping all threads momentarily.\n");
2153
2154 stop_all_lwps (1, lwp);
2155
2156 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2157 lwp->exit_jump_pad_bkpt = NULL;
2158
2159 unstop_all_lwps (1, lwp);
2160
2161 gdb_assert (lwp->suspended >= 0);
2162 }
2163 }
2164 }
2165
2166 if (debug_threads)
2167 debug_printf ("Checking whether LWP %ld needs to move out of the "
2168 "jump pad...no\n",
2169 lwpid_of (current_thread));
2170
2171 current_thread = saved_thread;
2172 return 0;
2173 }
2174
2175 /* Enqueue one signal in the "signals to report later when out of the
2176 jump pad" list. */
2177
2178 static void
2179 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2180 {
2181 struct pending_signals *p_sig;
2182 struct thread_info *thread = get_lwp_thread (lwp);
2183
2184 if (debug_threads)
2185 debug_printf ("Deferring signal %d for LWP %ld.\n",
2186 WSTOPSIG (*wstat), lwpid_of (thread));
2187
2188 if (debug_threads)
2189 {
2190 struct pending_signals *sig;
2191
2192 for (sig = lwp->pending_signals_to_report;
2193 sig != NULL;
2194 sig = sig->prev)
2195 debug_printf (" Already queued %d\n",
2196 sig->signal);
2197
2198 debug_printf (" (no more currently queued signals)\n");
2199 }
2200
2201 /* Don't enqueue non-RT signals if they are already in the deferred
2202 queue. (SIGSTOP being the easiest signal to see ending up here
2203 twice.) */
2204 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2205 {
2206 struct pending_signals *sig;
2207
2208 for (sig = lwp->pending_signals_to_report;
2209 sig != NULL;
2210 sig = sig->prev)
2211 {
2212 if (sig->signal == WSTOPSIG (*wstat))
2213 {
2214 if (debug_threads)
2215 debug_printf ("Not requeuing already queued non-RT signal %d"
2216 " for LWP %ld\n",
2217 sig->signal,
2218 lwpid_of (thread));
2219 return;
2220 }
2221 }
2222 }
2223
2224 p_sig = XCNEW (struct pending_signals);
2225 p_sig->prev = lwp->pending_signals_to_report;
2226 p_sig->signal = WSTOPSIG (*wstat);
2227
2228 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2229 &p_sig->info);
2230
2231 lwp->pending_signals_to_report = p_sig;
2232 }
2233
2234 /* Dequeue one signal from the "signals to report later when out of
2235 the jump pad" list. */
2236
2237 static int
2238 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2239 {
2240 struct thread_info *thread = get_lwp_thread (lwp);
2241
2242 if (lwp->pending_signals_to_report != NULL)
2243 {
2244 struct pending_signals **p_sig;
2245
2246 p_sig = &lwp->pending_signals_to_report;
2247 while ((*p_sig)->prev != NULL)
2248 p_sig = &(*p_sig)->prev;
2249
2250 *wstat = W_STOPCODE ((*p_sig)->signal);
2251 if ((*p_sig)->info.si_signo != 0)
2252 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2253 &(*p_sig)->info);
2254 free (*p_sig);
2255 *p_sig = NULL;
2256
2257 if (debug_threads)
2258 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2259 WSTOPSIG (*wstat), lwpid_of (thread));
2260
2261 if (debug_threads)
2262 {
2263 struct pending_signals *sig;
2264
2265 for (sig = lwp->pending_signals_to_report;
2266 sig != NULL;
2267 sig = sig->prev)
2268 debug_printf (" Still queued %d\n",
2269 sig->signal);
2270
2271 debug_printf (" (no more queued signals)\n");
2272 }
2273
2274 return 1;
2275 }
2276
2277 return 0;
2278 }
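
/* Note on ordering (for the two routines above): enqueue pushes at
   the head of the list, while dequeue walks to the tail before
   popping, so deferred signals are reported in arrival order (FIFO).
   For example, deferring SIGUSR1 and then SIGUSR2 means two dequeues
   yield SIGUSR1 first, then SIGUSR2.  */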
2279
2280 /* Fetch the possibly triggered data watchpoint info and store it in
2281 CHILD.
2282
2283 On some archs, like x86, that use debug registers to set
2284 watchpoints, the way to know which watched address trapped is
2285 to check the register that is used to select which address to
2286 watch. The problem is that between setting the watchpoint and
2287 reading back which data address trapped, the user may change the
2288 set of watchpoints, and, as a consequence, GDB changes the
2289 debug registers in the inferior. To avoid reading back a stale
2290 stopped-data-address when that happens, we cache in CHILD the
2291 fact that a watchpoint trapped, and the corresponding data
2292 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
2293 changes the debug registers meanwhile, we have the cached data. */
2294
2295 static int
2296 check_stopped_by_watchpoint (struct lwp_info *child)
2297 {
2298 if (the_low_target.stopped_by_watchpoint != NULL)
2299 {
2300 struct thread_info *saved_thread;
2301
2302 saved_thread = current_thread;
2303 current_thread = get_lwp_thread (child);
2304
2305 if (the_low_target.stopped_by_watchpoint ())
2306 {
2307 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2308
2309 if (the_low_target.stopped_data_address != NULL)
2310 child->stopped_data_address
2311 = the_low_target.stopped_data_address ();
2312 else
2313 child->stopped_data_address = 0;
2314 }
2315
2316 current_thread = saved_thread;
2317 }
2318
2319 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2320 }
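
/* As a concrete example, on x86 the low target's stopped_by_watchpoint
   hook inspects the DR6 debug status register, and
   stopped_data_address reads back the matching DR0..DR3 address
   register; the caching above is what lets GDB query the data address
   later, even after it has rewritten the debug registers.  (Details
   given for illustration; see the x86 low target for the real
   implementation.)  */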
2321
2322 /* Return the ptrace options that we want to try to enable. */
2323
2324 static int
2325 linux_low_ptrace_options (int attached)
2326 {
2327 client_state &cs = get_client_state ();
2328 int options = 0;
2329
2330 if (!attached)
2331 options |= PTRACE_O_EXITKILL;
2332
2333 if (cs.report_fork_events)
2334 options |= PTRACE_O_TRACEFORK;
2335
2336 if (cs.report_vfork_events)
2337 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2338
2339 if (cs.report_exec_events)
2340 options |= PTRACE_O_TRACEEXEC;
2341
2342 options |= PTRACE_O_TRACESYSGOOD;
2343
2344 return options;
2345 }
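
/* These options are ultimately applied with PTRACE_SETOPTIONS (via
   linux_enable_event_reporting).  A minimal sketch of that step,
   assuming a ptrace-stopped tracee TID:

     errno = 0;
     ptrace (PTRACE_SETOPTIONS, tid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (uintptr_t) options);
     if (errno != 0)
       perror ("PTRACE_SETOPTIONS");  */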
2346
2347 /* Do low-level handling of the event, and check if we should go on
2348 and pass it to caller code. Return the affected lwp if we
2349 should, or NULL otherwise. */
2350
2351 static struct lwp_info *
2352 linux_low_filter_event (int lwpid, int wstat)
2353 {
2354 client_state &cs = get_client_state ();
2355 struct lwp_info *child;
2356 struct thread_info *thread;
2357 int have_stop_pc = 0;
2358
2359 child = find_lwp_pid (ptid_t (lwpid));
2360
2361 /* Check for stop events reported by a process we didn't already
2362 know about - anything not already in our LWP list.
2363
2364 If we're expecting to receive stopped processes after
2365 fork, vfork, and clone events, then we'll just add the
2366 new one to our list and go back to waiting for the event
2367 to be reported - the stopped process might be returned
2368 from waitpid before or after the event is.
2369
2370 But note the case of a non-leader thread exec'ing after the
2371 leader having exited, and gone from our lists (because
2372 check_zombie_leaders deleted it). The non-leader thread
2373 changes its tid to the tgid. */
2374
2375 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2376 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2377 {
2378 ptid_t child_ptid;
2379
2380 /* A multi-thread exec after we had seen the leader exiting. */
2381 if (debug_threads)
2382 {
2383 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2384 "after exec.\n", lwpid);
2385 }
2386
2387 child_ptid = ptid_t (lwpid, lwpid, 0);
2388 child = add_lwp (child_ptid);
2389 child->stopped = 1;
2390 current_thread = child->thread;
2391 }
2392
2393 /* If we didn't find a process, one of two things presumably happened:
2394 - A process we started and then detached from has exited. Ignore it.
2395 - A process we are controlling has forked and the new child's stop
2396 was reported to us by the kernel. Save its PID. */
2397 if (child == NULL && WIFSTOPPED (wstat))
2398 {
2399 add_to_pid_list (&stopped_pids, lwpid, wstat);
2400 return NULL;
2401 }
2402 else if (child == NULL)
2403 return NULL;
2404
2405 thread = get_lwp_thread (child);
2406
2407 child->stopped = 1;
2408
2409 child->last_status = wstat;
2410
2411 /* Check if the thread has exited. */
2412 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2413 {
2414 if (debug_threads)
2415 debug_printf ("LLFE: %d exited.\n", lwpid);
2416
2417 if (finish_step_over (child))
2418 {
2419 /* Unsuspend all other LWPs, and set them back running again. */
2420 unsuspend_all_lwps (child);
2421 }
2422
2423 /* If there is at least one more LWP, then the exit signal was
2424 not the end of the debugged application and should be
2425 ignored, unless GDB wants to hear about thread exits. */
2426 if (cs.report_thread_events
2427 || last_thread_of_process_p (pid_of (thread)))
2428 {
2429 /* Since events are serialized to GDB core, we can't report
2430 this one right now; leave the status pending for the next
2431 time we're able to report it. */
2432 mark_lwp_dead (child, wstat);
2433 return child;
2434 }
2435 else
2436 {
2437 delete_lwp (child);
2438 return NULL;
2439 }
2440 }
2441
2442 gdb_assert (WIFSTOPPED (wstat));
2443
2444 if (WIFSTOPPED (wstat))
2445 {
2446 struct process_info *proc;
2447
2448 /* Architecture-specific setup after the inferior is running. */
2449 proc = find_process_pid (pid_of (thread));
2450 if (proc->tdesc == NULL)
2451 {
2452 if (proc->attached)
2453 {
2454 /* This needs to happen after we have attached to the
2455 inferior and it is stopped for the first time, but
2456 before we access any inferior registers. */
2457 linux_arch_setup_thread (thread);
2458 }
2459 else
2460 {
2461 /* The process is started, but GDBserver will do
2462 architecture-specific setup after the program stops at
2463 the first instruction. */
2464 child->status_pending_p = 1;
2465 child->status_pending = wstat;
2466 return child;
2467 }
2468 }
2469 }
2470
2471 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2472 {
2473 struct process_info *proc = find_process_pid (pid_of (thread));
2474 int options = linux_low_ptrace_options (proc->attached);
2475
2476 linux_enable_event_reporting (lwpid, options);
2477 child->must_set_ptrace_flags = 0;
2478 }
2479
2480 /* Always update syscall_state, even if it will be filtered later. */
2481 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2482 {
2483 child->syscall_state
2484 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2485 ? TARGET_WAITKIND_SYSCALL_RETURN
2486 : TARGET_WAITKIND_SYSCALL_ENTRY);
2487 }
2488 else
2489 {
2490 /* Almost all other ptrace-stops are known to be outside of system
2491 calls, with further exceptions in handle_extended_wait. */
2492 child->syscall_state = TARGET_WAITKIND_IGNORE;
2493 }
2494
2495 /* Be careful to not overwrite stop_pc until save_stop_reason is
2496 called. */
2497 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2498 && linux_is_extended_waitstatus (wstat))
2499 {
2500 child->stop_pc = get_pc (child);
2501 if (handle_extended_wait (&child, wstat))
2502 {
2503 /* The event has been handled, so just return without
2504 reporting it. */
2505 return NULL;
2506 }
2507 }
2508
2509 if (linux_wstatus_maybe_breakpoint (wstat))
2510 {
2511 if (save_stop_reason (child))
2512 have_stop_pc = 1;
2513 }
2514
2515 if (!have_stop_pc)
2516 child->stop_pc = get_pc (child);
2517
2518 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2519 && child->stop_expected)
2520 {
2521 if (debug_threads)
2522 debug_printf ("Expected stop.\n");
2523 child->stop_expected = 0;
2524
2525 if (thread->last_resume_kind == resume_stop)
2526 {
2527 /* We want to report the stop to the core. Treat the
2528 SIGSTOP as a normal event. */
2529 if (debug_threads)
2530 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2531 target_pid_to_str (ptid_of (thread)));
2532 }
2533 else if (stopping_threads != NOT_STOPPING_THREADS)
2534 {
2535 /* Stopping threads. We don't want this SIGSTOP to end up
2536 pending. */
2537 if (debug_threads)
2538 debug_printf ("LLW: SIGSTOP caught for %s "
2539 "while stopping threads.\n",
2540 target_pid_to_str (ptid_of (thread)));
2541 return NULL;
2542 }
2543 else
2544 {
2545 /* This is a delayed SIGSTOP. Filter out the event. */
2546 if (debug_threads)
2547 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2548 child->stepping ? "step" : "continue",
2549 target_pid_to_str (ptid_of (thread)));
2550
2551 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2552 return NULL;
2553 }
2554 }
2555
2556 child->status_pending_p = 1;
2557 child->status_pending = wstat;
2558 return child;
2559 }
2560
2561 /* Return 1 if THREAD is doing hardware single step, 0 otherwise. */
2562
2563 static int
2564 maybe_hw_step (struct thread_info *thread)
2565 {
2566 if (can_hardware_single_step ())
2567 return 1;
2568 else
2569 {
2570 /* GDBserver must insert single-step breakpoint for software
2571 single step. */
2572 gdb_assert (has_single_step_breakpoints (thread));
2573 return 0;
2574 }
2575 }
2576
2577 /* Resume LWPs that are currently stopped without any pending status
2578 to report, but are resumed from the core's perspective. */
2579
2580 static void
2581 resume_stopped_resumed_lwps (thread_info *thread)
2582 {
2583 struct lwp_info *lp = get_thread_lwp (thread);
2584
2585 if (lp->stopped
2586 && !lp->suspended
2587 && !lp->status_pending_p
2588 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2589 {
2590 int step = 0;
2591
2592 if (thread->last_resume_kind == resume_step)
2593 step = maybe_hw_step (thread);
2594
2595 if (debug_threads)
2596 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2597 target_pid_to_str (ptid_of (thread)),
2598 paddress (lp->stop_pc),
2599 step);
2600
2601 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2602 }
2603 }
2604
2605 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2606 match FILTER_PTID (leaving others pending). The PTIDs can be:
2607 minus_one_ptid, to specify any child; a pid PTID, specifying all
2608 lwps of a thread group; or a PTID representing a single lwp. Store
2609 the stop status through the status pointer WSTAT. OPTIONS is
2610 passed to the waitpid call. Return 0 if no event was found and
2611 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2612 were found. Return the PID of the stopped child otherwise. */
2613
2614 static int
2615 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2616 int *wstatp, int options)
2617 {
2618 struct thread_info *event_thread;
2619 struct lwp_info *event_child, *requested_child;
2620 sigset_t block_mask, prev_mask;
2621
2622 retry:
2623 /* N.B. event_thread points to the thread_info struct that contains
2624 event_child. Keep them in sync. */
2625 event_thread = NULL;
2626 event_child = NULL;
2627 requested_child = NULL;
2628
2629 /* Check for a lwp with a pending status. */
2630
2631 if (filter_ptid == minus_one_ptid || filter_ptid.is_pid ())
2632 {
2633 event_thread = find_thread_in_random ([&] (thread_info *thread)
2634 {
2635 return status_pending_p_callback (thread, filter_ptid);
2636 });
2637
2638 if (event_thread != NULL)
2639 event_child = get_thread_lwp (event_thread);
2640 if (debug_threads && event_thread)
2641 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2642 }
2643 else if (filter_ptid != null_ptid)
2644 {
2645 requested_child = find_lwp_pid (filter_ptid);
2646
2647 if (stopping_threads == NOT_STOPPING_THREADS
2648 && requested_child->status_pending_p
2649 && (requested_child->collecting_fast_tracepoint
2650 != fast_tpoint_collect_result::not_collecting))
2651 {
2652 enqueue_one_deferred_signal (requested_child,
2653 &requested_child->status_pending);
2654 requested_child->status_pending_p = 0;
2655 requested_child->status_pending = 0;
2656 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2657 }
2658
2659 if (requested_child->suspended
2660 && requested_child->status_pending_p)
2661 {
2662 internal_error (__FILE__, __LINE__,
2663 "requesting an event out of a"
2664 " suspended child?");
2665 }
2666
2667 if (requested_child->status_pending_p)
2668 {
2669 event_child = requested_child;
2670 event_thread = get_lwp_thread (event_child);
2671 }
2672 }
2673
2674 if (event_child != NULL)
2675 {
2676 if (debug_threads)
2677 debug_printf ("Got an event from pending child %ld (%04x)\n",
2678 lwpid_of (event_thread), event_child->status_pending);
2679 *wstatp = event_child->status_pending;
2680 event_child->status_pending_p = 0;
2681 event_child->status_pending = 0;
2682 current_thread = event_thread;
2683 return lwpid_of (event_thread);
2684 }
2685
2686 /* But if we don't find a pending event, we'll have to wait.
2687
2688 We only enter this loop if no process has a pending wait status.
2689 Thus any action taken in response to a wait status inside this
2690 loop is responding as soon as we detect the status, not after any
2691 pending events. */
2692
2693 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2694 all signals while here. */
2695 sigfillset (&block_mask);
2696 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2697
2698 /* Always pull all events out of the kernel. We'll randomly select
2699 an event LWP out of all that have events, to prevent
2700 starvation. */
2701 while (event_child == NULL)
2702 {
2703 pid_t ret = 0;
2704
2705 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2706 quirks:
2707
2708 - If the thread group leader exits while other threads in the
2709 thread group still exist, waitpid(TGID, ...) hangs. That
2710 waitpid won't return an exit status until the other threads
2711 in the group are reaped.
2712
2713 - When a non-leader thread execs, that thread just vanishes
2714 without reporting an exit (so we'd hang if we waited for it
2715 explicitly in that case). The exec event is reported to
2716 the TGID pid. */
2717 errno = 0;
2718 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2719
2720 if (debug_threads)
2721 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2722 ret, errno ? strerror (errno) : "ERRNO-OK");
2723
2724 if (ret > 0)
2725 {
2726 if (debug_threads)
2727 {
2728 debug_printf ("LLW: waitpid %ld received %s\n",
2729 (long) ret, status_to_str (*wstatp));
2730 }
2731
2732 /* Filter all events. IOW, leave all events pending. We'll
2733 randomly select an event LWP out of all that have events
2734 below. */
2735 linux_low_filter_event (ret, *wstatp);
2736 /* Retry until nothing comes out of waitpid. A single
2737 SIGCHLD can indicate more than one child stopped. */
2738 continue;
2739 }
2740
2741 /* Now that we've pulled all events out of the kernel, resume
2742 LWPs that don't have an interesting event to report. */
2743 if (stopping_threads == NOT_STOPPING_THREADS)
2744 for_each_thread (resume_stopped_resumed_lwps);
2745
2746 /* ... and find an LWP with a status to report to the core, if
2747 any. */
2748 event_thread = find_thread_in_random ([&] (thread_info *thread)
2749 {
2750 return status_pending_p_callback (thread, filter_ptid);
2751 });
2752
2753 if (event_thread != NULL)
2754 {
2755 event_child = get_thread_lwp (event_thread);
2756 *wstatp = event_child->status_pending;
2757 event_child->status_pending_p = 0;
2758 event_child->status_pending = 0;
2759 break;
2760 }
2761
2762 /* Check for zombie thread group leaders. Those can't be reaped
2763 until all other threads in the thread group are. */
2764 check_zombie_leaders ();
2765
2766 auto not_stopped = [&] (thread_info *thread)
2767 {
2768 return not_stopped_callback (thread, wait_ptid);
2769 };
2770
2771 /* If there are no resumed children left in the set of LWPs we
2772 want to wait for, bail. We can't just block in
2773 waitpid/sigsuspend, because lwps might have been left stopped
2774 in trace-stop state, and we'd be stuck forever waiting for
2775 their status to change (which would only happen if we resumed
2776 them). Even if WNOHANG is set, this return code is preferred
2777 over 0 (below), as it is more detailed. */
2778 if (find_thread (not_stopped) == NULL)
2779 {
2780 if (debug_threads)
2781 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2782 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2783 return -1;
2784 }
2785
2786 /* No interesting event to report to the caller. */
2787 if ((options & WNOHANG))
2788 {
2789 if (debug_threads)
2790 debug_printf ("WNOHANG set, no event found\n");
2791
2792 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2793 return 0;
2794 }
2795
2796 /* Block until we get an event reported with SIGCHLD. */
2797 if (debug_threads)
2798 debug_printf ("sigsuspend'ing\n");
2799
2800 sigsuspend (&prev_mask);
2801 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2802 goto retry;
2803 }
2804
2805 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2806
2807 current_thread = event_thread;
2808
2809 return lwpid_of (event_thread);
2810 }
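
/* The wait loop above combines two classic patterns; a detached
   sketch (illustrative only, with hypothetical process_status and
   have_event helpers, and error handling omitted):

     sigset_t block_mask, prev_mask;
     sigfillset (&block_mask);
     sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

     for (;;)
       {
	 pid_t pid;
	 int wstat;

	 /* 1. Drain: pull every pending status without blocking.  */
	 while ((pid = waitpid (-1, &wstat, __WALL | WNOHANG)) > 0)
	   process_status (pid, wstat);

	 if (have_event ())
	   break;

	 /* 2. Sleep: atomically restore the old mask and wait for
	    SIGCHLD, avoiding the check-then-sleep race.  */
	 sigsuspend (&prev_mask);
       }

     sigprocmask (SIG_SETMASK, &prev_mask, NULL);  */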
2811
2812 /* Wait for an event from child(ren) PTID. PTIDs can be:
2813 minus_one_ptid, to specify any child; a pid PTID, specifying all
2814 lwps of a thread group; or a PTID representing a single lwp. Store
2815 the stop status through the status pointer WSTAT. OPTIONS is
2816 passed to the waitpid call. Return 0 if no event was found and
2817 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2818 were found. Return the PID of the stopped child otherwise. */
2819
2820 static int
2821 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2822 {
2823 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2824 }
2825
2826 /* Select one LWP out of those that have events pending. */
2827
2828 static void
2829 select_event_lwp (struct lwp_info **orig_lp)
2830 {
2831 int random_selector;
2832 struct thread_info *event_thread = NULL;
2833
2834 /* In all-stop, give preference to the LWP that is being
2835 single-stepped. There will be at most one, and it's the LWP that
2836 the core is most interested in. If we didn't do this, then we'd
2837 have to handle pending step SIGTRAPs somehow in case the core
2838 later continues the previously-stepped thread, otherwise we'd
2839 report the pending SIGTRAP, and the core, not having stepped the
2840 thread, wouldn't understand what the trap was for, and therefore
2841 would report it to the user as a random signal. */
2842 if (!non_stop)
2843 {
2844 event_thread = find_thread ([] (thread_info *thread)
2845 {
2846 lwp_info *lp = get_thread_lwp (thread);
2847
2848 return (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2849 && thread->last_resume_kind == resume_step
2850 && lp->status_pending_p);
2851 });
2852
2853 if (event_thread != NULL)
2854 {
2855 if (debug_threads)
2856 debug_printf ("SEL: Select single-step %s\n",
2857 target_pid_to_str (ptid_of (event_thread)));
2858 }
2859 }
2860 if (event_thread == NULL)
2861 {
2862 /* No single-stepping LWP. Select one at random, out of those
2863 which have had events. */
2864
2865 /* First see how many events we have. */
2866 int num_events = 0;
2867 for_each_thread ([&] (thread_info *thread)
2868 {
2869 lwp_info *lp = get_thread_lwp (thread);
2870
2871 /* Count only resumed LWPs that have an event pending. */
2872 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2873 && lp->status_pending_p)
2874 num_events++;
2875 });
2876 gdb_assert (num_events > 0);
2877
2878 /* Now randomly pick a LWP out of those that have had
2879 events. */
2880 random_selector = (int)
2881 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2882
2883 if (debug_threads && num_events > 1)
2884 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2885 num_events, random_selector);
2886
2887 event_thread = find_thread ([&] (thread_info *thread)
2888 {
2889 lwp_info *lp = get_thread_lwp (thread);
2890
2891 /* Select only resumed LWPs that have an event pending. */
2892 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2893 && lp->status_pending_p)
2894 if (random_selector-- == 0)
2895 return true;
2896
2897 return false;
2898 });
2899 }
2900
2901 if (event_thread != NULL)
2902 {
2903 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2904
2905 /* Switch the event LWP. */
2906 *orig_lp = event_lp;
2907 }
2908 }
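
/* The scaling expression above,

     (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0)),

   maps rand () into [0, num_events) without the slight bias that the
   naive "rand () % num_events" would show whenever RAND_MAX + 1 is
   not a multiple of num_events.  */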
2909
2910 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2911 non-NULL. */
2912
2913 static void
2914 unsuspend_all_lwps (struct lwp_info *except)
2915 {
2916 for_each_thread ([&] (thread_info *thread)
2917 {
2918 lwp_info *lwp = get_thread_lwp (thread);
2919
2920 if (lwp != except)
2921 lwp_suspended_decr (lwp);
2922 });
2923 }
2924
2925 static void move_out_of_jump_pad_callback (thread_info *thread);
2926 static bool stuck_in_jump_pad_callback (thread_info *thread);
2927 static bool lwp_running (thread_info *thread);
2928 static ptid_t linux_wait_1 (ptid_t ptid,
2929 struct target_waitstatus *ourstatus,
2930 int target_options);
2931
2932 /* Stabilize threads (move out of jump pads).
2933
2934 If a thread is midway collecting a fast tracepoint, we need to
2935 finish the collection and move it out of the jump pad before
2936 reporting the signal.
2937
2938 This avoids recursion while collecting (when a signal arrives
2939 midway, and the signal handler itself collects), which would trash
2940 the trace buffer. In case the user set a breakpoint in a signal
2941 handler, this avoids the backtrace showing the jump pad, etc..
2942 Most importantly, there are certain things we can't do safely if
2943 threads are stopped in a jump pad (or in its callees). For
2944 example:
2945
2946 - starting a new trace run. A thread still collecting the
2947 previous run could trash the trace buffer when resumed. The trace
2948 buffer control structures would have been reset but the thread had
2949 no way to tell. The thread could even be midway through memcpy'ing
2950 to the buffer, which would mean that when resumed, it would
2951 clobber the trace buffer that had been set for a new run.
2952
2953 - we can't rewrite/reuse the jump pads for new tracepoints
2954 safely. Say you do tstart while a thread is stopped midway while
2955 collecting. When the thread is later resumed, it finishes the
2956 collection, and returns to the jump pad, to execute the original
2957 instruction that was under the tracepoint jump at the time the
2958 older run had been started. If the jump pad had been rewritten
2959 since for something else in the new run, the thread would now
2960 execute the wrong / random instructions. */
2961
2962 static void
2963 linux_stabilize_threads (void)
2964 {
2965 thread_info *thread_stuck = find_thread (stuck_in_jump_pad_callback);
2966
2967 if (thread_stuck != NULL)
2968 {
2969 if (debug_threads)
2970 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2971 lwpid_of (thread_stuck));
2972 return;
2973 }
2974
2975 thread_info *saved_thread = current_thread;
2976
2977 stabilizing_threads = 1;
2978
2979 /* Kick 'em all. */
2980 for_each_thread (move_out_of_jump_pad_callback);
2981
2982 /* Loop until all are stopped out of the jump pads. */
2983 while (find_thread (lwp_running) != NULL)
2984 {
2985 struct target_waitstatus ourstatus;
2986 struct lwp_info *lwp;
2987 int wstat;
2988
2989 /* Note that we go through the full wait event loop. While
2990 moving threads out of the jump pad, we need to be able to step
2991 over internal breakpoints and such. */
2992 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2993
2994 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2995 {
2996 lwp = get_thread_lwp (current_thread);
2997
2998 /* Lock it. */
2999 lwp_suspended_inc (lwp);
3000
3001 if (ourstatus.value.sig != GDB_SIGNAL_0
3002 || current_thread->last_resume_kind == resume_stop)
3003 {
3004 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3005 enqueue_one_deferred_signal (lwp, &wstat);
3006 }
3007 }
3008 }
3009
3010 unsuspend_all_lwps (NULL);
3011
3012 stabilizing_threads = 0;
3013
3014 current_thread = saved_thread;
3015
3016 if (debug_threads)
3017 {
3018 thread_stuck = find_thread (stuck_in_jump_pad_callback);
3019
3020 if (thread_stuck != NULL)
3021 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3022 lwpid_of (thread_stuck));
3023 }
3024 }
3025
3026 /* Convenience function that is called when the kernel reports an
3027 event that is not passed out to GDB. */
3028
3029 static ptid_t
3030 ignore_event (struct target_waitstatus *ourstatus)
3031 {
3032 /* If we got an event, there may still be others, as a single
3033 SIGCHLD can indicate more than one child stopped. This forces
3034 another target_wait call. */
3035 async_file_mark ();
3036
3037 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3038 return null_ptid;
3039 }
3040
3041 /* Convenience function that is called when the kernel reports an exit
3042 event. This decides whether to report the event to GDB as a
3043 process exit event, a thread exit event, or to suppress the
3044 event. */
3045
3046 static ptid_t
3047 filter_exit_event (struct lwp_info *event_child,
3048 struct target_waitstatus *ourstatus)
3049 {
3050 client_state &cs = get_client_state ();
3051 struct thread_info *thread = get_lwp_thread (event_child);
3052 ptid_t ptid = ptid_of (thread);
3053
3054 if (!last_thread_of_process_p (pid_of (thread)))
3055 {
3056 if (cs.report_thread_events)
3057 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3058 else
3059 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3060
3061 delete_lwp (event_child);
3062 }
3063 return ptid;
3064 }
3065
3066 /* Returns 1 if GDB is interested in any event_child syscalls. */
3067
3068 static int
3069 gdb_catching_syscalls_p (struct lwp_info *event_child)
3070 {
3071 struct thread_info *thread = get_lwp_thread (event_child);
3072 struct process_info *proc = get_thread_process (thread);
3073
3074 return !proc->syscalls_to_catch.empty ();
3075 }
3076
3077 /* Returns 1 if GDB is interested in the event_child syscall.
3078 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3079
3080 static int
3081 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3082 {
3083 int sysno;
3084 struct thread_info *thread = get_lwp_thread (event_child);
3085 struct process_info *proc = get_thread_process (thread);
3086
3087 if (proc->syscalls_to_catch.empty ())
3088 return 0;
3089
3090 if (proc->syscalls_to_catch[0] == ANY_SYSCALL)
3091 return 1;
3092
3093 get_syscall_trapinfo (event_child, &sysno);
3094
3095 for (int iter : proc->syscalls_to_catch)
3096 if (iter == sysno)
3097 return 1;
3098
3099 return 0;
3100 }
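
/* For example (SYS_* names for illustration only): with
   syscalls_to_catch = {SYS_open, SYS_close}, a SYSCALL_SIGTRAP stop
   whose get_syscall_trapinfo reports SYS_write is resumed silently by
   the caller, while a SYS_open stop is reported to GDB as a syscall
   entry or return event.  */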
3101
3102 /* Wait for process, returns status. */
3103
3104 static ptid_t
3105 linux_wait_1 (ptid_t ptid,
3106 struct target_waitstatus *ourstatus, int target_options)
3107 {
3108 client_state &cs = get_client_state ();
3109 int w;
3110 struct lwp_info *event_child;
3111 int options;
3112 int pid;
3113 int step_over_finished;
3114 int bp_explains_trap;
3115 int maybe_internal_trap;
3116 int report_to_gdb;
3117 int trace_event;
3118 int in_step_range;
3119 int any_resumed;
3120
3121 if (debug_threads)
3122 {
3123 debug_enter ();
3124 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3125 }
3126
3127 /* Translate generic target options into linux options. */
3128 options = __WALL;
3129 if (target_options & TARGET_WNOHANG)
3130 options |= WNOHANG;
3131
3132 bp_explains_trap = 0;
3133 trace_event = 0;
3134 in_step_range = 0;
3135 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3136
3137 auto status_pending_p_any = [&] (thread_info *thread)
3138 {
3139 return status_pending_p_callback (thread, minus_one_ptid);
3140 };
3141
3142 auto not_stopped = [&] (thread_info *thread)
3143 {
3144 return not_stopped_callback (thread, minus_one_ptid);
3145 };
3146
3147 /* Find a resumed LWP, if any. */
3148 if (find_thread (status_pending_p_any) != NULL)
3149 any_resumed = 1;
3150 else if (find_thread (not_stopped) != NULL)
3151 any_resumed = 1;
3152 else
3153 any_resumed = 0;
3154
3155 if (step_over_bkpt == null_ptid)
3156 pid = linux_wait_for_event (ptid, &w, options);
3157 else
3158 {
3159 if (debug_threads)
3160 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3161 target_pid_to_str (step_over_bkpt));
3162 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3163 }
3164
3165 if (pid == 0 || (pid == -1 && !any_resumed))
3166 {
3167 gdb_assert (target_options & TARGET_WNOHANG);
3168
3169 if (debug_threads)
3170 {
3171 debug_printf ("linux_wait_1 ret = null_ptid, "
3172 "TARGET_WAITKIND_IGNORE\n");
3173 debug_exit ();
3174 }
3175
3176 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3177 return null_ptid;
3178 }
3179 else if (pid == -1)
3180 {
3181 if (debug_threads)
3182 {
3183 debug_printf ("linux_wait_1 ret = null_ptid, "
3184 "TARGET_WAITKIND_NO_RESUMED\n");
3185 debug_exit ();
3186 }
3187
3188 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3189 return null_ptid;
3190 }
3191
3192 event_child = get_thread_lwp (current_thread);
3193
3194 /* linux_wait_for_event only returns an exit status for the last
3195 child of a process. Report it. */
3196 if (WIFEXITED (w) || WIFSIGNALED (w))
3197 {
3198 if (WIFEXITED (w))
3199 {
3200 ourstatus->kind = TARGET_WAITKIND_EXITED;
3201 ourstatus->value.integer = WEXITSTATUS (w);
3202
3203 if (debug_threads)
3204 {
3205 debug_printf ("linux_wait_1 ret = %s, exited with "
3206 "retcode %d\n",
3207 target_pid_to_str (ptid_of (current_thread)),
3208 WEXITSTATUS (w));
3209 debug_exit ();
3210 }
3211 }
3212 else
3213 {
3214 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3215 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3216
3217 if (debug_threads)
3218 {
3219 debug_printf ("linux_wait_1 ret = %s, terminated with "
3220 "signal %d\n",
3221 target_pid_to_str (ptid_of (current_thread)),
3222 WTERMSIG (w));
3223 debug_exit ();
3224 }
3225 }
3226
3227 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3228 return filter_exit_event (event_child, ourstatus);
3229
3230 return ptid_of (current_thread);
3231 }
3232
3233 /* If a step-over executes a breakpoint instruction, then in the
3234 case of a hardware single step it means a gdb/gdbserver
3235 breakpoint had been planted on top of a permanent breakpoint;
3236 in the case of a software single step it may just mean that
3237 gdbserver hit the reinsert breakpoint. The PC has been adjusted
3238 by save_stop_reason to point at the breakpoint address. So, for
3239 hardware single step, advance the PC manually past the
3240 breakpoint, and for software single step, advance only if it's
3241 not the single_step_breakpoint we are hitting. This prevents
3242 the program from re-trapping a permanent breakpoint
3243 forever. */
3244 if (step_over_bkpt != null_ptid
3245 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3246 && (event_child->stepping
3247 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3248 {
3249 int increment_pc = 0;
3250 int breakpoint_kind = 0;
3251 CORE_ADDR stop_pc = event_child->stop_pc;
3252
3253 breakpoint_kind =
3254 the_target->breakpoint_kind_from_current_state (&stop_pc);
3255 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3256
3257 if (debug_threads)
3258 {
3259 debug_printf ("step-over for %s executed software breakpoint\n",
3260 target_pid_to_str (ptid_of (current_thread)));
3261 }
3262
3263 if (increment_pc != 0)
3264 {
3265 struct regcache *regcache
3266 = get_thread_regcache (current_thread, 1);
3267
3268 event_child->stop_pc += increment_pc;
3269 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3270
3271 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3272 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3273 }
3274 }
3275
3276 /* If this event was not handled before, and is not a SIGTRAP, we
3277 report it. SIGILL and SIGSEGV are also treated as traps in case
3278 a breakpoint is inserted at the current PC. If this target does
3279 not support internal breakpoints at all, we also report the
3280 SIGTRAP without further processing; it's of no concern to us. */
3281 maybe_internal_trap
3282 = (supports_breakpoints ()
3283 && (WSTOPSIG (w) == SIGTRAP
3284 || ((WSTOPSIG (w) == SIGILL
3285 || WSTOPSIG (w) == SIGSEGV)
3286 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3287
3288 if (maybe_internal_trap)
3289 {
3290 /* Handle anything that requires bookkeeping before deciding to
3291 report the event or continue waiting. */
3292
3293 /* First check if we can explain the SIGTRAP with an internal
3294 breakpoint, or if we should possibly report the event to GDB.
3295 Do this before anything that may remove or insert a
3296 breakpoint. */
3297 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3298
3299 /* We have a SIGTRAP, possibly a step-over dance has just
3300 finished. If so, tweak the state machine accordingly,
3301 reinsert breakpoints and delete any single-step
3302 breakpoints. */
3303 step_over_finished = finish_step_over (event_child);
3304
3305 /* Now invoke the callbacks of any internal breakpoints there. */
3306 check_breakpoints (event_child->stop_pc);
3307
3308 /* Handle tracepoint data collecting. This may overflow the
3309 trace buffer, and cause a tracing stop, removing
3310 breakpoints. */
3311 trace_event = handle_tracepoints (event_child);
3312
3313 if (bp_explains_trap)
3314 {
3315 if (debug_threads)
3316 debug_printf ("Hit a gdbserver breakpoint.\n");
3317 }
3318 }
3319 else
3320 {
3321 /* We have some other signal, possibly a step-over dance was in
3322 progress, and it should be cancelled too. */
3323 step_over_finished = finish_step_over (event_child);
3324 }
3325
3326 /* We have all the data we need. Either report the event to GDB, or
3327 resume threads and keep waiting for more. */
3328
3329 /* If we're collecting a fast tracepoint, finish the collection and
3330 move out of the jump pad before delivering a signal. See
3331 linux_stabilize_threads. */
3332
3333 if (WIFSTOPPED (w)
3334 && WSTOPSIG (w) != SIGTRAP
3335 && supports_fast_tracepoints ()
3336 && agent_loaded_p ())
3337 {
3338 if (debug_threads)
3339 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3340 "to defer or adjust it.\n",
3341 WSTOPSIG (w), lwpid_of (current_thread));
3342
3343 /* Allow debugging the jump pad itself. */
3344 if (current_thread->last_resume_kind != resume_step
3345 && maybe_move_out_of_jump_pad (event_child, &w))
3346 {
3347 enqueue_one_deferred_signal (event_child, &w);
3348
3349 if (debug_threads)
3350 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3351 WSTOPSIG (w), lwpid_of (current_thread));
3352
3353 linux_resume_one_lwp (event_child, 0, 0, NULL);
3354
3355 if (debug_threads)
3356 debug_exit ();
3357 return ignore_event (ourstatus);
3358 }
3359 }
3360
3361 if (event_child->collecting_fast_tracepoint
3362 != fast_tpoint_collect_result::not_collecting)
3363 {
3364 if (debug_threads)
3365 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3366 "Check if we're already there.\n",
3367 lwpid_of (current_thread),
3368 (int) event_child->collecting_fast_tracepoint);
3369
3370 trace_event = 1;
3371
3372 event_child->collecting_fast_tracepoint
3373 = linux_fast_tracepoint_collecting (event_child, NULL);
3374
3375 if (event_child->collecting_fast_tracepoint
3376 != fast_tpoint_collect_result::before_insn)
3377 {
3378 /* No longer need this breakpoint. */
3379 if (event_child->exit_jump_pad_bkpt != NULL)
3380 {
3381 if (debug_threads)
3382 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3383 "stopping all threads momentarily.\n");
3384
3385 /* Other running threads could hit this breakpoint.
3386 We don't handle moribund locations like GDB does,
3387 instead we always pause all threads when removing
3388 breakpoints, so that any step-over or
3389 decr_pc_after_break adjustment is always taken
3390 care of while the breakpoint is still
3391 inserted. */
3392 stop_all_lwps (1, event_child);
3393
3394 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3395 event_child->exit_jump_pad_bkpt = NULL;
3396
3397 unstop_all_lwps (1, event_child);
3398
3399 gdb_assert (event_child->suspended >= 0);
3400 }
3401 }
3402
3403 if (event_child->collecting_fast_tracepoint
3404 == fast_tpoint_collect_result::not_collecting)
3405 {
3406 if (debug_threads)
3407 debug_printf ("fast tracepoint finished "
3408 "collecting successfully.\n");
3409
3410 /* We may have a deferred signal to report. */
3411 if (dequeue_one_deferred_signal (event_child, &w))
3412 {
3413 if (debug_threads)
3414 debug_printf ("dequeued one signal.\n");
3415 }
3416 else
3417 {
3418 if (debug_threads)
3419 debug_printf ("no deferred signals.\n");
3420
3421 if (stabilizing_threads)
3422 {
3423 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3424 ourstatus->value.sig = GDB_SIGNAL_0;
3425
3426 if (debug_threads)
3427 {
3428 debug_printf ("linux_wait_1 ret = %s, stopped "
3429 "while stabilizing threads\n",
3430 target_pid_to_str (ptid_of (current_thread)));
3431 debug_exit ();
3432 }
3433
3434 return ptid_of (current_thread);
3435 }
3436 }
3437 }
3438 }
3439
3440 /* Check whether GDB would be interested in this event. */
3441
3442 /* Check if GDB is interested in this syscall. */
3443 if (WIFSTOPPED (w)
3444 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3445 && !gdb_catch_this_syscall_p (event_child))
3446 {
3447 if (debug_threads)
3448 {
3449 debug_printf ("Ignored syscall for LWP %ld.\n",
3450 lwpid_of (current_thread));
3451 }
3452
3453 linux_resume_one_lwp (event_child, event_child->stepping,
3454 0, NULL);
3455
3456 if (debug_threads)
3457 debug_exit ();
3458 return ignore_event (ourstatus);
3459 }
3460
3461 /* If GDB is not interested in this signal, don't stop other
3462 threads, and don't report it to GDB. Just resume the inferior
3463 right away. We do this for threading-related signals as well as
3464 any that GDB specifically requested we ignore. But never ignore
3465 SIGSTOP if we sent it ourselves, and do not ignore signals when
3466 stepping - they may require special handling to skip the signal
3467 handler. Also never ignore signals that could be caused by a
3468 breakpoint. */
3469 if (WIFSTOPPED (w)
3470 && current_thread->last_resume_kind != resume_step
3471 && (
3472 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3473 (current_process ()->priv->thread_db != NULL
3474 && (WSTOPSIG (w) == __SIGRTMIN
3475 || WSTOPSIG (w) == __SIGRTMIN + 1))
3476 ||
3477 #endif
3478 (cs.pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3479 && !(WSTOPSIG (w) == SIGSTOP
3480 && current_thread->last_resume_kind == resume_stop)
3481 && !linux_wstatus_maybe_breakpoint (w))))
3482 {
3483 siginfo_t info, *info_p;
3484
3485 if (debug_threads)
3486 debug_printf ("Ignored signal %d for LWP %ld.\n",
3487 WSTOPSIG (w), lwpid_of (current_thread));
3488
3489 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3490 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3491 info_p = &info;
3492 else
3493 info_p = NULL;
3494
3495 if (step_over_finished)
3496 {
3497 /* We cancelled this thread's step-over above. We still
3498 need to unsuspend all other LWPs, and set them back
3499 running again while the signal handler runs. */
3500 unsuspend_all_lwps (event_child);
3501
3502 /* Enqueue the pending signal info so that proceed_all_lwps
3503 doesn't lose it. */
3504 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3505
3506 proceed_all_lwps ();
3507 }
3508 else
3509 {
3510 linux_resume_one_lwp (event_child, event_child->stepping,
3511 WSTOPSIG (w), info_p);
3512 }
3513
3514 if (debug_threads)
3515 debug_exit ();
3516
3517 return ignore_event (ourstatus);
3518 }
3519
3520 /* Note that all addresses are always "out of the step range" when
3521 there's no range to begin with. */
3522 in_step_range = lwp_in_step_range (event_child);
3523
3524 /* If GDB wanted this thread to single step, and the thread is out
3525 of the step range, we always want to report the SIGTRAP, and let
3526 GDB handle it. Watchpoints should always be reported. So should
3527 signals we can't explain. A SIGTRAP we can't explain could be a
3528 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3529 we do, we'll be able to handle GDB breakpoints on top of internal
3530 breakpoints, by handling the internal breakpoint and still
3531 reporting the event to GDB. If we don't, we're out of luck, and
3532 GDB won't see the breakpoint hit. If we see a single-step event
3533 but the thread should be continuing, don't pass the trap to GDB.
3534 That indicates that we had previously finished a single-step but
3535 left the single-step pending -- see
3536 complete_ongoing_step_over. */
3537 report_to_gdb = (!maybe_internal_trap
3538 || (current_thread->last_resume_kind == resume_step
3539 && !in_step_range)
3540 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3541 || (!in_step_range
3542 && !bp_explains_trap
3543 && !trace_event
3544 && !step_over_finished
3545 && !(current_thread->last_resume_kind == resume_continue
3546 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3547 || (gdb_breakpoint_here (event_child->stop_pc)
3548 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3549 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3550 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3551
3552 run_breakpoint_commands (event_child->stop_pc);
3553
3554 /* We found no reason GDB would want us to stop. We either hit one
3555 of our own breakpoints, or finished an internal step GDB
3556 shouldn't know about. */
3557 if (!report_to_gdb)
3558 {
3559 if (debug_threads)
3560 {
3561 if (bp_explains_trap)
3562 debug_printf ("Hit a gdbserver breakpoint.\n");
3563 if (step_over_finished)
3564 debug_printf ("Step-over finished.\n");
3565 if (trace_event)
3566 debug_printf ("Tracepoint event.\n");
3567 if (lwp_in_step_range (event_child))
3568 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3569 paddress (event_child->stop_pc),
3570 paddress (event_child->step_range_start),
3571 paddress (event_child->step_range_end));
3572 }
3573
3574 /* We're not reporting this breakpoint to GDB, so apply the
3575 decr_pc_after_break adjustment to the inferior's regcache
3576 ourselves. */
3577
3578 if (the_low_target.set_pc != NULL)
3579 {
3580 struct regcache *regcache
3581 = get_thread_regcache (current_thread, 1);
3582 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3583 }
3584
3585 if (step_over_finished)
3586 {
3587 /* If we have finished stepping over a breakpoint, we've
3588 stopped and suspended all LWPs momentarily except the
3589 stepping one. This is where we resume them all again.
3590 We're going to keep waiting, so use proceed, which
3591 handles stepping over the next breakpoint. */
3592 unsuspend_all_lwps (event_child);
3593 }
3594 else
3595 {
3596 /* Remove the single-step breakpoints if any. Note that there
3597 is no single-step breakpoint left if we finished stepping
3598 over. */
3599 if (can_software_single_step ()
3600 && has_single_step_breakpoints (current_thread))
3601 {
3602 stop_all_lwps (0, event_child);
3603 delete_single_step_breakpoints (current_thread);
3604 unstop_all_lwps (0, event_child);
3605 }
3606 }
3607
3608 if (debug_threads)
3609 debug_printf ("proceeding all threads.\n");
3610 proceed_all_lwps ();
3611
3612 if (debug_threads)
3613 debug_exit ();
3614
3615 return ignore_event (ourstatus);
3616 }
3617
3618 if (debug_threads)
3619 {
3620 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3621 {
3622 std::string str
3623 = target_waitstatus_to_string (&event_child->waitstatus);
3624
3625 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3626 lwpid_of (get_lwp_thread (event_child)), str.c_str ());
3627 }
3628 if (current_thread->last_resume_kind == resume_step)
3629 {
3630 if (event_child->step_range_start == event_child->step_range_end)
3631 debug_printf ("GDB wanted to single-step, reporting event.\n");
3632 else if (!lwp_in_step_range (event_child))
3633 debug_printf ("Out of step range, reporting event.\n");
3634 }
3635 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3636 debug_printf ("Stopped by watchpoint.\n");
3637 else if (gdb_breakpoint_here (event_child->stop_pc))
3638 debug_printf ("Stopped by GDB breakpoint.\n");
3639 debug_printf ("Hit a non-gdbserver trap event.\n");
3641 }
3642
3643 /* Alright, we're going to report a stop. */
3644
3645 /* Remove single-step breakpoints. */
3646 if (can_software_single_step ())
3647 {
3648       /* Decide whether to remove the single-step breakpoints.  If we
3649 	 do remove them, stop all lwps first, so that other threads
3650 	 won't hit a breakpoint left in stale memory.  */
3651 int remove_single_step_breakpoints_p = 0;
3652
3653 if (non_stop)
3654 {
3655 remove_single_step_breakpoints_p
3656 = has_single_step_breakpoints (current_thread);
3657 }
3658 else
3659 {
3660 /* In all-stop, a stop reply cancels all previous resume
3661 requests. Delete all single-step breakpoints. */
3662
3663 find_thread ([&] (thread_info *thread) {
3664 if (has_single_step_breakpoints (thread))
3665 {
3666 remove_single_step_breakpoints_p = 1;
3667 return true;
3668 }
3669
3670 return false;
3671 });
3672 }
3673
3674 if (remove_single_step_breakpoints_p)
3675 {
3676 	  /* If we remove single-step breakpoints from memory, stop all
3677 	     lwps first, so that other threads won't hit a breakpoint
3678 	     left in stale memory.  */
3679 stop_all_lwps (0, event_child);
3680
3681 if (non_stop)
3682 {
3683 gdb_assert (has_single_step_breakpoints (current_thread));
3684 delete_single_step_breakpoints (current_thread);
3685 }
3686 else
3687 {
3688 for_each_thread ([] (thread_info *thread){
3689 if (has_single_step_breakpoints (thread))
3690 delete_single_step_breakpoints (thread);
3691 });
3692 }
3693
3694 unstop_all_lwps (0, event_child);
3695 }
3696 }
3697
3698 if (!stabilizing_threads)
3699 {
3700 /* In all-stop, stop all threads. */
3701 if (!non_stop)
3702 stop_all_lwps (0, NULL);
3703
3704 if (step_over_finished)
3705 {
3706 if (!non_stop)
3707 {
3708 /* If we were doing a step-over, all other threads but
3709 the stepping one had been paused in start_step_over,
3710 with their suspend counts incremented. We don't want
3711 to do a full unstop/unpause, because we're in
3712 all-stop mode (so we want threads stopped), but we
3713 still need to unsuspend the other threads, to
3714 decrement their `suspended' count back. */
3715 unsuspend_all_lwps (event_child);
3716 }
3717 else
3718 {
3719 /* If we just finished a step-over, then all threads had
3720 been momentarily paused. In all-stop, that's fine,
3721 we want threads stopped by now anyway. In non-stop,
3722 we need to re-resume threads that GDB wanted to be
3723 running. */
3724 unstop_all_lwps (1, event_child);
3725 }
3726 }
3727
3728 /* If we're not waiting for a specific LWP, choose an event LWP
3729 from among those that have had events. Giving equal priority
3730 to all LWPs that have had events helps prevent
3731 starvation. */
3732 if (ptid == minus_one_ptid)
3733 {
3734 event_child->status_pending_p = 1;
3735 event_child->status_pending = w;
3736
3737 select_event_lwp (&event_child);
3738
3739 /* current_thread and event_child must stay in sync. */
3740 current_thread = get_lwp_thread (event_child);
3741
3742 event_child->status_pending_p = 0;
3743 w = event_child->status_pending;
3744 }
3745
3746
3747 /* Stabilize threads (move out of jump pads). */
3748 if (!non_stop)
3749 stabilize_threads ();
3750 }
3751 else
3752 {
3753 /* If we just finished a step-over, then all threads had been
3754 momentarily paused. In all-stop, that's fine, we want
3755 threads stopped by now anyway. In non-stop, we need to
3756 re-resume threads that GDB wanted to be running. */
3757 if (step_over_finished)
3758 unstop_all_lwps (1, event_child);
3759 }
3760
3761 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3762 {
3763 /* If the reported event is an exit, fork, vfork or exec, let
3764 GDB know. */
3765
3766 /* Break the unreported fork relationship chain. */
3767 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3768 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3769 {
3770 event_child->fork_relative->fork_relative = NULL;
3771 event_child->fork_relative = NULL;
3772 }
3773
3774 *ourstatus = event_child->waitstatus;
3775 /* Clear the event lwp's waitstatus since we handled it already. */
3776 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3777 }
3778 else
3779 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3780
3781 /* Now that we've selected our final event LWP, un-adjust its PC if
3782 it was a software breakpoint, and the client doesn't know we can
3783 adjust the breakpoint ourselves. */
3784 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3785 && !cs.swbreak_feature)
3786 {
3787 int decr_pc = the_low_target.decr_pc_after_break;
3788
3789 if (decr_pc != 0)
3790 {
3791 struct regcache *regcache
3792 = get_thread_regcache (current_thread, 1);
3793 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3794 }
3795 }
3796
3797 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3798 {
3799 get_syscall_trapinfo (event_child,
3800 &ourstatus->value.syscall_number);
3801 ourstatus->kind = event_child->syscall_state;
3802 }
3803 else if (current_thread->last_resume_kind == resume_stop
3804 && WSTOPSIG (w) == SIGSTOP)
3805 {
3806       /* A thread that was requested to stop by GDB with vCont;t and
3807 	 stopped cleanly, so report it as SIG0.  The use of SIGSTOP is
3808 	 an implementation detail.  */
3809 ourstatus->value.sig = GDB_SIGNAL_0;
3810 }
3811 else if (current_thread->last_resume_kind == resume_stop
3812 && WSTOPSIG (w) != SIGSTOP)
3813 {
3814       /* A thread that was requested to stop by GDB with vCont;t,
3815 	 but it stopped for some other reason.  */
3816 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3817 }
3818 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3819 {
3820 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3821 }
3822
3823 gdb_assert (step_over_bkpt == null_ptid);
3824
3825 if (debug_threads)
3826 {
3827 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3828 target_pid_to_str (ptid_of (current_thread)),
3829 ourstatus->kind, ourstatus->value.sig);
3830 debug_exit ();
3831 }
3832
3833 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3834 return filter_exit_event (event_child, ourstatus);
3835
3836 return ptid_of (current_thread);
3837 }
3838
3839 /* Get rid of any pending event in the pipe. */
3840 static void
3841 async_file_flush (void)
3842 {
3843 int ret;
3844 char buf;
3845
3846 do
3847 ret = read (linux_event_pipe[0], &buf, 1);
3848 while (ret >= 0 || (ret == -1 && errno == EINTR));
3849 }
3850
3851 /* Put something in the pipe, so the event loop wakes up. */
3852 static void
3853 async_file_mark (void)
3854 {
3855 int ret;
3856
3857 async_file_flush ();
3858
3859 do
3860 ret = write (linux_event_pipe[1], "+", 1);
3861 while (ret == 0 || (ret == -1 && errno == EINTR));
3862
3863 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3864 be awakened anyway. */
3865 }
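
/* The pipe pair used above implements the classic self-pipe trick:
   an event is turned into a byte in a pipe that the event loop can
   select/poll on, and draining the read end (async_file_flush)
   re-arms the notification.  A minimal standalone sketch of the same
   pattern, assuming a non-blocking pipe (hypothetical example, not
   part of gdbserver):

     #include <fcntl.h>
     #include <unistd.h>

     static int event_pipe[2];

     static void
     event_pipe_init (void)
     {
       pipe (event_pipe);
       fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
       fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
     }

     static void
     event_mark (void)
     {
       // EAGAIN when the pipe is full is fine; the loop is already
       // going to wake up.
       write (event_pipe[1], "+", 1);
     }
*/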
3866
3867 static ptid_t
3868 linux_wait (ptid_t ptid,
3869 struct target_waitstatus *ourstatus, int target_options)
3870 {
3871 ptid_t event_ptid;
3872
3873 /* Flush the async file first. */
3874 if (target_is_async_p ())
3875 async_file_flush ();
3876
3877 do
3878 {
3879 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3880 }
3881 while ((target_options & TARGET_WNOHANG) == 0
3882 && event_ptid == null_ptid
3883 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3884
3885 /* If at least one stop was reported, there may be more. A single
3886 SIGCHLD can signal more than one child stop. */
3887 if (target_is_async_p ()
3888 && (target_options & TARGET_WNOHANG) != 0
3889 && event_ptid != null_ptid)
3890 async_file_mark ();
3891
3892 return event_ptid;
3893 }
3894
3895 /* Send a signal to an LWP. */
3896
3897 static int
3898 kill_lwp (unsigned long lwpid, int signo)
3899 {
3900 int ret;
3901
3902 errno = 0;
3903 ret = syscall (__NR_tkill, lwpid, signo);
3904 if (errno == ENOSYS)
3905 {
3906 /* If tkill fails, then we are not using nptl threads, a
3907 configuration we no longer support. */
3908 perror_with_name (("tkill"));
3909 }
3910 return ret;
3911 }
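
/* Note the raw syscall above: kill () addresses a whole thread
   group, while tkill directs the signal at one specific LWP, which
   is what per-thread stop/continue needs.  A minimal sketch of the
   same call (hypothetical example, not part of gdbserver; tgkill is
   the race-safer modern variant):

     #include <signal.h>
     #include <sys/syscall.h>
     #include <unistd.h>

     static int
     stop_one_lwp (pid_t lwpid)
     {
       return syscall (SYS_tkill, lwpid, SIGSTOP);
     }
*/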
3912
3913 void
3914 linux_stop_lwp (struct lwp_info *lwp)
3915 {
3916 send_sigstop (lwp);
3917 }
3918
3919 static void
3920 send_sigstop (struct lwp_info *lwp)
3921 {
3922 int pid;
3923
3924 pid = lwpid_of (get_lwp_thread (lwp));
3925
3926 /* If we already have a pending stop signal for this process, don't
3927 send another. */
3928 if (lwp->stop_expected)
3929 {
3930 if (debug_threads)
3931 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3932
3933 return;
3934 }
3935
3936 if (debug_threads)
3937 debug_printf ("Sending sigstop to lwp %d\n", pid);
3938
3939 lwp->stop_expected = 1;
3940 kill_lwp (pid, SIGSTOP);
3941 }
3942
3943 static void
3944 send_sigstop (thread_info *thread, lwp_info *except)
3945 {
3946 struct lwp_info *lwp = get_thread_lwp (thread);
3947
3948 /* Ignore EXCEPT. */
3949 if (lwp == except)
3950 return;
3951
3952 if (lwp->stopped)
3953 return;
3954
3955 send_sigstop (lwp);
3956 }
3957
3958 /* Increment the suspend count of an LWP, and stop it, if not stopped
3959 yet. */
3960 static void
3961 suspend_and_send_sigstop (thread_info *thread, lwp_info *except)
3962 {
3963 struct lwp_info *lwp = get_thread_lwp (thread);
3964
3965 /* Ignore EXCEPT. */
3966 if (lwp == except)
3967 return;
3968
3969 lwp_suspended_inc (lwp);
3970
3971 send_sigstop (thread, except);
3972 }
3973
3974 static void
3975 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3976 {
3977 /* Store the exit status for later. */
3978 lwp->status_pending_p = 1;
3979 lwp->status_pending = wstat;
3980
3981 /* Store in waitstatus as well, as there's nothing else to process
3982 for this event. */
3983 if (WIFEXITED (wstat))
3984 {
3985 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3986 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3987 }
3988 else if (WIFSIGNALED (wstat))
3989 {
3990 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3991 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3992 }
3993
3994 /* Prevent trying to stop it. */
3995 lwp->stopped = 1;
3996
3997 /* No further stops are expected from a dead lwp. */
3998 lwp->stop_expected = 0;
3999 }
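
/* For reference, the wstat decoding above follows the standard
   waitpid status macros.  A tiny standalone sketch (hypothetical
   example, not part of gdbserver):

     #include <stdio.h>
     #include <sys/wait.h>

     static void
     describe_wstat (int wstat)
     {
       if (WIFEXITED (wstat))
	 printf ("exited, code %d\n", WEXITSTATUS (wstat));
       else if (WIFSIGNALED (wstat))
	 printf ("killed by signal %d\n", WTERMSIG (wstat));
       else if (WIFSTOPPED (wstat))
	 printf ("stopped by signal %d\n", WSTOPSIG (wstat));
     }
*/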
4000
4001 /* Return true if LWP has exited already, and has a pending exit event
4002 to report to GDB. */
4003
4004 static int
4005 lwp_is_marked_dead (struct lwp_info *lwp)
4006 {
4007 return (lwp->status_pending_p
4008 && (WIFEXITED (lwp->status_pending)
4009 || WIFSIGNALED (lwp->status_pending)));
4010 }
4011
4012 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4013
4014 static void
4015 wait_for_sigstop (void)
4016 {
4017 struct thread_info *saved_thread;
4018 ptid_t saved_tid;
4019 int wstat;
4020 int ret;
4021
4022 saved_thread = current_thread;
4023 if (saved_thread != NULL)
4024 saved_tid = saved_thread->id;
4025 else
4026 saved_tid = null_ptid; /* avoid bogus unused warning */
4027
4028 if (debug_threads)
4029 debug_printf ("wait_for_sigstop: pulling events\n");
4030
4031 /* Passing NULL_PTID as filter indicates we want all events to be
4032 left pending. Eventually this returns when there are no
4033 unwaited-for children left. */
4034 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4035 &wstat, __WALL);
4036 gdb_assert (ret == -1);
4037
4038 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4039 current_thread = saved_thread;
4040 else
4041 {
4042 if (debug_threads)
4043 debug_printf ("Previously current thread died.\n");
4044
4045 /* We can't change the current inferior behind GDB's back,
4046 otherwise, a subsequent command may apply to the wrong
4047 process. */
4048 current_thread = NULL;
4049 }
4050 }
4051
4052 /* Returns true if THREAD is stopped in a jump pad, and we can't
4053 move it out, because we need to report the stop event to GDB. For
4054 example, if the user puts a breakpoint in the jump pad, it's
4055 because she wants to debug it. */
4056
4057 static bool
4058 stuck_in_jump_pad_callback (thread_info *thread)
4059 {
4060 struct lwp_info *lwp = get_thread_lwp (thread);
4061
4062 if (lwp->suspended != 0)
4063 {
4064 internal_error (__FILE__, __LINE__,
4065 "LWP %ld is suspended, suspended=%d\n",
4066 lwpid_of (thread), lwp->suspended);
4067 }
4068 gdb_assert (lwp->stopped);
4069
4070   /* Allow debugging the jump pad, gdb_collect, etc.  */
4071 return (supports_fast_tracepoints ()
4072 && agent_loaded_p ()
4073 && (gdb_breakpoint_here (lwp->stop_pc)
4074 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4075 || thread->last_resume_kind == resume_step)
4076 && (linux_fast_tracepoint_collecting (lwp, NULL)
4077 != fast_tpoint_collect_result::not_collecting));
4078 }
4079
4080 static void
4081 move_out_of_jump_pad_callback (thread_info *thread)
4082 {
4083 struct thread_info *saved_thread;
4084 struct lwp_info *lwp = get_thread_lwp (thread);
4085 int *wstat;
4086
4087 if (lwp->suspended != 0)
4088 {
4089 internal_error (__FILE__, __LINE__,
4090 "LWP %ld is suspended, suspended=%d\n",
4091 lwpid_of (thread), lwp->suspended);
4092 }
4093 gdb_assert (lwp->stopped);
4094
4095 /* For gdb_breakpoint_here. */
4096 saved_thread = current_thread;
4097 current_thread = thread;
4098
4099 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4100
4101 /* Allow debugging the jump pad, gdb_collect, etc. */
4102 if (!gdb_breakpoint_here (lwp->stop_pc)
4103 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4104 && thread->last_resume_kind != resume_step
4105 && maybe_move_out_of_jump_pad (lwp, wstat))
4106 {
4107 if (debug_threads)
4108 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4109 lwpid_of (thread));
4110
4111 if (wstat)
4112 {
4113 lwp->status_pending_p = 0;
4114 enqueue_one_deferred_signal (lwp, wstat);
4115
4116 if (debug_threads)
4117 debug_printf ("Signal %d for LWP %ld deferred "
4118 "(in jump pad)\n",
4119 WSTOPSIG (*wstat), lwpid_of (thread));
4120 }
4121
4122 linux_resume_one_lwp (lwp, 0, 0, NULL);
4123 }
4124 else
4125 lwp_suspended_inc (lwp);
4126
4127 current_thread = saved_thread;
4128 }
4129
4130 static bool
4131 lwp_running (thread_info *thread)
4132 {
4133 struct lwp_info *lwp = get_thread_lwp (thread);
4134
4135 if (lwp_is_marked_dead (lwp))
4136 return false;
4137
4138 return !lwp->stopped;
4139 }
4140
4141 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4142 If SUSPEND, then also increase the suspend count of every LWP,
4143 except EXCEPT. */
4144
4145 static void
4146 stop_all_lwps (int suspend, struct lwp_info *except)
4147 {
4148 /* Should not be called recursively. */
4149 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4150
4151 if (debug_threads)
4152 {
4153 debug_enter ();
4154 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4155 suspend ? "stop-and-suspend" : "stop",
4156 except != NULL
4157 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4158 : "none");
4159 }
4160
4161 stopping_threads = (suspend
4162 ? STOPPING_AND_SUSPENDING_THREADS
4163 : STOPPING_THREADS);
4164
4165 if (suspend)
4166 for_each_thread ([&] (thread_info *thread)
4167 {
4168 suspend_and_send_sigstop (thread, except);
4169 });
4170 else
4171 for_each_thread ([&] (thread_info *thread)
4172 {
4173 send_sigstop (thread, except);
4174 });
4175
4176 wait_for_sigstop ();
4177 stopping_threads = NOT_STOPPING_THREADS;
4178
4179 if (debug_threads)
4180 {
4181 debug_printf ("stop_all_lwps done, setting stopping_threads "
4182 "back to !stopping\n");
4183 debug_exit ();
4184 }
4185 }
4186
4187 /* Enqueue one signal in the chain of signals which need to be
4188 delivered to this process on next resume. */
4189
4190 static void
4191 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4192 {
4193 struct pending_signals *p_sig = XNEW (struct pending_signals);
4194
4195 p_sig->prev = lwp->pending_signals;
4196 p_sig->signal = signal;
4197 if (info == NULL)
4198 memset (&p_sig->info, 0, sizeof (siginfo_t));
4199 else
4200 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4201 lwp->pending_signals = p_sig;
4202 }
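
/* Queue discipline note: signals are pushed onto the head of this
   `prev'-linked list, while the consumer in
   linux_resume_one_lwp_throw walks to the tail before dequeuing, so
   delivery is FIFO.  A sketch of that traversal (hypothetical
   example, not part of gdbserver; assumes <stdlib.h>):

     struct pending_signals { int signal; struct pending_signals *prev; };

     static int
     dequeue_oldest (struct pending_signals **head)
     {
       struct pending_signals **p = head;
       while ((*p)->prev != NULL)	// The tail is the oldest entry.
	 p = &(*p)->prev;
       int sig = (*p)->signal;
       free (*p);
       *p = NULL;
       return sig;
     }
*/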
4203
4204 /* Install breakpoints for software single stepping. */
4205
4206 static void
4207 install_software_single_step_breakpoints (struct lwp_info *lwp)
4208 {
4209 struct thread_info *thread = get_lwp_thread (lwp);
4210 struct regcache *regcache = get_thread_regcache (thread, 1);
4211
4212 scoped_restore save_current_thread = make_scoped_restore (&current_thread);
4213
4214 current_thread = thread;
4215 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4216
4217 for (CORE_ADDR pc : next_pcs)
4218 set_single_step_breakpoint (pc, current_ptid);
4219 }
4220
4221 /* Single step via hardware or software single step.
4222    Return 1 if hardware single-stepping, 0 if software single-stepping
4223    or unable to single-step.  */
4224
4225 static int
4226 single_step (struct lwp_info* lwp)
4227 {
4228 int step = 0;
4229
4230 if (can_hardware_single_step ())
4231 {
4232 step = 1;
4233 }
4234 else if (can_software_single_step ())
4235 {
4236 install_software_single_step_breakpoints (lwp);
4237 step = 0;
4238 }
4239 else
4240 {
4241 if (debug_threads)
4242 	debug_printf ("stepping is not implemented on this target\n");
4243 }
4244
4245 return step;
4246 }
4247
4248 /* The signal can be delivered to the inferior if we are not trying to
4249    finish a fast tracepoint collect.  Since a signal can be delivered
4250    during a step-over, the program may enter the signal handler and
4251    trap again after returning from it.  We can live with the spurious
4252    double traps.  */
4253
4254 static int
4255 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4256 {
4257 return (lwp->collecting_fast_tracepoint
4258 == fast_tpoint_collect_result::not_collecting);
4259 }
4260
4261 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4262 SIGNAL is nonzero, give it that signal. */
4263
4264 static void
4265 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4266 int step, int signal, siginfo_t *info)
4267 {
4268 struct thread_info *thread = get_lwp_thread (lwp);
4269 struct thread_info *saved_thread;
4270 int ptrace_request;
4271 struct process_info *proc = get_thread_process (thread);
4272
4273 /* Note that target description may not be initialised
4274 (proc->tdesc == NULL) at this point because the program hasn't
4275 stopped at the first instruction yet. It means GDBserver skips
4276 the extra traps from the wrapper program (see option --wrapper).
4277 Code in this function that requires register access should be
4278 guarded by proc->tdesc == NULL or something else. */
4279
4280 if (lwp->stopped == 0)
4281 return;
4282
4283 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4284
4285 fast_tpoint_collect_result fast_tp_collecting
4286 = lwp->collecting_fast_tracepoint;
4287
4288 gdb_assert (!stabilizing_threads
4289 || (fast_tp_collecting
4290 != fast_tpoint_collect_result::not_collecting));
4291
4292 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4293 user used the "jump" command, or "set $pc = foo"). */
4294 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4295 {
4296 /* Collecting 'while-stepping' actions doesn't make sense
4297 anymore. */
4298 release_while_stepping_state_list (thread);
4299 }
4300
4301 /* If we have pending signals or status, and a new signal, enqueue the
4302 signal. Also enqueue the signal if it can't be delivered to the
4303 inferior right now. */
4304 if (signal != 0
4305 && (lwp->status_pending_p
4306 || lwp->pending_signals != NULL
4307 || !lwp_signal_can_be_delivered (lwp)))
4308 {
4309 enqueue_pending_signal (lwp, signal, info);
4310
4311 /* Postpone any pending signal. It was enqueued above. */
4312 signal = 0;
4313 }
4314
4315 if (lwp->status_pending_p)
4316 {
4317 if (debug_threads)
4318 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4319 " has pending status\n",
4320 lwpid_of (thread), step ? "step" : "continue",
4321 lwp->stop_expected ? "expected" : "not expected");
4322 return;
4323 }
4324
4325 saved_thread = current_thread;
4326 current_thread = thread;
4327
4328 /* This bit needs some thinking about. If we get a signal that
4329 we must report while a single-step reinsert is still pending,
4330 we often end up resuming the thread. It might be better to
4331 (ew) allow a stack of pending events; then we could be sure that
4332 the reinsert happened right away and not lose any signals.
4333
4334 Making this stack would also shrink the window in which breakpoints are
4335 uninserted (see comment in linux_wait_for_lwp) but not enough for
4336 complete correctness, so it won't solve that problem. It may be
4337 worthwhile just to solve this one, however. */
4338 if (lwp->bp_reinsert != 0)
4339 {
4340 if (debug_threads)
4341 debug_printf (" pending reinsert at 0x%s\n",
4342 paddress (lwp->bp_reinsert));
4343
4344 if (can_hardware_single_step ())
4345 {
4346 if (fast_tp_collecting == fast_tpoint_collect_result::not_collecting)
4347 {
4348 if (step == 0)
4349 warning ("BAD - reinserting but not stepping.");
4350 if (lwp->suspended)
4351 warning ("BAD - reinserting and suspended(%d).",
4352 lwp->suspended);
4353 }
4354 }
4355
4356 step = maybe_hw_step (thread);
4357 }
4358
4359 if (fast_tp_collecting == fast_tpoint_collect_result::before_insn)
4360 {
4361 if (debug_threads)
4362 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4363 " (exit-jump-pad-bkpt)\n",
4364 lwpid_of (thread));
4365 }
4366 else if (fast_tp_collecting == fast_tpoint_collect_result::at_insn)
4367 {
4368 if (debug_threads)
4369 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4370 " single-stepping\n",
4371 lwpid_of (thread));
4372
4373 if (can_hardware_single_step ())
4374 step = 1;
4375 else
4376 {
4377 internal_error (__FILE__, __LINE__,
4378 "moving out of jump pad single-stepping"
4379 " not implemented on this target");
4380 }
4381 }
4382
4383 /* If we have while-stepping actions in this thread set it stepping.
4384 If we have a signal to deliver, it may or may not be set to
4385 SIG_IGN, we don't know. Assume so, and allow collecting
4386 while-stepping into a signal handler. A possible smart thing to
4387 do would be to set an internal breakpoint at the signal return
4388 address, continue, and carry on catching this while-stepping
4389 action only when that breakpoint is hit. A future
4390 enhancement. */
4391 if (thread->while_stepping != NULL)
4392 {
4393 if (debug_threads)
4394 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4395 lwpid_of (thread));
4396
4397 step = single_step (lwp);
4398 }
4399
4400 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4401 {
4402 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4403
4404 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4405
4406 if (debug_threads)
4407 {
4408 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4409 (long) lwp->stop_pc);
4410 }
4411 }
4412
4413 /* If we have pending signals, consume one if it can be delivered to
4414 the inferior. */
4415 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4416 {
4417 struct pending_signals **p_sig;
4418
4419 p_sig = &lwp->pending_signals;
4420 while ((*p_sig)->prev != NULL)
4421 p_sig = &(*p_sig)->prev;
4422
4423 signal = (*p_sig)->signal;
4424 if ((*p_sig)->info.si_signo != 0)
4425 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4426 &(*p_sig)->info);
4427
4428 free (*p_sig);
4429 *p_sig = NULL;
4430 }
4431
4432 if (debug_threads)
4433 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4434 lwpid_of (thread), step ? "step" : "continue", signal,
4435 lwp->stop_expected ? "expected" : "not expected");
4436
4437 if (the_low_target.prepare_to_resume != NULL)
4438 the_low_target.prepare_to_resume (lwp);
4439
4440 regcache_invalidate_thread (thread);
4441 errno = 0;
4442 lwp->stepping = step;
4443 if (step)
4444 ptrace_request = PTRACE_SINGLESTEP;
4445 else if (gdb_catching_syscalls_p (lwp))
4446 ptrace_request = PTRACE_SYSCALL;
4447 else
4448 ptrace_request = PTRACE_CONT;
4449 ptrace (ptrace_request,
4450 lwpid_of (thread),
4451 (PTRACE_TYPE_ARG3) 0,
4452 /* Coerce to a uintptr_t first to avoid potential gcc warning
4453 of coercing an 8 byte integer to a 4 byte pointer. */
4454 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4455
4456 current_thread = saved_thread;
4457 if (errno)
4458 perror_with_name ("resuming thread");
4459
4460 /* Successfully resumed. Clear state that no longer makes sense,
4461 and mark the LWP as running. Must not do this before resuming
4462 otherwise if that fails other code will be confused. E.g., we'd
4463 later try to stop the LWP and hang forever waiting for a stop
4464 status. Note that we must not throw after this is cleared,
4465 otherwise handle_zombie_lwp_error would get confused. */
4466 lwp->stopped = 0;
4467 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4468 }
4469
4470 /* Called when we try to resume a stopped LWP and that errors out. If
4471 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4472 or about to become), discard the error, clear any pending status
4473 the LWP may have, and return true (we'll collect the exit status
4474 soon enough). Otherwise, return false. */
4475
4476 static int
4477 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4478 {
4479 struct thread_info *thread = get_lwp_thread (lp);
4480
4481 /* If we get an error after resuming the LWP successfully, we'd
4482 confuse !T state for the LWP being gone. */
4483 gdb_assert (lp->stopped);
4484
4485 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4486 because even if ptrace failed with ESRCH, the tracee may be "not
4487 yet fully dead", but already refusing ptrace requests. In that
4488 case the tracee has 'R (Running)' state for a little bit
4489 (observed in Linux 3.18). See also the note on ESRCH in the
4490 ptrace(2) man page. Instead, check whether the LWP has any state
4491 other than ptrace-stopped. */
4492
4493 /* Don't assume anything if /proc/PID/status can't be read. */
4494 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4495 {
4496 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4497 lp->status_pending_p = 0;
4498 return 1;
4499 }
4500 return 0;
4501 }
4502
4503 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4504 disappears while we try to resume it. */
4505
4506 static void
4507 linux_resume_one_lwp (struct lwp_info *lwp,
4508 int step, int signal, siginfo_t *info)
4509 {
4510 try
4511 {
4512 linux_resume_one_lwp_throw (lwp, step, signal, info);
4513 }
4514 catch (const gdb_exception_error &ex)
4515 {
4516 if (!check_ptrace_stopped_lwp_gone (lwp))
4517 throw_exception (ex);
4518 }
4519 }
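
/* This is the throwing-worker/filtering-wrapper pattern used in a
   few places in this file: the _throw variant reports failures as
   gdb_exception_error, and the wrapper swallows exactly the one
   recoverable case (the LWP vanished mid-resume) and re-raises
   everything else via throw_exception.  */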
4520
4521 /* This function is called once per thread via for_each_thread.
4522 We look up which resume request applies to THREAD and mark it with a
4523 pointer to the appropriate resume request.
4524
4525 This algorithm is O(threads * resume elements), but resume elements
4526 is small (and will remain small at least until GDB supports thread
4527 suspension). */
4528
4529 static void
4530 linux_set_resume_request (thread_info *thread, thread_resume *resume, size_t n)
4531 {
4532 struct lwp_info *lwp = get_thread_lwp (thread);
4533
4534   for (size_t ndx = 0; ndx < n; ndx++)
4535 {
4536 ptid_t ptid = resume[ndx].thread;
4537 if (ptid == minus_one_ptid
4538 || ptid == thread->id
4539 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4540 of PID'. */
4541 || (ptid.pid () == pid_of (thread)
4542 && (ptid.is_pid ()
4543 || ptid.lwp () == -1)))
4544 {
4545 if (resume[ndx].kind == resume_stop
4546 && thread->last_resume_kind == resume_stop)
4547 {
4548 if (debug_threads)
4549 debug_printf ("already %s LWP %ld at GDB's request\n",
4550 (thread->last_status.kind
4551 == TARGET_WAITKIND_STOPPED)
4552 ? "stopped"
4553 : "stopping",
4554 lwpid_of (thread));
4555
4556 continue;
4557 }
4558
4559 /* Ignore (wildcard) resume requests for already-resumed
4560 threads. */
4561 if (resume[ndx].kind != resume_stop
4562 && thread->last_resume_kind != resume_stop)
4563 {
4564 if (debug_threads)
4565 debug_printf ("already %s LWP %ld at GDB's request\n",
4566 (thread->last_resume_kind
4567 == resume_step)
4568 ? "stepping"
4569 : "continuing",
4570 lwpid_of (thread));
4571 continue;
4572 }
4573
4574 /* Don't let wildcard resumes resume fork children that GDB
4575 does not yet know are new fork children. */
4576 if (lwp->fork_relative != NULL)
4577 {
4578 struct lwp_info *rel = lwp->fork_relative;
4579
4580 if (rel->status_pending_p
4581 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4582 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4583 {
4584 if (debug_threads)
4585 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4586 lwpid_of (thread));
4587 continue;
4588 }
4589 }
4590
4591 /* If the thread has a pending event that has already been
4592 reported to GDBserver core, but GDB has not pulled the
4593 event out of the vStopped queue yet, likewise, ignore the
4594 (wildcard) resume request. */
4595 if (in_queued_stop_replies (thread->id))
4596 {
4597 if (debug_threads)
4598 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4599 lwpid_of (thread));
4600 continue;
4601 }
4602
4603 lwp->resume = &resume[ndx];
4604 thread->last_resume_kind = lwp->resume->kind;
4605
4606 lwp->step_range_start = lwp->resume->step_range_start;
4607 lwp->step_range_end = lwp->resume->step_range_end;
4608
4609 /* If we had a deferred signal to report, dequeue one now.
4610 This can happen if LWP gets more than one signal while
4611 trying to get out of a jump pad. */
4612 if (lwp->stopped
4613 && !lwp->status_pending_p
4614 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4615 {
4616 lwp->status_pending_p = 1;
4617
4618 if (debug_threads)
4619 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4620 "leaving status pending.\n",
4621 WSTOPSIG (lwp->status_pending),
4622 lwpid_of (thread));
4623 }
4624
4625 return;
4626 }
4627 }
4628
4629 /* No resume action for this thread. */
4630 lwp->resume = NULL;
4631 }
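
/* For example, a GDB packet `vCont;s:pPID.TID;c' arrives here as a
   two-element resume array: a resume_step entry whose ptid names one
   specific thread, followed by a wildcard resume_continue entry with
   minus_one_ptid.  The loop above applies the first entry that
   matches the thread, so the more specific action wins.  */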
4632
4633 /* find_thread callback for linux_resume. Return true if this lwp has an
4634 interesting status pending. */
4635
4636 static bool
4637 resume_status_pending_p (thread_info *thread)
4638 {
4639 struct lwp_info *lwp = get_thread_lwp (thread);
4640
4641 /* LWPs which will not be resumed are not interesting, because
4642 we might not wait for them next time through linux_wait. */
4643 if (lwp->resume == NULL)
4644 return false;
4645
4646 return thread_still_has_status_pending_p (thread);
4647 }
4648
4649 /* Return true if this lwp that GDB wants running is stopped at an
4650 internal breakpoint that we need to step over. It assumes that any
4651 required STOP_PC adjustment has already been propagated to the
4652 inferior's regcache. */
4653
4654 static bool
4655 need_step_over_p (thread_info *thread)
4656 {
4657 struct lwp_info *lwp = get_thread_lwp (thread);
4658 struct thread_info *saved_thread;
4659 CORE_ADDR pc;
4660 struct process_info *proc = get_thread_process (thread);
4661
4662   /* GDBserver is skipping the extra traps from the wrapper program,
4663      so there is no need to do a step-over.  */
4664 if (proc->tdesc == NULL)
4665 return false;
4666
4667 /* LWPs which will not be resumed are not interesting, because we
4668 might not wait for them next time through linux_wait. */
4669
4670 if (!lwp->stopped)
4671 {
4672 if (debug_threads)
4673 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4674 lwpid_of (thread));
4675 return false;
4676 }
4677
4678 if (thread->last_resume_kind == resume_stop)
4679 {
4680 if (debug_threads)
4681 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4682 " stopped\n",
4683 lwpid_of (thread));
4684 return false;
4685 }
4686
4687 gdb_assert (lwp->suspended >= 0);
4688
4689 if (lwp->suspended)
4690 {
4691 if (debug_threads)
4692 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4693 lwpid_of (thread));
4694 return false;
4695 }
4696
4697 if (lwp->status_pending_p)
4698 {
4699 if (debug_threads)
4700 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4701 " status.\n",
4702 lwpid_of (thread));
4703 return false;
4704 }
4705
4706 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4707 or we have. */
4708 pc = get_pc (lwp);
4709
4710 /* If the PC has changed since we stopped, then don't do anything,
4711 and let the breakpoint/tracepoint be hit. This happens if, for
4712 instance, GDB handled the decr_pc_after_break subtraction itself,
4713 GDB is OOL stepping this thread, or the user has issued a "jump"
4714 command, or poked thread's registers herself. */
4715 if (pc != lwp->stop_pc)
4716 {
4717 if (debug_threads)
4718 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4719 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4720 lwpid_of (thread),
4721 paddress (lwp->stop_pc), paddress (pc));
4722 return false;
4723 }
4724
4725   /* On software single-step targets, resume the inferior with the
4726      signal rather than stepping over.  */
4727 if (can_software_single_step ()
4728 && lwp->pending_signals != NULL
4729 && lwp_signal_can_be_delivered (lwp))
4730 {
4731 if (debug_threads)
4732 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4733 " signals.\n",
4734 lwpid_of (thread));
4735
4736 return false;
4737 }
4738
4739 saved_thread = current_thread;
4740 current_thread = thread;
4741
4742 /* We can only step over breakpoints we know about. */
4743 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4744 {
4745 /* Don't step over a breakpoint that GDB expects to hit
4746 though. If the condition is being evaluated on the target's side
4747 	     and it evaluates to false, step over this breakpoint as well.  */
4748 if (gdb_breakpoint_here (pc)
4749 && gdb_condition_true_at_breakpoint (pc)
4750 && gdb_no_commands_at_breakpoint (pc))
4751 {
4752 if (debug_threads)
4753 debug_printf ("Need step over [LWP %ld]? yes, but found"
4754 " GDB breakpoint at 0x%s; skipping step over\n",
4755 lwpid_of (thread), paddress (pc));
4756
4757 current_thread = saved_thread;
4758 return false;
4759 }
4760 else
4761 {
4762 if (debug_threads)
4763 debug_printf ("Need step over [LWP %ld]? yes, "
4764 "found breakpoint at 0x%s\n",
4765 lwpid_of (thread), paddress (pc));
4766
4767 	      /* We've found an lwp that needs stepping over --- return
4768 		 true so that find_thread stops looking.  */
4769 current_thread = saved_thread;
4770
4771 return true;
4772 }
4773 }
4774
4775 current_thread = saved_thread;
4776
4777 if (debug_threads)
4778 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4779 " at 0x%s\n",
4780 lwpid_of (thread), paddress (pc));
4781
4782 return false;
4783 }
4784
4785 /* Start a step-over operation on LWP.  When LWP is stopped at a
4786    breakpoint, to make progress we need to move the breakpoint out
4787    of the way.  If we let other threads run while we do that, they may
4788 pass by the breakpoint location and miss hitting it. To avoid
4789 that, a step-over momentarily stops all threads while LWP is
4790 single-stepped by either hardware or software while the breakpoint
4791 is temporarily uninserted from the inferior. When the single-step
4792 finishes, we reinsert the breakpoint, and let all threads that are
4793 supposed to be running, run again. */
4794
4795 static int
4796 start_step_over (struct lwp_info *lwp)
4797 {
4798 struct thread_info *thread = get_lwp_thread (lwp);
4799 struct thread_info *saved_thread;
4800 CORE_ADDR pc;
4801 int step;
4802
4803 if (debug_threads)
4804 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4805 lwpid_of (thread));
4806
4807 stop_all_lwps (1, lwp);
4808
4809 if (lwp->suspended != 0)
4810 {
4811 internal_error (__FILE__, __LINE__,
4812 "LWP %ld suspended=%d\n", lwpid_of (thread),
4813 lwp->suspended);
4814 }
4815
4816 if (debug_threads)
4817 debug_printf ("Done stopping all threads for step-over.\n");
4818
4819 /* Note, we should always reach here with an already adjusted PC,
4820 either by GDB (if we're resuming due to GDB's request), or by our
4821 caller, if we just finished handling an internal breakpoint GDB
4822 shouldn't care about. */
4823 pc = get_pc (lwp);
4824
4825 saved_thread = current_thread;
4826 current_thread = thread;
4827
4828 lwp->bp_reinsert = pc;
4829 uninsert_breakpoints_at (pc);
4830 uninsert_fast_tracepoint_jumps_at (pc);
4831
4832 step = single_step (lwp);
4833
4834 current_thread = saved_thread;
4835
4836 linux_resume_one_lwp (lwp, step, 0, NULL);
4837
4838 /* Require next event from this LWP. */
4839 step_over_bkpt = thread->id;
4840 return 1;
4841 }
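
/* In summary, the step-over sequence is: stop the world
   (stop_all_lwps with suspend), lift the breakpoint at PC, step only
   this LWP (a hardware single-step, or a single-step breakpoint on
   the next PC), and later finish_step_over reinserts the breakpoint
   and sets the other threads running again.  */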
4842
4843 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4844 start_step_over, if still there, and delete any single-step
4845    breakpoints we've set, on non-hardware single-step targets.  */
4846
4847 static int
4848 finish_step_over (struct lwp_info *lwp)
4849 {
4850 if (lwp->bp_reinsert != 0)
4851 {
4852 struct thread_info *saved_thread = current_thread;
4853
4854 if (debug_threads)
4855 debug_printf ("Finished step over.\n");
4856
4857 current_thread = get_lwp_thread (lwp);
4858
4859 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4860 may be no breakpoint to reinsert there by now. */
4861 reinsert_breakpoints_at (lwp->bp_reinsert);
4862 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4863
4864 lwp->bp_reinsert = 0;
4865
4866 /* Delete any single-step breakpoints. No longer needed. We
4867 don't have to worry about other threads hitting this trap,
4868 and later not being able to explain it, because we were
4869 stepping over a breakpoint, and we hold all threads but
4870 LWP stopped while doing that. */
4871 if (!can_hardware_single_step ())
4872 {
4873 gdb_assert (has_single_step_breakpoints (current_thread));
4874 delete_single_step_breakpoints (current_thread);
4875 }
4876
4877 step_over_bkpt = null_ptid;
4878 current_thread = saved_thread;
4879 return 1;
4880 }
4881 else
4882 return 0;
4883 }
4884
4885 /* If there's a step over in progress, wait until all threads stop
4886 (that is, until the stepping thread finishes its step), and
4887 unsuspend all lwps. The stepping thread ends with its status
4888 pending, which is processed later when we get back to processing
4889 events. */
4890
4891 static void
4892 complete_ongoing_step_over (void)
4893 {
4894 if (step_over_bkpt != null_ptid)
4895 {
4896 struct lwp_info *lwp;
4897 int wstat;
4898 int ret;
4899
4900 if (debug_threads)
4901 debug_printf ("detach: step over in progress, finish it first\n");
4902
4903 /* Passing NULL_PTID as filter indicates we want all events to
4904 be left pending. Eventually this returns when there are no
4905 unwaited-for children left. */
4906 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4907 &wstat, __WALL);
4908 gdb_assert (ret == -1);
4909
4910 lwp = find_lwp_pid (step_over_bkpt);
4911 if (lwp != NULL)
4912 finish_step_over (lwp);
4913 step_over_bkpt = null_ptid;
4914 unsuspend_all_lwps (lwp);
4915 }
4916 }
4917
4918 /* This function is called once per thread. We check the thread's resume
4919 request, which will tell us whether to resume, step, or leave the thread
4920 stopped; and what signal, if any, it should be sent.
4921
4922 For threads which we aren't explicitly told otherwise, we preserve
4923 the stepping flag; this is used for stepping over gdbserver-placed
4924 breakpoints.
4925
4926 If pending_flags was set in any thread, we queue any needed
4927 signals, since we won't actually resume. We already have a pending
4928 event to report, so we don't need to preserve any step requests;
4929 they should be re-issued if necessary. */
4930
4931 static void
4932 linux_resume_one_thread (thread_info *thread, bool leave_all_stopped)
4933 {
4934 struct lwp_info *lwp = get_thread_lwp (thread);
4935 int leave_pending;
4936
4937 if (lwp->resume == NULL)
4938 return;
4939
4940 if (lwp->resume->kind == resume_stop)
4941 {
4942 if (debug_threads)
4943 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4944
4945 if (!lwp->stopped)
4946 {
4947 if (debug_threads)
4948 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4949
4950 /* Stop the thread, and wait for the event asynchronously,
4951 through the event loop. */
4952 send_sigstop (lwp);
4953 }
4954 else
4955 {
4956 if (debug_threads)
4957 debug_printf ("already stopped LWP %ld\n",
4958 lwpid_of (thread));
4959
4960 /* The LWP may have been stopped in an internal event that
4961 was not meant to be notified back to GDB (e.g., gdbserver
4962 breakpoint), so we should be reporting a stop event in
4963 this case too. */
4964
4965 /* If the thread already has a pending SIGSTOP, this is a
4966 no-op. Otherwise, something later will presumably resume
4967 the thread and this will cause it to cancel any pending
4968 operation, due to last_resume_kind == resume_stop. If
4969 the thread already has a pending status to report, we
4970 will still report it the next time we wait - see
4971 status_pending_p_callback. */
4972
4973 /* If we already have a pending signal to report, then
4974 there's no need to queue a SIGSTOP, as this means we're
4975 midway through moving the LWP out of the jumppad, and we
4976 will report the pending signal as soon as that is
4977 finished. */
4978 if (lwp->pending_signals_to_report == NULL)
4979 send_sigstop (lwp);
4980 }
4981
4982 /* For stop requests, we're done. */
4983 lwp->resume = NULL;
4984 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4985 return;
4986 }
4987
4988 /* If this thread which is about to be resumed has a pending status,
4989 then don't resume it - we can just report the pending status.
4990 Likewise if it is suspended, because e.g., another thread is
4991 stepping past a breakpoint. Make sure to queue any signals that
4992 	 would otherwise be sent.  In all-stop mode, we make this decision
4993 	 based on whether *any* thread has a pending status.  If there's a
4994 thread that needs the step-over-breakpoint dance, then don't
4995 resume any other thread but that particular one. */
4996 leave_pending = (lwp->suspended
4997 || lwp->status_pending_p
4998 || leave_all_stopped);
4999
5000 /* If we have a new signal, enqueue the signal. */
5001 if (lwp->resume->sig != 0)
5002 {
5003 siginfo_t info, *info_p;
5004
5005 /* If this is the same signal we were previously stopped by,
5006 make sure to queue its siginfo. */
5007 if (WIFSTOPPED (lwp->last_status)
5008 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5009 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5010 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5011 info_p = &info;
5012 else
5013 info_p = NULL;
5014
5015 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5016 }
5017
5018 if (!leave_pending)
5019 {
5020 if (debug_threads)
5021 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5022
5023 proceed_one_lwp (thread, NULL);
5024 }
5025 else
5026 {
5027 if (debug_threads)
5028 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5029 }
5030
5031 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5032 lwp->resume = NULL;
5033 }
5034
5035 static void
5036 linux_resume (struct thread_resume *resume_info, size_t n)
5037 {
5038 struct thread_info *need_step_over = NULL;
5039
5040 if (debug_threads)
5041 {
5042 debug_enter ();
5043 debug_printf ("linux_resume:\n");
5044 }
5045
5046 for_each_thread ([&] (thread_info *thread)
5047 {
5048 linux_set_resume_request (thread, resume_info, n);
5049 });
5050
5051 /* If there is a thread which would otherwise be resumed, which has
5052 a pending status, then don't resume any threads - we can just
5053 report the pending status. Make sure to queue any signals that
5054 would otherwise be sent. In non-stop mode, we'll apply this
5055 logic to each thread individually. We consume all pending events
5056 before considering to start a step-over (in all-stop). */
5057 bool any_pending = false;
5058 if (!non_stop)
5059 any_pending = find_thread (resume_status_pending_p) != NULL;
5060
5061 /* If there is a thread which would otherwise be resumed, which is
5062 stopped at a breakpoint that needs stepping over, then don't
5063 resume any threads - have it step over the breakpoint with all
5064 other threads stopped, then resume all threads again. Make sure
5065 to queue any signals that would otherwise be delivered or
5066 queued. */
5067 if (!any_pending && supports_breakpoints ())
5068 need_step_over = find_thread (need_step_over_p);
5069
5070 bool leave_all_stopped = (need_step_over != NULL || any_pending);
5071
5072 if (debug_threads)
5073 {
5074 if (need_step_over != NULL)
5075 debug_printf ("Not resuming all, need step over\n");
5076 else if (any_pending)
5077 debug_printf ("Not resuming, all-stop and found "
5078 "an LWP with pending status\n");
5079 else
5080 debug_printf ("Resuming, no pending status or step over needed\n");
5081 }
5082
5083 /* Even if we're leaving threads stopped, queue all signals we'd
5084 otherwise deliver. */
5085 for_each_thread ([&] (thread_info *thread)
5086 {
5087 linux_resume_one_thread (thread, leave_all_stopped);
5088 });
5089
5090 if (need_step_over)
5091 start_step_over (get_thread_lwp (need_step_over));
5092
5093 if (debug_threads)
5094 {
5095 debug_printf ("linux_resume done\n");
5096 debug_exit ();
5097 }
5098
5099 /* We may have events that were pending that can/should be sent to
5100 the client now. Trigger a linux_wait call. */
5101 if (target_is_async_p ())
5102 async_file_mark ();
5103 }
5104
5105 /* This function is called once per thread. We check the thread's
5106 last resume request, which will tell us whether to resume, step, or
5107 leave the thread stopped. Any signal the client requested to be
5108 delivered has already been enqueued at this point.
5109
5110 If any thread that GDB wants running is stopped at an internal
5111 breakpoint that needs stepping over, we start a step-over operation
5112 on that particular thread, and leave all others stopped. */
5113
5114 static void
5115 proceed_one_lwp (thread_info *thread, lwp_info *except)
5116 {
5117 struct lwp_info *lwp = get_thread_lwp (thread);
5118 int step;
5119
5120 if (lwp == except)
5121 return;
5122
5123 if (debug_threads)
5124 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5125
5126 if (!lwp->stopped)
5127 {
5128 if (debug_threads)
5129 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5130 return;
5131 }
5132
5133 if (thread->last_resume_kind == resume_stop
5134 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5135 {
5136 if (debug_threads)
5137 	debug_printf ("   client wants LWP %ld to remain stopped\n",
5138 lwpid_of (thread));
5139 return;
5140 }
5141
5142 if (lwp->status_pending_p)
5143 {
5144 if (debug_threads)
5145 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5146 lwpid_of (thread));
5147 return;
5148 }
5149
5150 gdb_assert (lwp->suspended >= 0);
5151
5152 if (lwp->suspended)
5153 {
5154 if (debug_threads)
5155 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5156 return;
5157 }
5158
5159 if (thread->last_resume_kind == resume_stop
5160 && lwp->pending_signals_to_report == NULL
5161 && (lwp->collecting_fast_tracepoint
5162 == fast_tpoint_collect_result::not_collecting))
5163 {
5164 /* We haven't reported this LWP as stopped yet (otherwise, the
5165 last_status.kind check above would catch it, and we wouldn't
5166 	 reach here).  This LWP may have been momentarily paused by a
5167 	 stop_all_lwps call while handling, for example, another LWP's
5168 step-over. In that case, the pending expected SIGSTOP signal
5169 that was queued at vCont;t handling time will have already
5170 been consumed by wait_for_sigstop, and so we need to requeue
5171 another one here. Note that if the LWP already has a SIGSTOP
5172 pending, this is a no-op. */
5173
5174 if (debug_threads)
5175 debug_printf ("Client wants LWP %ld to stop. "
5176 "Making sure it has a SIGSTOP pending\n",
5177 lwpid_of (thread));
5178
5179 send_sigstop (lwp);
5180 }
5181
5182 if (thread->last_resume_kind == resume_step)
5183 {
5184 if (debug_threads)
5185 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5186 lwpid_of (thread));
5187
5188 /* If resume_step is requested by GDB, install single-step
5189 breakpoints when the thread is about to be actually resumed if
5190 the single-step breakpoints weren't removed. */
5191 if (can_software_single_step ()
5192 && !has_single_step_breakpoints (thread))
5193 install_software_single_step_breakpoints (lwp);
5194
5195 step = maybe_hw_step (thread);
5196 }
5197 else if (lwp->bp_reinsert != 0)
5198 {
5199 if (debug_threads)
5200 debug_printf (" stepping LWP %ld, reinsert set\n",
5201 lwpid_of (thread));
5202
5203 step = maybe_hw_step (thread);
5204 }
5205 else
5206 step = 0;
5207
5208 linux_resume_one_lwp (lwp, step, 0, NULL);
5209 }
5210
5211 static void
5212 unsuspend_and_proceed_one_lwp (thread_info *thread, lwp_info *except)
5213 {
5214 struct lwp_info *lwp = get_thread_lwp (thread);
5215
5216 if (lwp == except)
5217 return;
5218
5219 lwp_suspended_decr (lwp);
5220
5221 proceed_one_lwp (thread, except);
5222 }
5223
5224 /* When we finish a step-over, set threads running again. If there's
5225 another thread that may need a step-over, now's the time to start
5226 it. Eventually, we'll move all threads past their breakpoints. */
5227
5228 static void
5229 proceed_all_lwps (void)
5230 {
5231 struct thread_info *need_step_over;
5232
5233 /* If there is a thread which would otherwise be resumed, which is
5234 stopped at a breakpoint that needs stepping over, then don't
5235 resume any threads - have it step over the breakpoint with all
5236 other threads stopped, then resume all threads again. */
5237
5238 if (supports_breakpoints ())
5239 {
5240 need_step_over = find_thread (need_step_over_p);
5241
5242 if (need_step_over != NULL)
5243 {
5244 if (debug_threads)
5245 debug_printf ("proceed_all_lwps: found "
5246 "thread %ld needing a step-over\n",
5247 lwpid_of (need_step_over));
5248
5249 start_step_over (get_thread_lwp (need_step_over));
5250 return;
5251 }
5252 }
5253
5254 if (debug_threads)
5255 debug_printf ("Proceeding, no step-over needed\n");
5256
5257 for_each_thread ([] (thread_info *thread)
5258 {
5259 proceed_one_lwp (thread, NULL);
5260 });
5261 }
5262
5263 /* Stopped LWPs that the client wanted to be running, that don't have
5264 pending statuses, are set to run again, except for EXCEPT, if not
5265 NULL. This undoes a stop_all_lwps call. */
5266
5267 static void
5268 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5269 {
5270 if (debug_threads)
5271 {
5272 debug_enter ();
5273 if (except)
5274 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5275 lwpid_of (get_lwp_thread (except)));
5276 else
5277 debug_printf ("unstopping all lwps\n");
5278 }
5279
5280 if (unsuspend)
5281 for_each_thread ([&] (thread_info *thread)
5282 {
5283 unsuspend_and_proceed_one_lwp (thread, except);
5284 });
5285 else
5286 for_each_thread ([&] (thread_info *thread)
5287 {
5288 proceed_one_lwp (thread, except);
5289 });
5290
5291 if (debug_threads)
5292 {
5293 debug_printf ("unstop_all_lwps done\n");
5294 debug_exit ();
5295 }
5296 }
5297
5298
5299 #ifdef HAVE_LINUX_REGSETS
5300
5301 #define use_linux_regsets 1
5302
5303 /* Returns true if REGSET has been disabled. */
5304
5305 static int
5306 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5307 {
5308 return (info->disabled_regsets != NULL
5309 && info->disabled_regsets[regset - info->regsets]);
5310 }
5311
5312 /* Disable REGSET. */
5313
5314 static void
5315 disable_regset (struct regsets_info *info, struct regset_info *regset)
5316 {
5317 int dr_offset;
5318
5319 dr_offset = regset - info->regsets;
5320 if (info->disabled_regsets == NULL)
5321 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5322 info->disabled_regsets[dr_offset] = 1;
5323 }
5324
5325 static int
5326 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5327 struct regcache *regcache)
5328 {
5329 struct regset_info *regset;
5330 int saw_general_regs = 0;
5331 int pid;
5332 struct iovec iov;
5333
5334 pid = lwpid_of (current_thread);
5335 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5336 {
5337 void *buf, *data;
5338 int nt_type, res;
5339
5340 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5341 continue;
5342
5343 buf = xmalloc (regset->size);
5344
5345 nt_type = regset->nt_type;
5346 if (nt_type)
5347 {
5348 iov.iov_base = buf;
5349 iov.iov_len = regset->size;
5350 data = (void *) &iov;
5351 }
5352 else
5353 data = buf;
5354
5355 #ifndef __sparc__
5356 res = ptrace (regset->get_request, pid,
5357 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5358 #else
5359 res = ptrace (regset->get_request, pid, data, nt_type);
5360 #endif
5361 if (res < 0)
5362 {
5363 if (errno == EIO
5364 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5365 {
5366 /* If we get EIO on a regset, or an EINVAL and the regset is
5367 optional, do not try it again for this process mode. */
5368 disable_regset (regsets_info, regset);
5369 }
5370 else if (errno == ENODATA)
5371 {
5372 /* ENODATA may be returned if the regset is currently
5373 not "active". This can happen in normal operation,
5374 so suppress the warning in this case. */
5375 }
5376 else if (errno == ESRCH)
5377 {
5378 /* At this point, ESRCH should mean the process is
5379 already gone, in which case we simply ignore attempts
5380 to read its registers. */
5381 }
5382 else
5383 {
5384 char s[256];
5385 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5386 pid);
5387 perror (s);
5388 }
5389 }
5390 else
5391 {
5392 if (regset->type == GENERAL_REGS)
5393 saw_general_regs = 1;
5394 regset->store_function (regcache, buf);
5395 }
5396 free (buf);
5397 }
5398 if (saw_general_regs)
5399 return 0;
5400 else
5401 return 1;
5402 }
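
/* When a regset has an NT_* note type, the transfer above goes
   through PTRACE_GETREGSET with an iovec.  A minimal standalone
   sketch of the call shape (hypothetical example, not part of
   gdbserver):

     #include <elf.h>
     #include <sys/ptrace.h>
     #include <sys/uio.h>
     #include <sys/user.h>

     static long
     fetch_gregs (pid_t pid, struct user_regs_struct *regs)
     {
       struct iovec iov = { regs, sizeof (*regs) };
       // On success the kernel updates iov.iov_len to the number of
       // bytes actually written.
       return ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
     }
*/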
5403
5404 static int
5405 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5406 struct regcache *regcache)
5407 {
5408 struct regset_info *regset;
5409 int saw_general_regs = 0;
5410 int pid;
5411 struct iovec iov;
5412
5413 pid = lwpid_of (current_thread);
5414 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5415 {
5416 void *buf, *data;
5417 int nt_type, res;
5418
5419 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5420 || regset->fill_function == NULL)
5421 continue;
5422
5423 buf = xmalloc (regset->size);
5424
5425 /* First fill the buffer with the current register set contents,
5426 in case there are any items in the kernel's regset that are
5427 not in gdbserver's regcache. */
5428
5429 nt_type = regset->nt_type;
5430 if (nt_type)
5431 {
5432 iov.iov_base = buf;
5433 iov.iov_len = regset->size;
5434 data = (void *) &iov;
5435 }
5436 else
5437 data = buf;
5438
5439 #ifndef __sparc__
5440 res = ptrace (regset->get_request, pid,
5441 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5442 #else
5443 res = ptrace (regset->get_request, pid, data, nt_type);
5444 #endif
5445
5446 if (res == 0)
5447 {
5448 /* Then overlay our cached registers on that. */
5449 regset->fill_function (regcache, buf);
5450
5451 /* Only now do we write the register set. */
5452 #ifndef __sparc__
5453 res = ptrace (regset->set_request, pid,
5454 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5455 #else
5456 res = ptrace (regset->set_request, pid, data, nt_type);
5457 #endif
5458 }
5459
5460 if (res < 0)
5461 {
5462 if (errno == EIO
5463 || (errno == EINVAL && regset->type == OPTIONAL_REGS))
5464 {
5465 /* If we get EIO on a regset, or an EINVAL and the regset is
5466 optional, do not try it again for this process mode. */
5467 disable_regset (regsets_info, regset);
5468 }
5469 else if (errno == ESRCH)
5470 {
5471 /* At this point, ESRCH should mean the process is
5472 already gone, in which case we simply ignore attempts
5473 to change its registers. See also the related
5474 comment in linux_resume_one_lwp. */
5475 free (buf);
5476 return 0;
5477 }
5478 else
5479 {
5480 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5481 }
5482 }
5483 else if (regset->type == GENERAL_REGS)
5484 saw_general_regs = 1;
5485 free (buf);
5486 }
5487 if (saw_general_regs)
5488 return 0;
5489 else
5490 return 1;
5491 }
5492
5493 #else /* !HAVE_LINUX_REGSETS */
5494
5495 #define use_linux_regsets 0
5496 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5497 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5498
5499 #endif
5500
5501 /* Return 1 if register REGNO is supported by one of the regset ptrace
5502 calls or 0 if it has to be transferred individually. */
5503
5504 static int
5505 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5506 {
5507 unsigned char mask = 1 << (regno % 8);
5508 size_t index = regno / 8;
5509
5510 return (use_linux_regsets
5511 && (regs_info->regset_bitmap == NULL
5512 || (regs_info->regset_bitmap[index] & mask) != 0));
5513 }
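
/* The regset_bitmap is a plain bit array indexed by register number.
   For instance, regno == 10 gives index == 1 and mask == 0x04, so
   the test above reads bit 2 of byte 1.  */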
5514
5515 #ifdef HAVE_LINUX_USRREGS
5516
5517 static int
5518 register_addr (const struct usrregs_info *usrregs, int regnum)
5519 {
5520 int addr;
5521
5522 if (regnum < 0 || regnum >= usrregs->num_regs)
5523 error ("Invalid register number %d.", regnum);
5524
5525 addr = usrregs->regmap[regnum];
5526
5527 return addr;
5528 }
5529
5530 /* Fetch one register. */
5531 static void
5532 fetch_register (const struct usrregs_info *usrregs,
5533 struct regcache *regcache, int regno)
5534 {
5535 CORE_ADDR regaddr;
5536 int i, size;
5537 char *buf;
5538 int pid;
5539
5540 if (regno >= usrregs->num_regs)
5541 return;
5542 if ((*the_low_target.cannot_fetch_register) (regno))
5543 return;
5544
5545 regaddr = register_addr (usrregs, regno);
5546 if (regaddr == -1)
5547 return;
5548
5549 size = ((register_size (regcache->tdesc, regno)
5550 + sizeof (PTRACE_XFER_TYPE) - 1)
5551 & -sizeof (PTRACE_XFER_TYPE));
5552 buf = (char *) alloca (size);
5553
5554 pid = lwpid_of (current_thread);
5555 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5556 {
5557 errno = 0;
5558 *(PTRACE_XFER_TYPE *) (buf + i) =
5559 ptrace (PTRACE_PEEKUSER, pid,
5560 /* Coerce to a uintptr_t first to avoid potential gcc warning
5561 of coercing an 8 byte integer to a 4 byte pointer. */
5562 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5563 regaddr += sizeof (PTRACE_XFER_TYPE);
5564 if (errno != 0)
5565 {
5566 /* Mark register REGNO unavailable. */
5567 supply_register (regcache, regno, NULL);
5568 return;
5569 }
5570 }
5571
5572 if (the_low_target.supply_ptrace_register)
5573 the_low_target.supply_ptrace_register (regcache, regno, buf);
5574 else
5575 supply_register (regcache, regno, buf);
5576 }
5577
5578 /* Store one register. */
5579 static void
5580 store_register (const struct usrregs_info *usrregs,
5581 struct regcache *regcache, int regno)
5582 {
5583 CORE_ADDR regaddr;
5584 int i, size;
5585 char *buf;
5586 int pid;
5587
5588 if (regno >= usrregs->num_regs)
5589 return;
5590 if ((*the_low_target.cannot_store_register) (regno))
5591 return;
5592
5593 regaddr = register_addr (usrregs, regno);
5594 if (regaddr == -1)
5595 return;
5596
5597 size = ((register_size (regcache->tdesc, regno)
5598 + sizeof (PTRACE_XFER_TYPE) - 1)
5599 & -sizeof (PTRACE_XFER_TYPE));
5600 buf = (char *) alloca (size);
5601 memset (buf, 0, size);
5602
5603 if (the_low_target.collect_ptrace_register)
5604 the_low_target.collect_ptrace_register (regcache, regno, buf);
5605 else
5606 collect_register (regcache, regno, buf);
5607
5608 pid = lwpid_of (current_thread);
5609 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5610 {
5611 errno = 0;
5612 ptrace (PTRACE_POKEUSER, pid,
5613 /* Coerce to a uintptr_t first to avoid potential gcc warning
5614 about coercing an 8 byte integer to a 4 byte pointer. */
5615 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5616 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5617 if (errno != 0)
5618 {
5619 /* At this point, ESRCH should mean the process is
5620 already gone, in which case we simply ignore attempts
5621 to change its registers. See also the related
5622 comment in linux_resume_one_lwp. */
5623 if (errno == ESRCH)
5624 return;
5625
5626 if ((*the_low_target.cannot_store_register) (regno) == 0)
5627 error ("writing register %d: %s", regno, strerror (errno));
5628 }
5629 regaddr += sizeof (PTRACE_XFER_TYPE);
5630 }
5631 }
5632
5633 /* Fetch all registers, or just one, from the child process.
5634 If REGNO is -1, do this for all registers, skipping any that are
5635 assumed to have been retrieved by regsets_fetch_inferior_registers,
5636 unless ALL is non-zero.
5637 Otherwise, REGNO specifies which register (so we can save time). */
5638 static void
5639 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5640 struct regcache *regcache, int regno, int all)
5641 {
5642 struct usrregs_info *usr = regs_info->usrregs;
5643
5644 if (regno == -1)
5645 {
5646 for (regno = 0; regno < usr->num_regs; regno++)
5647 if (all || !linux_register_in_regsets (regs_info, regno))
5648 fetch_register (usr, regcache, regno);
5649 }
5650 else
5651 fetch_register (usr, regcache, regno);
5652 }
5653
5654 /* Store our register values back into the inferior.
5655 If REGNO is -1, do this for all registers, skipping any that are
5656 assumed to have been saved by regsets_store_inferior_registers,
5657 unless ALL is non-zero.
5658 Otherwise, REGNO specifies which register (so we can save time). */
5659 static void
5660 usr_store_inferior_registers (const struct regs_info *regs_info,
5661 struct regcache *regcache, int regno, int all)
5662 {
5663 struct usrregs_info *usr = regs_info->usrregs;
5664
5665 if (regno == -1)
5666 {
5667 for (regno = 0; regno < usr->num_regs; regno++)
5668 if (all || !linux_register_in_regsets (regs_info, regno))
5669 store_register (usr, regcache, regno);
5670 }
5671 else
5672 store_register (usr, regcache, regno);
5673 }
5674
5675 #else /* !HAVE_LINUX_USRREGS */
5676
5677 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5678 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5679
5680 #endif
5681
5682
5683 static void
5684 linux_fetch_registers (struct regcache *regcache, int regno)
5685 {
5686 int use_regsets;
5687 int all = 0;
5688 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5689
5690 if (regno == -1)
5691 {
5692 if (the_low_target.fetch_register != NULL
5693 && regs_info->usrregs != NULL)
5694 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5695 (*the_low_target.fetch_register) (regcache, regno);
5696
5697 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5698 if (regs_info->usrregs != NULL)
5699 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5700 }
5701 else
5702 {
5703 if (the_low_target.fetch_register != NULL
5704 && (*the_low_target.fetch_register) (regcache, regno))
5705 return;
5706
5707 use_regsets = linux_register_in_regsets (regs_info, regno);
5708 if (use_regsets)
5709 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5710 regcache);
5711 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5712 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5713 }
5714 }
5715
5716 static void
5717 linux_store_registers (struct regcache *regcache, int regno)
5718 {
5719 int use_regsets;
5720 int all = 0;
5721 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5722
5723 if (regno == -1)
5724 {
5725 all = regsets_store_inferior_registers (regs_info->regsets_info,
5726 regcache);
5727 if (regs_info->usrregs != NULL)
5728 usr_store_inferior_registers (regs_info, regcache, regno, all);
5729 }
5730 else
5731 {
5732 use_regsets = linux_register_in_regsets (regs_info, regno);
5733 if (use_regsets)
5734 all = regsets_store_inferior_registers (regs_info->regsets_info,
5735 regcache);
5736 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5737 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5738 }
5739 }
5740
5741
5742 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5743 to debugger memory starting at MYADDR. */
5744
5745 static int
5746 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5747 {
5748 int pid = lwpid_of (current_thread);
5749 PTRACE_XFER_TYPE *buffer;
5750 CORE_ADDR addr;
5751 int count;
5752 char filename[64];
5753 int i;
5754 int ret;
5755 int fd;
5756
5757 /* Try using /proc. Don't bother for one word. */
5758 if (len >= 3 * sizeof (long))
5759 {
5760 int bytes;
5761
5762 /* We could keep this file open and cache it - possibly one per
5763 thread. That requires some juggling, but is even faster. */
5764 sprintf (filename, "/proc/%d/mem", pid);
5765 fd = open (filename, O_RDONLY | O_LARGEFILE);
5766 if (fd == -1)
5767 goto no_proc;
5768
5769 /* If pread64 is available, use it. It's faster if the kernel
5770 supports it (only one syscall), and it's 64-bit safe even on
5771 32-bit platforms (for instance, SPARC debugging a SPARC64
5772 application). */
5773 #ifdef HAVE_PREAD64
5774 bytes = pread64 (fd, myaddr, len, memaddr);
5775 #else
5776 bytes = -1;
5777 if (lseek (fd, memaddr, SEEK_SET) != -1)
5778 bytes = read (fd, myaddr, len);
5779 #endif
5780
5781 close (fd);
5782 if (bytes == len)
5783 return 0;
5784
5785 /* Some data was read, we'll try to get the rest with ptrace. */
5786 if (bytes > 0)
5787 {
5788 memaddr += bytes;
5789 myaddr += bytes;
5790 len -= bytes;
5791 }
5792 }
5793
5794 no_proc:
5795 /* Round starting address down to longword boundary. */
5796 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5797 /* Round ending address up; get number of longwords that makes. */
5798 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5799 / sizeof (PTRACE_XFER_TYPE));
5800 /* Allocate buffer of that many longwords. */
5801 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5802
5803 /* Read all the longwords. */
5804 errno = 0;
5805 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5806 {
5807 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5808 about coercing an 8 byte integer to a 4 byte pointer. */
5809 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5810 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5811 (PTRACE_TYPE_ARG4) 0);
5812 if (errno)
5813 break;
5814 }
5815 ret = errno;
5816
5817 /* Copy appropriate bytes out of the buffer. */
5818 if (i > 0)
5819 {
5820 i *= sizeof (PTRACE_XFER_TYPE);
5821 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5822 memcpy (myaddr,
5823 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5824 i < len ? i : len);
5825 }
5826
5827 return ret;
5828 }
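/* Editorial sketch (not part of gdbserver): the /proc fast path of
   linux_read_memory, reduced to a hypothetical standalone helper.
   It uses plain pread where the real code prefers pread64 when
   available, and it omits the ptrace fallback.  Assumes <fcntl.h>,
   <stdio.h> and <unistd.h>.  */
#if 0 /* Example only; never compiled.  */
static ssize_t
example_proc_mem_read (int pid, unsigned long addr, void *dst, size_t len)
{
  char filename[64];
  int fd;
  ssize_t bytes;

  snprintf (filename, sizeof filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* One positioned read; no separate lseek needed.  */
  bytes = pread (fd, dst, len, (off_t) addr);
  close (fd);
  return bytes;
}
#endif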
5829
5830 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5831 memory at MEMADDR. On failure (cannot write to the inferior)
5832 returns the value of errno. Always succeeds if LEN is zero. */
5833
5834 static int
5835 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5836 {
5837 int i;
5838 /* Round starting address down to longword boundary. */
5839 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5840 /* Round ending address up; get number of longwords that makes. */
5841 int count
5842 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5843 / sizeof (PTRACE_XFER_TYPE);
5844
5845 /* Allocate buffer of that many longwords. */
5846 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5847
5848 int pid = lwpid_of (current_thread);
5849
5850 if (len == 0)
5851 {
5852 /* Zero length write always succeeds. */
5853 return 0;
5854 }
5855
5856 if (debug_threads)
5857 {
5858 /* Dump up to four bytes. */
5859 char str[4 * 2 + 1];
5860 char *p = str;
5861 int dump = len < 4 ? len : 4;
5862
5863 for (i = 0; i < dump; i++)
5864 {
5865 sprintf (p, "%02x", myaddr[i]);
5866 p += 2;
5867 }
5868 *p = '\0';
5869
5870 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5871 str, (long) memaddr, pid);
5872 }
5873
5874 /* Fill start and end extra bytes of buffer with existing memory data. */
5875
5876 errno = 0;
5877 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5878 about coercing an 8 byte integer to a 4 byte pointer. */
5879 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5880 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5881 (PTRACE_TYPE_ARG4) 0);
5882 if (errno)
5883 return errno;
5884
5885 if (count > 1)
5886 {
5887 errno = 0;
5888 buffer[count - 1]
5889 = ptrace (PTRACE_PEEKTEXT, pid,
5890 /* Coerce to a uintptr_t first to avoid potential gcc warning
5891 about coercing an 8 byte integer to a 4 byte pointer. */
5892 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5893 * sizeof (PTRACE_XFER_TYPE)),
5894 (PTRACE_TYPE_ARG4) 0);
5895 if (errno)
5896 return errno;
5897 }
5898
5899 /* Copy data to be written over corresponding part of buffer. */
5900
5901 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5902 myaddr, len);
5903
5904 /* Write the entire buffer. */
5905
5906 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5907 {
5908 errno = 0;
5909 ptrace (PTRACE_POKETEXT, pid,
5910 /* Coerce to a uintptr_t first to avoid potential gcc warning
5911 about coercing an 8 byte integer to a 4 byte pointer. */
5912 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5913 (PTRACE_TYPE_ARG4) buffer[i]);
5914 if (errno)
5915 return errno;
5916 }
5917
5918 return 0;
5919 }
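/* Editorial example of the alignment arithmetic shared by
   linux_read_memory and linux_write_memory above: with an 8-byte
   PTRACE_XFER_TYPE, memaddr = 0x1003 and len = 6 give
   addr = 0x1003 & ~7 = 0x1000 and
   count = ((0x1009 - 0x1000) + 8 - 1) / 8 = 2.  Two aligned
   longwords fully cover the unaligned 6-byte span, which is why the
   write path must first peek the partial words at both ends before
   poking anything back.  */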
5920
5921 static void
5922 linux_look_up_symbols (void)
5923 {
5924 #ifdef USE_THREAD_DB
5925 struct process_info *proc = current_process ();
5926
5927 if (proc->priv->thread_db != NULL)
5928 return;
5929
5930 thread_db_init ();
5931 #endif
5932 }
5933
5934 static void
5935 linux_request_interrupt (void)
5936 {
5937 /* Send a SIGINT to the process group. This acts just like the user
5938 typed a ^C on the controlling terminal. */
5939 kill (-signal_pid, SIGINT);
5940 }
5941
5942 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5943 to debugger memory starting at MYADDR. */
5944
5945 static int
5946 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5947 {
5948 char filename[PATH_MAX];
5949 int fd, n;
5950 int pid = lwpid_of (current_thread);
5951
5952 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5953
5954 fd = open (filename, O_RDONLY);
5955 if (fd < 0)
5956 return -1;
5957
5958 if (offset != (CORE_ADDR) 0
5959 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5960 n = -1;
5961 else
5962 n = read (fd, myaddr, len);
5963
5964 close (fd);
5965
5966 return n;
5967 }
5968
5969 /* These breakpoint and watchpoint related wrapper functions simply
5970 pass on the function call if the target has registered a
5971 corresponding function. */
5972
5973 static int
5974 linux_supports_z_point_type (char z_type)
5975 {
5976 return (the_low_target.supports_z_point_type != NULL
5977 && the_low_target.supports_z_point_type (z_type));
5978 }
5979
5980 static int
5981 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5982 int size, struct raw_breakpoint *bp)
5983 {
5984 if (type == raw_bkpt_type_sw)
5985 return insert_memory_breakpoint (bp);
5986 else if (the_low_target.insert_point != NULL)
5987 return the_low_target.insert_point (type, addr, size, bp);
5988 else
5989 /* Unsupported (see target.h). */
5990 return 1;
5991 }
5992
5993 static int
5994 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5995 int size, struct raw_breakpoint *bp)
5996 {
5997 if (type == raw_bkpt_type_sw)
5998 return remove_memory_breakpoint (bp);
5999 else if (the_low_target.remove_point != NULL)
6000 return the_low_target.remove_point (type, addr, size, bp);
6001 else
6002 /* Unsupported (see target.h). */
6003 return 1;
6004 }
6005
6006 /* Implement the to_stopped_by_sw_breakpoint target_ops
6007 method. */
6008
6009 static int
6010 linux_stopped_by_sw_breakpoint (void)
6011 {
6012 struct lwp_info *lwp = get_thread_lwp (current_thread);
6013
6014 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6015 }
6016
6017 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6018 method. */
6019
6020 static int
6021 linux_supports_stopped_by_sw_breakpoint (void)
6022 {
6023 return USE_SIGTRAP_SIGINFO;
6024 }
6025
6026 /* Implement the to_stopped_by_hw_breakpoint target_ops
6027 method. */
6028
6029 static int
6030 linux_stopped_by_hw_breakpoint (void)
6031 {
6032 struct lwp_info *lwp = get_thread_lwp (current_thread);
6033
6034 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6035 }
6036
6037 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6038 method. */
6039
6040 static int
6041 linux_supports_stopped_by_hw_breakpoint (void)
6042 {
6043 return USE_SIGTRAP_SIGINFO;
6044 }
6045
6046 /* Implement the supports_hardware_single_step target_ops method. */
6047
6048 static int
6049 linux_supports_hardware_single_step (void)
6050 {
6051 return can_hardware_single_step ();
6052 }
6053
6054 static int
6055 linux_supports_software_single_step (void)
6056 {
6057 return can_software_single_step ();
6058 }
6059
6060 static int
6061 linux_stopped_by_watchpoint (void)
6062 {
6063 struct lwp_info *lwp = get_thread_lwp (current_thread);
6064
6065 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6066 }
6067
6068 static CORE_ADDR
6069 linux_stopped_data_address (void)
6070 {
6071 struct lwp_info *lwp = get_thread_lwp (current_thread);
6072
6073 return lwp->stopped_data_address;
6074 }
6075
6076 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6077 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6078 && defined(PT_TEXT_END_ADDR)
6079
6080 /* This is only used for targets that define PT_TEXT_ADDR,
6081 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6082 target presumably has other ways of acquiring this information,
6083 such as loadmaps. */
6084
6085 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6086 to tell gdb about. */
6087
6088 static int
6089 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6090 {
6091 unsigned long text, text_end, data;
6092 int pid = lwpid_of (current_thread);
6093
6094 errno = 0;
6095
6096 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6097 (PTRACE_TYPE_ARG4) 0);
6098 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6099 (PTRACE_TYPE_ARG4) 0);
6100 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6101 (PTRACE_TYPE_ARG4) 0);
6102
6103 if (errno == 0)
6104 {
6105 /* Both text and data offsets produced at compile-time (and so
6106 used by gdb) are relative to the beginning of the program,
6107 with the data segment immediately following the text segment.
6108 However, the actual runtime layout in memory may put the data
6109 somewhere else, so when we send gdb a data base-address, we
6110 use the real data base address and subtract the compile-time
6111 data base-address from it (which is just the length of the
6112 text segment). BSS immediately follows data in both
6113 cases. */
6114 *text_p = text;
6115 *data_p = data - (text_end - text);
6116
6117 return 1;
6118 }
6119 return 0;
6120 }
6121 #endif
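/* Editorial example for linux_read_offsets above: if the text
   segment runs from 0x10000 to 0x18000 at runtime but the data
   segment was mapped at 0x40000, the function reports
   *data_p = 0x40000 - (0x18000 - 0x10000) = 0x38000, so that adding
   the compile-time data offset (the text length, 0x8000) back
   yields the real runtime data address 0x40000.  */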
6122
6123 static int
6124 linux_qxfer_osdata (const char *annex,
6125 unsigned char *readbuf, unsigned const char *writebuf,
6126 CORE_ADDR offset, int len)
6127 {
6128 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6129 }
6130
6131 /* Convert a native/host siginfo object into/from the siginfo layout of
6132 the inferior's architecture (DIRECTION is 0 for native-to-inferior, 1 for the reverse). */
6133
6134 static void
6135 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6136 {
6137 int done = 0;
6138
6139 if (the_low_target.siginfo_fixup != NULL)
6140 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6141
6142 /* If there was no callback, or the callback didn't do anything,
6143 then just do a straight memcpy. */
6144 if (!done)
6145 {
6146 if (direction == 1)
6147 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6148 else
6149 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6150 }
6151 }
6152
6153 static int
6154 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6155 unsigned const char *writebuf, CORE_ADDR offset, int len)
6156 {
6157 int pid;
6158 siginfo_t siginfo;
6159 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6160
6161 if (current_thread == NULL)
6162 return -1;
6163
6164 pid = lwpid_of (current_thread);
6165
6166 if (debug_threads)
6167 debug_printf ("%s siginfo for lwp %d.\n",
6168 readbuf != NULL ? "Reading" : "Writing",
6169 pid);
6170
6171 if (offset >= sizeof (siginfo))
6172 return -1;
6173
6174 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6175 return -1;
6176
6177 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6178 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6179 inferior with a 64-bit GDBSERVER should look the same as debugging it
6180 with a 32-bit GDBSERVER, we need to convert it. */
6181 siginfo_fixup (&siginfo, inf_siginfo, 0);
6182
6183 if (offset + len > sizeof (siginfo))
6184 len = sizeof (siginfo) - offset;
6185
6186 if (readbuf != NULL)
6187 memcpy (readbuf, inf_siginfo + offset, len);
6188 else
6189 {
6190 memcpy (inf_siginfo + offset, writebuf, len);
6191
6192 /* Convert back to ptrace layout before flushing it out. */
6193 siginfo_fixup (&siginfo, inf_siginfo, 1);
6194
6195 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6196 return -1;
6197 }
6198
6199 return len;
6200 }
6201
6202 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6203 it lets us notice when children change state, and it acts as the
6204 handler for the sigsuspend in my_waitpid. */
6205
6206 static void
6207 sigchld_handler (int signo)
6208 {
6209 int old_errno = errno;
6210
6211 if (debug_threads)
6212 {
6213 do
6214 {
6215 /* fprintf is not async-signal-safe, so call write
6216 directly. */
6217 if (write (2, "sigchld_handler\n",
6218 sizeof ("sigchld_handler\n") - 1) < 0)
6219 break; /* just ignore */
6220 } while (0);
6221 }
6222
6223 if (target_is_async_p ())
6224 async_file_mark (); /* trigger a linux_wait */
6225
6226 errno = old_errno;
6227 }
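/* Editorial note: only async-signal-safe functions (see
   signal-safety(7)) may be called from a handler such as the one
   above; write(2) qualifies while fprintf(3) does not, and errno
   must be preserved because write may clobber it.  A hypothetical
   minimal handler following the same pattern:  */
#if 0 /* Example only; never compiled.  */
static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;
  static const char msg[] = "sigchld\n";

  /* Best effort; there is nothing useful to do on failure.  */
  if (write (2, msg, sizeof (msg) - 1) < 0)
    ;
  errno = saved_errno;
}
#endif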
6228
6229 static int
6230 linux_supports_non_stop (void)
6231 {
6232 return 1;
6233 }
6234
6235 static int
6236 linux_async (int enable)
6237 {
6238 int previous = target_is_async_p ();
6239
6240 if (debug_threads)
6241 debug_printf ("linux_async (%d), previous=%d\n",
6242 enable, previous);
6243
6244 if (previous != enable)
6245 {
6246 sigset_t mask;
6247 sigemptyset (&mask);
6248 sigaddset (&mask, SIGCHLD);
6249
6250 sigprocmask (SIG_BLOCK, &mask, NULL);
6251
6252 if (enable)
6253 {
6254 if (pipe (linux_event_pipe) == -1)
6255 {
6256 linux_event_pipe[0] = -1;
6257 linux_event_pipe[1] = -1;
6258 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6259
6260 warning ("creating event pipe failed.");
6261 return previous;
6262 }
6263
6264 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6265 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6266
6267 /* Register the event loop handler. */
6268 add_file_handler (linux_event_pipe[0],
6269 handle_target_event, NULL);
6270
6271 /* Always trigger a linux_wait. */
6272 async_file_mark ();
6273 }
6274 else
6275 {
6276 delete_file_handler (linux_event_pipe[0]);
6277
6278 close (linux_event_pipe[0]);
6279 close (linux_event_pipe[1]);
6280 linux_event_pipe[0] = -1;
6281 linux_event_pipe[1] = -1;
6282 }
6283
6284 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6285 }
6286
6287 return previous;
6288 }
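/* Editorial sketch: the event pipe managed by linux_async above is
   an instance of the classic self-pipe trick - the SIGCHLD handler
   marks a non-blocking pipe and the event loop watches its read end,
   turning an asynchronous signal into an ordinary file-descriptor
   event.  A hypothetical reduced version, assuming <fcntl.h> and
   <unistd.h>:  */
#if 0 /* Example only; never compiled.  */
static int example_event_pipe[2] = { -1, -1 };

static int
example_event_pipe_init (void)
{
  if (pipe (example_event_pipe) == -1)
    return -1;
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

static void
example_event_pipe_mark (void)
{
  /* Async-signal-safe; if the pipe is already full, the event is
     pending anyway.  */
  (void) write (example_event_pipe[1], "+", 1);
}
#endif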
6289
6290 static int
6291 linux_start_non_stop (int nonstop)
6292 {
6293 /* Register or unregister from event-loop accordingly. */
6294 linux_async (nonstop);
6295
6296 if (target_is_async_p () != (nonstop != 0))
6297 return -1;
6298
6299 return 0;
6300 }
6301
6302 static int
6303 linux_supports_multi_process (void)
6304 {
6305 return 1;
6306 }
6307
6308 /* Check if fork events are supported. */
6309
6310 static int
6311 linux_supports_fork_events (void)
6312 {
6313 return linux_supports_tracefork ();
6314 }
6315
6316 /* Check if vfork events are supported. */
6317
6318 static int
6319 linux_supports_vfork_events (void)
6320 {
6321 return linux_supports_tracefork ();
6322 }
6323
6324 /* Check if exec events are supported. */
6325
6326 static int
6327 linux_supports_exec_events (void)
6328 {
6329 return linux_supports_traceexec ();
6330 }
6331
6332 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6333 ptrace flags for all inferiors. This is in case the new GDB connection
6334 doesn't support the same set of events that the previous one did. */
6335
6336 static void
6337 linux_handle_new_gdb_connection (void)
6338 {
6339 /* Request that all the lwps reset their ptrace options. */
6340 for_each_thread ([] (thread_info *thread)
6341 {
6342 struct lwp_info *lwp = get_thread_lwp (thread);
6343
6344 if (!lwp->stopped)
6345 {
6346 /* Stop the lwp so we can modify its ptrace options. */
6347 lwp->must_set_ptrace_flags = 1;
6348 linux_stop_lwp (lwp);
6349 }
6350 else
6351 {
6352 /* Already stopped; go ahead and set the ptrace options. */
6353 struct process_info *proc = find_process_pid (pid_of (thread));
6354 int options = linux_low_ptrace_options (proc->attached);
6355
6356 linux_enable_event_reporting (lwpid_of (thread), options);
6357 lwp->must_set_ptrace_flags = 0;
6358 }
6359 });
6360 }
6361
6362 static int
6363 linux_supports_disable_randomization (void)
6364 {
6365 #ifdef HAVE_PERSONALITY
6366 return 1;
6367 #else
6368 return 0;
6369 #endif
6370 }
6371
6372 static int
6373 linux_supports_agent (void)
6374 {
6375 return 1;
6376 }
6377
6378 static int
6379 linux_supports_range_stepping (void)
6380 {
6381 if (can_software_single_step ())
6382 return 1;
6383 if (*the_low_target.supports_range_stepping == NULL)
6384 return 0;
6385
6386 return (*the_low_target.supports_range_stepping) ();
6387 }
6388
6389 /* Enumerate spufs IDs for process PID. */
6390 static int
6391 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6392 {
6393 int pos = 0;
6394 int written = 0;
6395 char path[128];
6396 DIR *dir;
6397 struct dirent *entry;
6398
6399 sprintf (path, "/proc/%ld/fd", pid);
6400 dir = opendir (path);
6401 if (!dir)
6402 return -1;
6403
6404 rewinddir (dir);
6405 while ((entry = readdir (dir)) != NULL)
6406 {
6407 struct stat st;
6408 struct statfs stfs;
6409 int fd;
6410
6411 fd = atoi (entry->d_name);
6412 if (!fd)
6413 continue;
6414
6415 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6416 if (stat (path, &st) != 0)
6417 continue;
6418 if (!S_ISDIR (st.st_mode))
6419 continue;
6420
6421 if (statfs (path, &stfs) != 0)
6422 continue;
6423 if (stfs.f_type != SPUFS_MAGIC)
6424 continue;
6425
6426 if (pos >= offset && pos + 4 <= offset + len)
6427 {
6428 *(unsigned int *)(buf + pos - offset) = fd;
6429 written += 4;
6430 }
6431 pos += 4;
6432 }
6433
6434 closedir (dir);
6435 return written;
6436 }
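/* Editorial sketch: the spufs check above is an instance of a
   general pattern - identifying which filesystem a path lives on by
   its statfs magic number.  A hypothetical standalone helper,
   assuming <sys/vfs.h>:  */
#if 0 /* Example only; never compiled.  */
static int
example_path_on_fs_p (const char *path, long magic)
{
  struct statfs stfs;

  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == magic; /* E.g. SPUFS_MAGIC.  */
}
#endif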
6437
6438 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6439 object type, using the /proc file system. */
6440 static int
6441 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6442 unsigned const char *writebuf,
6443 CORE_ADDR offset, int len)
6444 {
6445 long pid = lwpid_of (current_thread);
6446 char buf[128];
6447 int fd = 0;
6448 int ret = 0;
6449
6450 if (!writebuf && !readbuf)
6451 return -1;
6452
6453 if (!*annex)
6454 {
6455 if (!readbuf)
6456 return -1;
6457 else
6458 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6459 }
6460
6461 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6462 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6463 if (fd <= 0)
6464 return -1;
6465
6466 if (offset != 0
6467 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6468 {
6469 close (fd);
6470 return 0;
6471 }
6472
6473 if (writebuf)
6474 ret = write (fd, writebuf, (size_t) len);
6475 else
6476 ret = read (fd, readbuf, (size_t) len);
6477
6478 close (fd);
6479 return ret;
6480 }
6481
6482 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6483 struct target_loadseg
6484 {
6485 /* Core address to which the segment is mapped. */
6486 Elf32_Addr addr;
6487 /* VMA recorded in the program header. */
6488 Elf32_Addr p_vaddr;
6489 /* Size of this segment in memory. */
6490 Elf32_Word p_memsz;
6491 };
6492
6493 # if defined PT_GETDSBT
6494 struct target_loadmap
6495 {
6496 /* Protocol version number, must be zero. */
6497 Elf32_Word version;
6498 /* Pointer to the DSBT table, its size, and the DSBT index. */
6499 unsigned *dsbt_table;
6500 unsigned dsbt_size, dsbt_index;
6501 /* Number of segments in this map. */
6502 Elf32_Word nsegs;
6503 /* The actual memory map. */
6504 struct target_loadseg segs[/*nsegs*/];
6505 };
6506 # define LINUX_LOADMAP PT_GETDSBT
6507 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6508 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6509 # else
6510 struct target_loadmap
6511 {
6512 /* Protocol version number, must be zero. */
6513 Elf32_Half version;
6514 /* Number of segments in this map. */
6515 Elf32_Half nsegs;
6516 /* The actual memory map. */
6517 struct target_loadseg segs[/*nsegs*/];
6518 };
6519 # define LINUX_LOADMAP PTRACE_GETFDPIC
6520 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6521 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6522 # endif
6523
6524 static int
6525 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6526 unsigned char *myaddr, unsigned int len)
6527 {
6528 int pid = lwpid_of (current_thread);
6529 int addr = -1;
6530 struct target_loadmap *data = NULL;
6531 unsigned int actual_length, copy_length;
6532
6533 if (strcmp (annex, "exec") == 0)
6534 addr = (int) LINUX_LOADMAP_EXEC;
6535 else if (strcmp (annex, "interp") == 0)
6536 addr = (int) LINUX_LOADMAP_INTERP;
6537 else
6538 return -1;
6539
6540 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6541 return -1;
6542
6543 if (data == NULL)
6544 return -1;
6545
6546 actual_length = sizeof (struct target_loadmap)
6547 + sizeof (struct target_loadseg) * data->nsegs;
6548
6549 if (offset < 0 || offset > actual_length)
6550 return -1;
6551
6552 copy_length = actual_length - offset < len ? actual_length - offset : len;
6553 memcpy (myaddr, (char *) data + offset, copy_length);
6554 return copy_length;
6555 }
6556 #else
6557 # define linux_read_loadmap NULL
6558 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6559
6560 static void
6561 linux_process_qsupported (char **features, int count)
6562 {
6563 if (the_low_target.process_qsupported != NULL)
6564 the_low_target.process_qsupported (features, count);
6565 }
6566
6567 static int
6568 linux_supports_catch_syscall (void)
6569 {
6570 return (the_low_target.get_syscall_trapinfo != NULL
6571 && linux_supports_tracesysgood ());
6572 }
6573
6574 static int
6575 linux_get_ipa_tdesc_idx (void)
6576 {
6577 if (the_low_target.get_ipa_tdesc_idx == NULL)
6578 return 0;
6579
6580 return (*the_low_target.get_ipa_tdesc_idx) ();
6581 }
6582
6583 static int
6584 linux_supports_tracepoints (void)
6585 {
6586 if (*the_low_target.supports_tracepoints == NULL)
6587 return 0;
6588
6589 return (*the_low_target.supports_tracepoints) ();
6590 }
6591
6592 static CORE_ADDR
6593 linux_read_pc (struct regcache *regcache)
6594 {
6595 if (the_low_target.get_pc == NULL)
6596 return 0;
6597
6598 return (*the_low_target.get_pc) (regcache);
6599 }
6600
6601 static void
6602 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6603 {
6604 gdb_assert (the_low_target.set_pc != NULL);
6605
6606 (*the_low_target.set_pc) (regcache, pc);
6607 }
6608
6609 static int
6610 linux_thread_stopped (struct thread_info *thread)
6611 {
6612 return get_thread_lwp (thread)->stopped;
6613 }
6614
6615 /* This exposes stop-all-threads functionality to other modules. */
6616
6617 static void
6618 linux_pause_all (int freeze)
6619 {
6620 stop_all_lwps (freeze, NULL);
6621 }
6622
6623 /* This exposes unstop-all-threads functionality to other gdbserver
6624 modules. */
6625
6626 static void
6627 linux_unpause_all (int unfreeze)
6628 {
6629 unstop_all_lwps (unfreeze, NULL);
6630 }
6631
6632 static int
6633 linux_prepare_to_access_memory (void)
6634 {
6635 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6636 running LWP. */
6637 if (non_stop)
6638 linux_pause_all (1);
6639 return 0;
6640 }
6641
6642 static void
6643 linux_done_accessing_memory (void)
6644 {
6645 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6646 running LWP. */
6647 if (non_stop)
6648 linux_unpause_all (1);
6649 }
6650
6651 static int
6652 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6653 CORE_ADDR collector,
6654 CORE_ADDR lockaddr,
6655 ULONGEST orig_size,
6656 CORE_ADDR *jump_entry,
6657 CORE_ADDR *trampoline,
6658 ULONGEST *trampoline_size,
6659 unsigned char *jjump_pad_insn,
6660 ULONGEST *jjump_pad_insn_size,
6661 CORE_ADDR *adjusted_insn_addr,
6662 CORE_ADDR *adjusted_insn_addr_end,
6663 char *err)
6664 {
6665 return (*the_low_target.install_fast_tracepoint_jump_pad)
6666 (tpoint, tpaddr, collector, lockaddr, orig_size,
6667 jump_entry, trampoline, trampoline_size,
6668 jjump_pad_insn, jjump_pad_insn_size,
6669 adjusted_insn_addr, adjusted_insn_addr_end,
6670 err);
6671 }
6672
6673 static struct emit_ops *
6674 linux_emit_ops (void)
6675 {
6676 if (the_low_target.emit_ops != NULL)
6677 return (*the_low_target.emit_ops) ();
6678 else
6679 return NULL;
6680 }
6681
6682 static int
6683 linux_get_min_fast_tracepoint_insn_len (void)
6684 {
6685 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6686 }
6687
6688 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6689
6690 static int
6691 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6692 CORE_ADDR *phdr_memaddr, int *num_phdr)
6693 {
6694 char filename[PATH_MAX];
6695 int fd;
6696 const int auxv_size = is_elf64
6697 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6698 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6699
6700 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6701
6702 fd = open (filename, O_RDONLY);
6703 if (fd < 0)
6704 return 1;
6705
6706 *phdr_memaddr = 0;
6707 *num_phdr = 0;
6708 while (read (fd, buf, auxv_size) == auxv_size
6709 && (*phdr_memaddr == 0 || *num_phdr == 0))
6710 {
6711 if (is_elf64)
6712 {
6713 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6714
6715 switch (aux->a_type)
6716 {
6717 case AT_PHDR:
6718 *phdr_memaddr = aux->a_un.a_val;
6719 break;
6720 case AT_PHNUM:
6721 *num_phdr = aux->a_un.a_val;
6722 break;
6723 }
6724 }
6725 else
6726 {
6727 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6728
6729 switch (aux->a_type)
6730 {
6731 case AT_PHDR:
6732 *phdr_memaddr = aux->a_un.a_val;
6733 break;
6734 case AT_PHNUM:
6735 *num_phdr = aux->a_un.a_val;
6736 break;
6737 }
6738 }
6739 }
6740
6741 close (fd);
6742
6743 if (*phdr_memaddr == 0 || *num_phdr == 0)
6744 {
6745 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6746 "phdr_memaddr = %ld, phdr_num = %d",
6747 (long) *phdr_memaddr, *num_phdr);
6748 return 2;
6749 }
6750
6751 return 0;
6752 }
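/* Editorial note: each auxv record read above is a fixed-size
   (type, value) pair, and the vector ends with an AT_NULL entry.
   For reference, the 64-bit layout from <elf.h> is

     typedef struct
     {
       uint64_t a_type;                 -- AT_PHDR, AT_PHNUM, ...
       union { uint64_t a_val; } a_un;
     } Elf64_auxv_t;

   so the loop reads sizeof (Elf64_auxv_t) bytes at a time and
   switches on a_type until both AT_PHDR and AT_PHNUM are found.  */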
6753
6754 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6755
6756 static CORE_ADDR
6757 get_dynamic (const int pid, const int is_elf64)
6758 {
6759 CORE_ADDR phdr_memaddr, relocation;
6760 int num_phdr, i;
6761 unsigned char *phdr_buf;
6762 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6763
6764 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6765 return 0;
6766
6767 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6768 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6769
6770 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6771 return 0;
6772
6773 /* Compute relocation: it is expected to be 0 for "regular" executables,
6774 non-zero for PIE ones. */
6775 relocation = -1;
6776 for (i = 0; relocation == -1 && i < num_phdr; i++)
6777 if (is_elf64)
6778 {
6779 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6780
6781 if (p->p_type == PT_PHDR)
6782 relocation = phdr_memaddr - p->p_vaddr;
6783 }
6784 else
6785 {
6786 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6787
6788 if (p->p_type == PT_PHDR)
6789 relocation = phdr_memaddr - p->p_vaddr;
6790 }
6791
6792 if (relocation == -1)
6793 {
6794 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6795 all real-world executables, including PIE executables, always have
6796 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6797 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6798 provides DT_DEBUG anyway (fpc binaries are statically linked).
6799
6800 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6801
6802 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6803
6804 return 0;
6805 }
6806
6807 for (i = 0; i < num_phdr; i++)
6808 {
6809 if (is_elf64)
6810 {
6811 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6812
6813 if (p->p_type == PT_DYNAMIC)
6814 return p->p_vaddr + relocation;
6815 }
6816 else
6817 {
6818 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6819
6820 if (p->p_type == PT_DYNAMIC)
6821 return p->p_vaddr + relocation;
6822 }
6823 }
6824
6825 return 0;
6826 }
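/* Editorial example for the relocation computed above: for a PIE
   loaded at 0x555555554000 whose PT_PHDR records p_vaddr 0x40, the
   auxv AT_PHDR entry reports 0x555555554040, so
   relocation = 0x555555554040 - 0x40 = 0x555555554000, i.e. the load
   bias, and PT_DYNAMIC's p_vaddr is adjusted by that amount.  For a
   non-PIE executable the same computation yields 0.  */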
6827
6828 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6829 can be 0 if the inferior does not yet have the library list initialized.
6830 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6831 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6832
6833 static CORE_ADDR
6834 get_r_debug (const int pid, const int is_elf64)
6835 {
6836 CORE_ADDR dynamic_memaddr;
6837 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6838 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6839 CORE_ADDR map = -1;
6840
6841 dynamic_memaddr = get_dynamic (pid, is_elf64);
6842 if (dynamic_memaddr == 0)
6843 return map;
6844
6845 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6846 {
6847 if (is_elf64)
6848 {
6849 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6850 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6851 union
6852 {
6853 Elf64_Xword map;
6854 unsigned char buf[sizeof (Elf64_Xword)];
6855 }
6856 rld_map;
6857 #endif
6858 #ifdef DT_MIPS_RLD_MAP
6859 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6860 {
6861 if (linux_read_memory (dyn->d_un.d_val,
6862 rld_map.buf, sizeof (rld_map.buf)) == 0)
6863 return rld_map.map;
6864 else
6865 break;
6866 }
6867 #endif /* DT_MIPS_RLD_MAP */
6868 #ifdef DT_MIPS_RLD_MAP_REL
6869 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6870 {
6871 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6872 rld_map.buf, sizeof (rld_map.buf)) == 0)
6873 return rld_map.map;
6874 else
6875 break;
6876 }
6877 #endif /* DT_MIPS_RLD_MAP_REL */
6878
6879 if (dyn->d_tag == DT_DEBUG && map == -1)
6880 map = dyn->d_un.d_val;
6881
6882 if (dyn->d_tag == DT_NULL)
6883 break;
6884 }
6885 else
6886 {
6887 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6888 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6889 union
6890 {
6891 Elf32_Word map;
6892 unsigned char buf[sizeof (Elf32_Word)];
6893 }
6894 rld_map;
6895 #endif
6896 #ifdef DT_MIPS_RLD_MAP
6897 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6898 {
6899 if (linux_read_memory (dyn->d_un.d_val,
6900 rld_map.buf, sizeof (rld_map.buf)) == 0)
6901 return rld_map.map;
6902 else
6903 break;
6904 }
6905 #endif /* DT_MIPS_RLD_MAP */
6906 #ifdef DT_MIPS_RLD_MAP_REL
6907 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6908 {
6909 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6910 rld_map.buf, sizeof (rld_map.buf)) == 0)
6911 return rld_map.map;
6912 else
6913 break;
6914 }
6915 #endif /* DT_MIPS_RLD_MAP_REL */
6916
6917 if (dyn->d_tag == DT_DEBUG && map == -1)
6918 map = dyn->d_un.d_val;
6919
6920 if (dyn->d_tag == DT_NULL)
6921 break;
6922 }
6923
6924 dynamic_memaddr += dyn_size;
6925 }
6926
6927 return map;
6928 }
6929
6930 /* Read one pointer from MEMADDR in the inferior. */
6931
6932 static int
6933 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6934 {
6935 int ret;
6936
6937 /* Go through a union so this works on either big or little endian
6938 hosts, when the inferior's pointer size is smaller than the size
6939 of CORE_ADDR. It is assumed that the inferior's endianness is the
6940 same as the superior's. */
6941 union
6942 {
6943 CORE_ADDR core_addr;
6944 unsigned int ui;
6945 unsigned char uc;
6946 } addr;
6947
6948 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6949 if (ret == 0)
6950 {
6951 if (ptr_size == sizeof (CORE_ADDR))
6952 *ptr = addr.core_addr;
6953 else if (ptr_size == sizeof (unsigned int))
6954 *ptr = addr.ui;
6955 else
6956 gdb_assert_not_reached ("unhandled pointer size");
6957 }
6958 return ret;
6959 }
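/* Editorial usage sketch for read_one_ptr above, in the style of
   linux_qxfer_libraries_svr4 below: chase the l_next link of a
   32-bit inferior's link_map entry.  The offset 12 and pointer size
   4 come from the lmo_32bit_offsets table below.  */
#if 0 /* Example only; never compiled.  */
static void
example_walk_one_link_map (CORE_ADDR lm_addr)
{
  CORE_ADDR next;

  if (read_one_ptr (lm_addr + 12, &next, 4) == 0)
    lm_addr = next; /* Advance to the next shared-library entry.  */
}
#endif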
6960
6961 struct link_map_offsets
6962 {
6963 /* Offset and size of r_debug.r_version. */
6964 int r_version_offset;
6965
6966 /* Offset and size of r_debug.r_map. */
6967 int r_map_offset;
6968
6969 /* Offset to l_addr field in struct link_map. */
6970 int l_addr_offset;
6971
6972 /* Offset to l_name field in struct link_map. */
6973 int l_name_offset;
6974
6975 /* Offset to l_ld field in struct link_map. */
6976 int l_ld_offset;
6977
6978 /* Offset to l_next field in struct link_map. */
6979 int l_next_offset;
6980
6981 /* Offset to l_prev field in struct link_map. */
6982 int l_prev_offset;
6983 };
6984
6985 /* Construct qXfer:libraries-svr4:read reply. */
6986
6987 static int
6988 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6989 unsigned const char *writebuf,
6990 CORE_ADDR offset, int len)
6991 {
6992 struct process_info_private *const priv = current_process ()->priv;
6993 char filename[PATH_MAX];
6994 int pid, is_elf64;
6995
6996 static const struct link_map_offsets lmo_32bit_offsets =
6997 {
6998 0, /* r_version offset. */
6999 4, /* r_debug.r_map offset. */
7000 0, /* l_addr offset in link_map. */
7001 4, /* l_name offset in link_map. */
7002 8, /* l_ld offset in link_map. */
7003 12, /* l_next offset in link_map. */
7004 16 /* l_prev offset in link_map. */
7005 };
7006
7007 static const struct link_map_offsets lmo_64bit_offsets =
7008 {
7009 0, /* r_version offset. */
7010 8, /* r_debug.r_map offset. */
7011 0, /* l_addr offset in link_map. */
7012 8, /* l_name offset in link_map. */
7013 16, /* l_ld offset in link_map. */
7014 24, /* l_next offset in link_map. */
7015 32 /* l_prev offset in link_map. */
7016 };
7017 const struct link_map_offsets *lmo;
7018 unsigned int machine;
7019 int ptr_size;
7020 CORE_ADDR lm_addr = 0, lm_prev = 0;
7021 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7022 int header_done = 0;
7023
7024 if (writebuf != NULL)
7025 return -2;
7026 if (readbuf == NULL)
7027 return -1;
7028
7029 pid = lwpid_of (current_thread);
7030 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7031 is_elf64 = elf_64_file_p (filename, &machine);
7032 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7033 ptr_size = is_elf64 ? 8 : 4;
7034
7035 while (annex[0] != '\0')
7036 {
7037 const char *sep;
7038 CORE_ADDR *addrp;
7039 int name_len;
7040
7041 sep = strchr (annex, '=');
7042 if (sep == NULL)
7043 break;
7044
7045 name_len = sep - annex;
7046 if (name_len == 5 && startswith (annex, "start"))
7047 addrp = &lm_addr;
7048 else if (name_len == 4 && startswith (annex, "prev"))
7049 addrp = &lm_prev;
7050 else
7051 {
7052 annex = strchr (sep, ';');
7053 if (annex == NULL)
7054 break;
7055 annex++;
7056 continue;
7057 }
7058
7059 annex = decode_address_to_semicolon (addrp, sep + 1);
7060 }
7061
7062 if (lm_addr == 0)
7063 {
7064 int r_version = 0;
7065
7066 if (priv->r_debug == 0)
7067 priv->r_debug = get_r_debug (pid, is_elf64);
7068
7069 /* We failed to find DT_DEBUG. This situation will not change
7070 for this inferior, so do not retry. Report it to GDB as E01;
7071 see GDB's solib-svr4.c for the reasons. */
7072 if (priv->r_debug == (CORE_ADDR) -1)
7073 return -1;
7074
7075 if (priv->r_debug != 0)
7076 {
7077 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7078 (unsigned char *) &r_version,
7079 sizeof (r_version)) != 0
7080 || r_version != 1)
7081 {
7082 warning ("unexpected r_debug version %d", r_version);
7083 }
7084 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7085 &lm_addr, ptr_size) != 0)
7086 {
7087 warning ("unable to read r_map from 0x%lx",
7088 (long) priv->r_debug + lmo->r_map_offset);
7089 }
7090 }
7091 }
7092
7093 std::string document = "<library-list-svr4 version=\"1.0\"";
7094
7095 while (lm_addr
7096 && read_one_ptr (lm_addr + lmo->l_name_offset,
7097 &l_name, ptr_size) == 0
7098 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7099 &l_addr, ptr_size) == 0
7100 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7101 &l_ld, ptr_size) == 0
7102 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7103 &l_prev, ptr_size) == 0
7104 && read_one_ptr (lm_addr + lmo->l_next_offset,
7105 &l_next, ptr_size) == 0)
7106 {
7107 unsigned char libname[PATH_MAX];
7108
7109 if (lm_prev != l_prev)
7110 {
7111 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7112 (long) lm_prev, (long) l_prev);
7113 break;
7114 }
7115
7116 /* Ignore the first entry even if it has a valid name, since it
7117 corresponds to the main executable. The first entry should not be
7118 skipped if the dynamic loader was loaded late by a static executable
7119 (see the solib-svr4.c parameter ignore_first), but in that case the
7120 main executable has no PT_DYNAMIC and this function has already
7121 exited above because get_r_debug failed. */
7122 if (lm_prev == 0)
7123 string_appendf (document, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7124 else
7125 {
7126 /* Not checking for error because reading may stop before
7127 we've got PATH_MAX worth of characters. */
7128 libname[0] = '\0';
7129 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7130 libname[sizeof (libname) - 1] = '\0';
7131 if (libname[0] != '\0')
7132 {
7133 if (!header_done)
7134 {
7135 /* Terminate `<library-list-svr4'. */
7136 document += '>';
7137 header_done = 1;
7138 }
7139
7140 string_appendf (document, "<library name=\"");
7141 xml_escape_text_append (&document, (char *) libname);
7142 string_appendf (document, "\" lm=\"0x%lx\" "
7143 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7144 (unsigned long) lm_addr, (unsigned long) l_addr,
7145 (unsigned long) l_ld);
7146 }
7147 }
7148
7149 lm_prev = lm_addr;
7150 lm_addr = l_next;
7151 }
7152
7153 if (!header_done)
7154 {
7155 /* Empty list; terminate `<library-list-svr4'. */
7156 document += "/>";
7157 }
7158 else
7159 document += "</library-list-svr4>";
7160
7161 int document_len = document.length ();
7162 if (offset < document_len)
7163 document_len -= offset;
7164 else
7165 document_len = 0;
7166 if (len > document_len)
7167 len = document_len;
7168
7169 memcpy (readbuf, document.data () + offset, len);
7170
7171 return len;
7172 }
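/* Editorial note: the annex parsed at the top of the function above
   is a semicolon-separated list of NAME=HEXADDR pairs, e.g.
   "start=7f0000001000;prev=0;" (addresses here are placeholders),
   which lets GDB resume the library walk from a particular link_map
   entry instead of rereading the whole list.  */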
7173
7174 #ifdef HAVE_LINUX_BTRACE
7175
7176 /* See to_disable_btrace target method. */
7177
7178 static int
7179 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7180 {
7181 enum btrace_error err;
7182
7183 err = linux_disable_btrace (tinfo);
7184 return (err == BTRACE_ERR_NONE ? 0 : -1);
7185 }
7186
7187 /* Encode an Intel Processor Trace configuration. */
7188
7189 static void
7190 linux_low_encode_pt_config (struct buffer *buffer,
7191 const struct btrace_data_pt_config *config)
7192 {
7193 buffer_grow_str (buffer, "<pt-config>\n");
7194
7195 switch (config->cpu.vendor)
7196 {
7197 case CV_INTEL:
7198 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7199 "model=\"%u\" stepping=\"%u\"/>\n",
7200 config->cpu.family, config->cpu.model,
7201 config->cpu.stepping);
7202 break;
7203
7204 default:
7205 break;
7206 }
7207
7208 buffer_grow_str (buffer, "</pt-config>\n");
7209 }
7210
7211 /* Encode a raw buffer. */
7212
7213 static void
7214 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7215 unsigned int size)
7216 {
7217 if (size == 0)
7218 return;
7219
7220 /* We use hex encoding - see common/rsp-low.h. */
7221 buffer_grow_str (buffer, "<raw>\n");
7222
7223 while (size-- > 0)
7224 {
7225 char elem[2];
7226
7227 elem[0] = tohex ((*data >> 4) & 0xf);
7228 elem[1] = tohex (*data++ & 0xf);
7229
7230 buffer_grow (buffer, elem, 2);
7231 }
7232
7233 buffer_grow_str (buffer, "</raw>\n");
7234 }
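/* Editorial sketch: the same nibble-to-hex expansion as
   linux_low_encode_raw above, written as a hypothetical standalone
   helper.  DST must have room for 2 * LEN characters.  */
#if 0 /* Example only; never compiled.  */
static void
example_bin2hex (const unsigned char *src, char *dst, size_t len)
{
  static const char hex[] = "0123456789abcdef";

  while (len-- > 0)
    {
      *dst++ = hex[(*src >> 4) & 0xf]; /* High nibble first.  */
      *dst++ = hex[*src++ & 0xf];
    }
}
#endif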
7235
7236 /* See to_read_btrace target method. */
7237
7238 static int
7239 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7240 enum btrace_read_type type)
7241 {
7242 struct btrace_data btrace;
7243 struct btrace_block *block;
7244 enum btrace_error err;
7245 int i;
7246
7247 err = linux_read_btrace (&btrace, tinfo, type);
7248 if (err != BTRACE_ERR_NONE)
7249 {
7250 if (err == BTRACE_ERR_OVERFLOW)
7251 buffer_grow_str0 (buffer, "E.Overflow.");
7252 else
7253 buffer_grow_str0 (buffer, "E.Generic Error.");
7254
7255 return -1;
7256 }
7257
7258 switch (btrace.format)
7259 {
7260 case BTRACE_FORMAT_NONE:
7261 buffer_grow_str0 (buffer, "E.No Trace.");
7262 return -1;
7263
7264 case BTRACE_FORMAT_BTS:
7265 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7266 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7267
7268 for (i = 0;
7269 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7270 i++)
7271 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7272 paddress (block->begin), paddress (block->end));
7273
7274 buffer_grow_str0 (buffer, "</btrace>\n");
7275 break;
7276
7277 case BTRACE_FORMAT_PT:
7278 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7279 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7280 buffer_grow_str (buffer, "<pt>\n");
7281
7282 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7283
7284 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7285 btrace.variant.pt.size);
7286
7287 buffer_grow_str (buffer, "</pt>\n");
7288 buffer_grow_str0 (buffer, "</btrace>\n");
7289 break;
7290
7291 default:
7292 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7293 return -1;
7294 }
7295
7296 return 0;
7297 }
7298
7299 /* See to_btrace_conf target method. */
7300
7301 static int
7302 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7303 struct buffer *buffer)
7304 {
7305 const struct btrace_config *conf;
7306
7307 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7308 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7309
7310 conf = linux_btrace_conf (tinfo);
7311 if (conf != NULL)
7312 {
7313 switch (conf->format)
7314 {
7315 case BTRACE_FORMAT_NONE:
7316 break;
7317
7318 case BTRACE_FORMAT_BTS:
7319 buffer_xml_printf (buffer, "<bts");
7320 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7321 buffer_xml_printf (buffer, " />\n");
7322 break;
7323
7324 case BTRACE_FORMAT_PT:
7325 buffer_xml_printf (buffer, "<pt");
7326 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7327 buffer_xml_printf (buffer, "/>\n");
7328 break;
7329 }
7330 }
7331
7332 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7333 return 0;
7334 }
7335 #endif /* HAVE_LINUX_BTRACE */
7336
7337 /* See nat/linux-nat.h. */
7338
7339 ptid_t
7340 current_lwp_ptid (void)
7341 {
7342 return ptid_of (current_thread);
7343 }
7344
7345 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7346
7347 static int
7348 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7349 {
7350 if (the_low_target.breakpoint_kind_from_pc != NULL)
7351 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7352 else
7353 return default_breakpoint_kind_from_pc (pcptr);
7354 }
7355
7356 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7357
7358 static const gdb_byte *
7359 linux_sw_breakpoint_from_kind (int kind, int *size)
7360 {
7361 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7362
7363 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7364 }
7365
7366 /* Implementation of the target_ops method
7367 "breakpoint_kind_from_current_state". */
7368
7369 static int
7370 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7371 {
7372 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7373 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7374 else
7375 return linux_breakpoint_kind_from_pc (pcptr);
7376 }
7377
7378 /* Default implementation of linux_target_ops method "set_pc" for
7379 32-bit pc register which is literally named "pc". */
7380
7381 void
7382 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7383 {
7384 uint32_t newpc = pc;
7385
7386 supply_register_by_name (regcache, "pc", &newpc);
7387 }
7388
7389 /* Default implementation of linux_target_ops method "get_pc" for
7390 32-bit pc register which is literally named "pc". */
7391
7392 CORE_ADDR
7393 linux_get_pc_32bit (struct regcache *regcache)
7394 {
7395 uint32_t pc;
7396
7397 collect_register_by_name (regcache, "pc", &pc);
7398 if (debug_threads)
7399 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7400 return pc;
7401 }
7402
7403 /* Default implementation of linux_target_ops method "set_pc" for
7404 64-bit pc register which is literally named "pc". */
7405
7406 void
7407 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7408 {
7409 uint64_t newpc = pc;
7410
7411 supply_register_by_name (regcache, "pc", &newpc);
7412 }
7413
7414 /* Default implementation of linux_target_ops method "get_pc" for
7415 64-bit pc register which is literally named "pc". */
7416
7417 CORE_ADDR
7418 linux_get_pc_64bit (struct regcache *regcache)
7419 {
7420 uint64_t pc;
7421
7422 collect_register_by_name (regcache, "pc", &pc);
7423 if (debug_threads)
7424 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7425 return pc;
7426 }
7427
7428 /* See linux-low.h. */
7429
7430 int
7431 linux_get_auxv (int wordsize, CORE_ADDR match, CORE_ADDR *valp)
7432 {
7433 gdb_byte *data = (gdb_byte *) alloca (2 * wordsize);
7434 int offset = 0;
7435
7436 gdb_assert (wordsize == 4 || wordsize == 8);
7437
7438 while ((*the_target->read_auxv) (offset, data, 2 * wordsize) == 2 * wordsize)
7439 {
7440 if (wordsize == 4)
7441 {
7442 uint32_t *data_p = (uint32_t *) data;
7443 if (data_p[0] == match)
7444 {
7445 *valp = data_p[1];
7446 return 1;
7447 }
7448 }
7449 else
7450 {
7451 uint64_t *data_p = (uint64_t *) data;
7452 if (data_p[0] == match)
7453 {
7454 *valp = data_p[1];
7455 return 1;
7456 }
7457 }
7458
7459 offset += 2 * wordsize;
7460 }
7461
7462 return 0;
7463 }
7464
7465 /* See linux-low.h. */
7466
7467 CORE_ADDR
7468 linux_get_hwcap (int wordsize)
7469 {
7470 CORE_ADDR hwcap = 0;
7471 linux_get_auxv (wordsize, AT_HWCAP, &hwcap);
7472 return hwcap;
7473 }
7474
7475 /* See linux-low.h. */
7476
7477 CORE_ADDR
7478 linux_get_hwcap2 (int wordsize)
7479 {
7480 CORE_ADDR hwcap2 = 0;
7481 linux_get_auxv (wordsize, AT_HWCAP2, &hwcap2);
7482 return hwcap2;
7483 }
7484
7485 static struct target_ops linux_target_ops = {
7486 linux_create_inferior,
7487 linux_post_create_inferior,
7488 linux_attach,
7489 linux_kill,
7490 linux_detach,
7491 linux_mourn,
7492 linux_join,
7493 linux_thread_alive,
7494 linux_resume,
7495 linux_wait,
7496 linux_fetch_registers,
7497 linux_store_registers,
7498 linux_prepare_to_access_memory,
7499 linux_done_accessing_memory,
7500 linux_read_memory,
7501 linux_write_memory,
7502 linux_look_up_symbols,
7503 linux_request_interrupt,
7504 linux_read_auxv,
7505 linux_supports_z_point_type,
7506 linux_insert_point,
7507 linux_remove_point,
7508 linux_stopped_by_sw_breakpoint,
7509 linux_supports_stopped_by_sw_breakpoint,
7510 linux_stopped_by_hw_breakpoint,
7511 linux_supports_stopped_by_hw_breakpoint,
7512 linux_supports_hardware_single_step,
7513 linux_stopped_by_watchpoint,
7514 linux_stopped_data_address,
7515 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7516 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7517 && defined(PT_TEXT_END_ADDR)
7518 linux_read_offsets,
7519 #else
7520 NULL,
7521 #endif
7522 #ifdef USE_THREAD_DB
7523 thread_db_get_tls_address,
7524 #else
7525 NULL,
7526 #endif
7527 linux_qxfer_spu,
7528 hostio_last_error_from_errno,
7529 linux_qxfer_osdata,
7530 linux_xfer_siginfo,
7531 linux_supports_non_stop,
7532 linux_async,
7533 linux_start_non_stop,
7534 linux_supports_multi_process,
7535 linux_supports_fork_events,
7536 linux_supports_vfork_events,
7537 linux_supports_exec_events,
7538 linux_handle_new_gdb_connection,
7539 #ifdef USE_THREAD_DB
7540 thread_db_handle_monitor_command,
7541 #else
7542 NULL,
7543 #endif
7544 linux_common_core_of_thread,
7545 linux_read_loadmap,
7546 linux_process_qsupported,
7547 linux_supports_tracepoints,
7548 linux_read_pc,
7549 linux_write_pc,
7550 linux_thread_stopped,
7551 NULL,
7552 linux_pause_all,
7553 linux_unpause_all,
7554 linux_stabilize_threads,
7555 linux_install_fast_tracepoint_jump_pad,
7556 linux_emit_ops,
7557 linux_supports_disable_randomization,
7558 linux_get_min_fast_tracepoint_insn_len,
7559 linux_qxfer_libraries_svr4,
7560 linux_supports_agent,
7561 #ifdef HAVE_LINUX_BTRACE
7562 linux_enable_btrace,
7563 linux_low_disable_btrace,
7564 linux_low_read_btrace,
7565 linux_low_btrace_conf,
7566 #else
7567 NULL,
7568 NULL,
7569 NULL,
7570 NULL,
7571 #endif
7572 linux_supports_range_stepping,
7573 linux_proc_pid_to_exec_file,
7574 linux_mntns_open_cloexec,
7575 linux_mntns_unlink,
7576 linux_mntns_readlink,
7577 linux_breakpoint_kind_from_pc,
7578 linux_sw_breakpoint_from_kind,
7579 linux_proc_tid_get_name,
7580 linux_breakpoint_kind_from_current_state,
7581 linux_supports_software_single_step,
7582 linux_supports_catch_syscall,
7583 linux_get_ipa_tdesc_idx,
7584 #if USE_THREAD_DB
7585 thread_db_thread_handle,
7586 #else
7587 NULL,
7588 #endif
7589 };
7590
7591 #ifdef HAVE_LINUX_REGSETS
7592 void
7593 initialize_regsets_info (struct regsets_info *info)
7594 {
7595 for (info->num_regsets = 0;
7596 info->regsets[info->num_regsets].size >= 0;
7597 info->num_regsets++)
7598 ;
7599 }
7600 #endif
7601
7602 void
7603 initialize_low (void)
7604 {
7605 struct sigaction sigchld_action;
7606
7607 memset (&sigchld_action, 0, sizeof (sigchld_action));
7608 set_target_ops (&linux_target_ops);
7609
7610 linux_ptrace_init_warnings ();
7611 linux_proc_init_warnings ();
7612
7613 sigchld_action.sa_handler = sigchld_handler;
7614 sigemptyset (&sigchld_action.sa_mask);
7615 sigchld_action.sa_flags = SA_RESTART;
7616 sigaction (SIGCHLD, &sigchld_action, NULL);
7617
7618 initialize_low_arch ();
7619
7620 linux_check_ptrace_features ();
7621 }