Refactor clone_all_breakpoints
[deliverable/binutils-gdb.git] gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN already defines these since at least 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
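/* Editor's illustrative sketch, not part of the original file: these
   PT_* offsets are consumed with PTRACE_PEEKUSER to read the
   inferior's text/data load addresses (compare the linux_read_offsets
   routine later in this file).  The helper name is hypothetical and
   error handling is elided.  */
#if 0
static int
example_read_text_addr (int pid, unsigned long *text_p)
{
#if defined(PT_TEXT_ADDR)
  errno = 0;
  *text_p = ptrace (PTRACE_PEEKUSER, pid,
		    (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, (PTRACE_TYPE_ARG4) 0);
  return errno == 0 ? 0 : -1;
#else
  return -1;			/* Offset not known on this target.  */
#endif
}
#endif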
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
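/* Editor's illustrative sketch, not in the original file: whichever
   width applies, an auxv is consumed as a flat array of entries
   terminated by AT_NULL.  For example, to look up one entry:  */
#if 0
static uint64_t
example_auxv_lookup (const Elf64_auxv_t *auxv, uint64_t type)
{
  const Elf64_auxv_t *entry;

  for (entry = auxv; entry->a_type != AT_NULL; entry++)
    if (entry->a_type == type)
      return entry->a_un.a_val;
  return 0;			/* Not found.  */
}
#endif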
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
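/* Editor's illustrative sketch, not in the original file: how the two
   helpers above cooperate.  A stop notification that races ahead of
   the parent's fork/clone event is parked on the list, and the event
   handler later claims it by pid.  */
#if 0
static void
example_stopped_pids_usage (int child_pid, int wstat)
{
  int status;

  /* Producer: the child's initial stop arrived before the parent's
     ptrace event did.  */
  add_to_pid_list (&stopped_pids, child_pid, wstat);

  /* Consumer: returns 1 and fills STATUS if CHILD_PID was parked.  */
  if (pull_pid_from_list (&stopped_pids, child_pid, &status))
    {
      /* STATUS now holds the waitpid status recorded above.  */
    }
}
#endif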
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static void unsuspend_all_lwps (struct lwp_info *except);
256 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259 static struct lwp_info *add_lwp (ptid_t ptid);
260 static void linux_mourn (struct process_info *process);
261 static int linux_stopped_by_watchpoint (void);
262 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263 static int lwp_is_marked_dead (struct lwp_info *lwp);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268 static void complete_ongoing_step_over (void);
269 static int linux_low_ptrace_options (int attached);
270 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
271
272 /* When the event-loop is doing a step-over, this points at the thread
273 being stepped. */
274 ptid_t step_over_bkpt;
275
276 /* True if the low target can hardware single-step. */
277
278 static int
279 can_hardware_single_step (void)
280 {
281 if (the_low_target.supports_hardware_single_step != NULL)
282 return the_low_target.supports_hardware_single_step ();
283 else
284 return 0;
285 }
286
287 /* True if the low target can software single-step. Such targets
288 implement the GET_NEXT_PCS callback. */
289
290 static int
291 can_software_single_step (void)
292 {
293 return (the_low_target.get_next_pcs != NULL);
294 }
295
296 /* True if the low target supports memory breakpoints. If so, we'll
297 have a GET_PC implementation. */
298
299 static int
300 supports_breakpoints (void)
301 {
302 return (the_low_target.get_pc != NULL);
303 }
304
305 /* Returns true if this target can support fast tracepoints. This
306 does not mean that the in-process agent has been loaded in the
307 inferior. */
308
309 static int
310 supports_fast_tracepoints (void)
311 {
312 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
313 }
314
315 /* True if LWP is stopped in its stepping range. */
316
317 static int
318 lwp_in_step_range (struct lwp_info *lwp)
319 {
320 CORE_ADDR pc = lwp->stop_pc;
321
322 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
323 }
324
325 struct pending_signals
326 {
327 int signal;
328 siginfo_t info;
329 struct pending_signals *prev;
330 };
331
332 /* The read/write ends of the pipe registered as waitable file in the
333 event loop. */
334 static int linux_event_pipe[2] = { -1, -1 };
335
336 /* True if we're currently in async mode. */
337 #define target_is_async_p() (linux_event_pipe[0] != -1)
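/* Editor's sketch of how the event pipe is used (an assumption based
   on the async_file_mark/async_file_flush helpers elsewhere in this
   file): marking an event writes a byte to the write end, and the
   event loop, which selects on the read end, wakes up and drains it.  */
#if 0
static void
example_async_mark (void)
{
  if (target_is_async_p ())
    {
      char c = '+';

      /* A failed write is harmless: if the pipe is full, the event
	 loop has already been woken.  */
      if (write (linux_event_pipe[1], &c, 1) < 0)
	;
    }
}
#endif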
338
339 static void send_sigstop (struct lwp_info *lwp);
340 static void wait_for_sigstop (void);
341
342 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
343 file, and -1 if it is not an ELF file at all. */
343
344 static int
345 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
346 {
347 if (header->e_ident[EI_MAG0] == ELFMAG0
348 && header->e_ident[EI_MAG1] == ELFMAG1
349 && header->e_ident[EI_MAG2] == ELFMAG2
350 && header->e_ident[EI_MAG3] == ELFMAG3)
351 {
352 *machine = header->e_machine;
353 return header->e_ident[EI_CLASS] == ELFCLASS64;
354
355 }
356 *machine = EM_NONE;
357 return -1;
358 }
359
360 /* Return 1 if FILE is a 64-bit ELF file,
361 zero if the file is a 32-bit ELF file,
362 and -1 if the file is not accessible, doesn't exist, or is not an ELF file. */
363
364 static int
365 elf_64_file_p (const char *file, unsigned int *machine)
366 {
367 Elf64_Ehdr header;
368 int fd;
369
370 fd = open (file, O_RDONLY);
371 if (fd < 0)
372 return -1;
373
374 if (read (fd, &header, sizeof (header)) != sizeof (header))
375 {
376 close (fd);
377 return 0;
378 }
379 close (fd);
380
381 return elf_64_header_p (&header, machine);
382 }
383
384 /* Accepts an integer PID; returns true if the executable that PID
385 is running is a 64-bit ELF file. */
386
387 int
388 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
389 {
390 char file[PATH_MAX];
391
392 sprintf (file, "/proc/%d/exe", pid);
393 return elf_64_file_p (file, machine);
394 }
395
396 static void
397 delete_lwp (struct lwp_info *lwp)
398 {
399 struct thread_info *thr = get_lwp_thread (lwp);
400
401 if (debug_threads)
402 debug_printf ("deleting %ld\n", lwpid_of (thr));
403
404 remove_thread (thr);
405 free (lwp->arch_private);
406 free (lwp);
407 }
408
409 /* Add a process to the common process list, and set its private
410 data. */
411
412 static struct process_info *
413 linux_add_process (int pid, int attached)
414 {
415 struct process_info *proc;
416
417 proc = add_process (pid, attached);
418 proc->priv = XCNEW (struct process_info_private);
419
420 if (the_low_target.new_process != NULL)
421 proc->priv->arch_private = the_low_target.new_process ();
422
423 return proc;
424 }
425
426 static CORE_ADDR get_pc (struct lwp_info *lwp);
427
428 /* Call the target arch_setup function on the current thread. */
429
430 static void
431 linux_arch_setup (void)
432 {
433 the_low_target.arch_setup ();
434 }
435
436 /* Call the target arch_setup function on THREAD. */
437
438 static void
439 linux_arch_setup_thread (struct thread_info *thread)
440 {
441 struct thread_info *saved_thread;
442
443 saved_thread = current_thread;
444 current_thread = thread;
445
446 linux_arch_setup ();
447
448 current_thread = saved_thread;
449 }
450
451 /* Handle a GNU/Linux extended wait response. If we see a clone,
452 fork, or vfork event, we need to add the new LWP to our list
453 (and return 0 so as not to report the trap to higher layers).
454 If we see an exec event, we will modify ORIG_EVENT_LWP to point
455 to a new LWP representing the new program. */
456
457 static int
458 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
459 {
460 struct lwp_info *event_lwp = *orig_event_lwp;
461 int event = linux_ptrace_get_extended_event (wstat);
462 struct thread_info *event_thr = get_lwp_thread (event_lwp);
463 struct lwp_info *new_lwp;
464
465 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
466
467 /* All extended events we currently use are mid-syscall. Only
468 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
469 you have to be using PTRACE_SEIZE to get that. */
470 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
471
472 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
473 || (event == PTRACE_EVENT_CLONE))
474 {
475 ptid_t ptid;
476 unsigned long new_pid;
477 int ret, status;
478
479 /* Get the pid of the new lwp. */
480 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
481 &new_pid);
482
483 /* If we haven't already seen the new PID stop, wait for it now. */
484 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
485 {
486 /* The new child has a pending SIGSTOP. We can't affect it until it
487 hits the SIGSTOP, but we're already attached. */
488
489 ret = my_waitpid (new_pid, &status, __WALL);
490
491 if (ret == -1)
492 perror_with_name ("waiting for new child");
493 else if (ret != new_pid)
494 warning ("wait returned unexpected PID %d", ret);
495 else if (!WIFSTOPPED (status))
496 warning ("wait returned unexpected status 0x%x", status);
497 }
498
499 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
500 {
501 struct process_info *parent_proc;
502 struct process_info *child_proc;
503 struct lwp_info *child_lwp;
504 struct thread_info *child_thr;
505 struct target_desc *tdesc;
506
507 ptid = ptid_build (new_pid, new_pid, 0);
508
509 if (debug_threads)
510 {
511 debug_printf ("HEW: Got fork event from LWP %ld, "
512 "new child is %d\n",
513 ptid_get_lwp (ptid_of (event_thr)),
514 ptid_get_pid (ptid));
515 }
516
517 /* Add the new process to the tables and clone the breakpoint
518 lists of the parent. We need to do this even if the new process
519 will be detached, since we will need the process object and the
520 breakpoints to remove any breakpoints from memory when we
521 detach, and the client side will access registers. */
522 child_proc = linux_add_process (new_pid, 0);
523 gdb_assert (child_proc != NULL);
524 child_lwp = add_lwp (ptid);
525 gdb_assert (child_lwp != NULL);
526 child_lwp->stopped = 1;
527 child_lwp->must_set_ptrace_flags = 1;
528 child_lwp->status_pending_p = 0;
529 child_thr = get_lwp_thread (child_lwp);
530 child_thr->last_resume_kind = resume_stop;
531 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
532
533 /* If we're suspending all threads, leave this one suspended
534 too. If the fork/clone parent is stepping over a breakpoint,
535 all other threads have been suspended already. Leave the
536 child suspended too. */
537 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
538 || event_lwp->bp_reinsert != 0)
539 {
540 if (debug_threads)
541 debug_printf ("HEW: leaving child suspended\n");
542 child_lwp->suspended = 1;
543 }
544
545 parent_proc = get_thread_process (event_thr);
546 child_proc->attached = parent_proc->attached;
547
548 if (event_lwp->bp_reinsert != 0
549 && can_software_single_step ()
550 && event == PTRACE_EVENT_VFORK)
551 {
552 struct thread_info *saved_thread = current_thread;
553
554 current_thread = event_thr;
555 /* If we leave the reinsert breakpoints there, the child
556 will hit them, so uninsert the reinsert breakpoints from
557 the parent (and the child). Once the vfork child is done,
558 reinsert them back into the parent. */
559 uninsert_reinsert_breakpoints ();
560 current_thread = saved_thread;
561 }
562
563 clone_all_breakpoints (child_thr, event_thr);
564
565 tdesc = XNEW (struct target_desc);
566 copy_target_description (tdesc, parent_proc->tdesc);
567 child_proc->tdesc = tdesc;
568
569 /* Clone arch-specific process data. */
570 if (the_low_target.new_fork != NULL)
571 the_low_target.new_fork (parent_proc, child_proc);
572
573 /* Save fork info in the parent thread. */
574 if (event == PTRACE_EVENT_FORK)
575 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
576 else if (event == PTRACE_EVENT_VFORK)
577 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
578
579 event_lwp->waitstatus.value.related_pid = ptid;
580
581 /* The status_pending field contains bits denoting the
582 extended event, so when the pending event is handled,
583 the handler will look at lwp->waitstatus. */
584 event_lwp->status_pending_p = 1;
585 event_lwp->status_pending = wstat;
586
587 /* If the parent thread is doing a step-over with reinsert
588 breakpoints, the list of reinsert breakpoints is cloned
589 from the parent's. Remove them from the child process.
590 In the case of vfork, we'll reinsert them once the vforked
591 child is done. */
592 if (event_lwp->bp_reinsert != 0
593 && can_software_single_step ())
594 {
595 struct thread_info *saved_thread = current_thread;
596
597 /* The child process is forked and stopped, so it is safe
598 to access its memory without stopping all other threads
599 from other processes. */
600 current_thread = child_thr;
601 delete_reinsert_breakpoints ();
602 current_thread = saved_thread;
603
604 gdb_assert (has_reinsert_breakpoints (parent_proc));
605 gdb_assert (!has_reinsert_breakpoints (child_proc));
606 }
607
608 /* Report the event. */
609 return 0;
610 }
611
612 if (debug_threads)
613 debug_printf ("HEW: Got clone event "
614 "from LWP %ld, new child is LWP %ld\n",
615 lwpid_of (event_thr), new_pid);
616
617 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
618 new_lwp = add_lwp (ptid);
619
620 /* Either we're going to immediately resume the new thread
621 or leave it stopped. linux_resume_one_lwp is a nop if it
622 thinks the thread is currently running, so set this first
623 before calling linux_resume_one_lwp. */
624 new_lwp->stopped = 1;
625
626 /* If we're suspending all threads, leave this one suspended
627 too. If the fork/clone parent is stepping over a breakpoint,
628 all other threads have been suspended already. Leave the
629 child suspended too. */
630 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
631 || event_lwp->bp_reinsert != 0)
632 new_lwp->suspended = 1;
633
634 /* Normally we will get the pending SIGSTOP. But in some cases
635 we might get another signal delivered to the group first.
636 If we do get another signal, be sure not to lose it. */
637 if (WSTOPSIG (status) != SIGSTOP)
638 {
639 new_lwp->stop_expected = 1;
640 new_lwp->status_pending_p = 1;
641 new_lwp->status_pending = status;
642 }
643 else if (report_thread_events)
644 {
645 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
646 new_lwp->status_pending_p = 1;
647 new_lwp->status_pending = status;
648 }
649
650 /* Don't report the event. */
651 return 1;
652 }
653 else if (event == PTRACE_EVENT_VFORK_DONE)
654 {
655 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
656
657 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
658 {
659 struct thread_info *saved_thread = current_thread;
660 struct process_info *proc = get_thread_process (event_thr);
661
662 current_thread = event_thr;
663 reinsert_reinsert_breakpoints ();
664 current_thread = saved_thread;
665
666 gdb_assert (has_reinsert_breakpoints (proc));
667 }
668
669 /* Report the event. */
670 return 0;
671 }
672 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
673 {
674 struct process_info *proc;
675 VEC (int) *syscalls_to_catch;
676 ptid_t event_ptid;
677 pid_t event_pid;
678
679 if (debug_threads)
680 {
681 debug_printf ("HEW: Got exec event from LWP %ld\n",
682 lwpid_of (event_thr));
683 }
684
685 /* Get the event ptid. */
686 event_ptid = ptid_of (event_thr);
687 event_pid = ptid_get_pid (event_ptid);
688
689 /* Save the syscall list from the execing process. */
690 proc = get_thread_process (event_thr);
691 syscalls_to_catch = proc->syscalls_to_catch;
692 proc->syscalls_to_catch = NULL;
693
694 /* Delete the execing process and all its threads. */
695 linux_mourn (proc);
696 current_thread = NULL;
697
698 /* Create a new process/lwp/thread. */
699 proc = linux_add_process (event_pid, 0);
700 event_lwp = add_lwp (event_ptid);
701 event_thr = get_lwp_thread (event_lwp);
702 gdb_assert (current_thread == event_thr);
703 linux_arch_setup_thread (event_thr);
704
705 /* Set the event status. */
706 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
707 event_lwp->waitstatus.value.execd_pathname
708 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
709
710 /* Mark the exec status as pending. */
711 event_lwp->stopped = 1;
712 event_lwp->status_pending_p = 1;
713 event_lwp->status_pending = wstat;
714 event_thr->last_resume_kind = resume_continue;
715 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
716
717 /* Update syscall state in the new lwp, effectively mid-syscall too. */
718 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
719
720 /* Restore the list to catch. Don't rely on the client, which is free
721 to avoid sending a new list when the architecture doesn't change.
722 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
723 proc->syscalls_to_catch = syscalls_to_catch;
724
725 /* Report the event. */
726 *orig_event_lwp = event_lwp;
727 return 0;
728 }
729
730 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
731 }
732
733 /* Return the PC as read from the regcache of LWP, without any
734 adjustment. */
735
736 static CORE_ADDR
737 get_pc (struct lwp_info *lwp)
738 {
739 struct thread_info *saved_thread;
740 struct regcache *regcache;
741 CORE_ADDR pc;
742
743 if (the_low_target.get_pc == NULL)
744 return 0;
745
746 saved_thread = current_thread;
747 current_thread = get_lwp_thread (lwp);
748
749 regcache = get_thread_regcache (current_thread, 1);
750 pc = (*the_low_target.get_pc) (regcache);
751
752 if (debug_threads)
753 debug_printf ("pc is 0x%lx\n", (long) pc);
754
755 current_thread = saved_thread;
756 return pc;
757 }
758
759 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
760 Fill *SYSNO with the number of the syscall that trapped. */
761
762 static void
763 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
764 {
765 struct thread_info *saved_thread;
766 struct regcache *regcache;
767
768 if (the_low_target.get_syscall_trapinfo == NULL)
769 {
770 /* If we cannot get the syscall trapinfo, report an unknown
771 system call number. */
772 *sysno = UNKNOWN_SYSCALL;
773 return;
774 }
775
776 saved_thread = current_thread;
777 current_thread = get_lwp_thread (lwp);
778
779 regcache = get_thread_regcache (current_thread, 1);
780 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
781
782 if (debug_threads)
783 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
784
785 current_thread = saved_thread;
786 }
787
788 static int check_stopped_by_watchpoint (struct lwp_info *child);
789
790 /* Called when the LWP stopped for a signal/trap. If it stopped for a
791 trap check what caused it (breakpoint, watchpoint, trace, etc.),
792 and save the result in the LWP's stop_reason field. If it stopped
793 for a breakpoint, decrement the PC if necessary on the lwp's
794 architecture. Returns true if we now have the LWP's stop PC. */
795
796 static int
797 save_stop_reason (struct lwp_info *lwp)
798 {
799 CORE_ADDR pc;
800 CORE_ADDR sw_breakpoint_pc;
801 struct thread_info *saved_thread;
802 #if USE_SIGTRAP_SIGINFO
803 siginfo_t siginfo;
804 #endif
805
806 if (the_low_target.get_pc == NULL)
807 return 0;
808
809 pc = get_pc (lwp);
810 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
811
812 /* breakpoint_at reads from the current thread. */
813 saved_thread = current_thread;
814 current_thread = get_lwp_thread (lwp);
815
816 #if USE_SIGTRAP_SIGINFO
817 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
818 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
819 {
820 if (siginfo.si_signo == SIGTRAP)
821 {
822 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
823 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
824 {
825 /* The si_code is ambiguous on this arch -- check debug
826 registers. */
827 if (!check_stopped_by_watchpoint (lwp))
828 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
829 }
830 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
831 {
832 /* If we determine the LWP stopped for a SW breakpoint,
833 trust it. Particularly don't check watchpoint
834 registers, because at least on s390, we'd find
835 stopped-by-watchpoint as long as there's a watchpoint
836 set. */
837 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
838 }
839 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
840 {
841 /* This can indicate either a hardware breakpoint or
842 hardware watchpoint. Check debug registers. */
843 if (!check_stopped_by_watchpoint (lwp))
844 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
845 }
846 else if (siginfo.si_code == TRAP_TRACE)
847 {
848 /* We may have single stepped an instruction that
849 triggered a watchpoint. In that case, on some
850 architectures (such as x86), instead of TRAP_HWBKPT,
851 si_code indicates TRAP_TRACE, and we need to check
852 the debug registers separately. */
853 if (!check_stopped_by_watchpoint (lwp))
854 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
855 }
856 }
857 }
858 #else
859 /* We may have just stepped a breakpoint instruction. E.g., in
860 non-stop mode, GDB first tells the thread A to step a range, and
861 then the user inserts a breakpoint inside the range. In that
862 case we need to report the breakpoint PC. */
863 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
864 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
865 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
866
867 if (hardware_breakpoint_inserted_here (pc))
868 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
869
870 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
871 check_stopped_by_watchpoint (lwp);
872 #endif
873
874 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
875 {
876 if (debug_threads)
877 {
878 struct thread_info *thr = get_lwp_thread (lwp);
879
880 debug_printf ("CSBB: %s stopped by software breakpoint\n",
881 target_pid_to_str (ptid_of (thr)));
882 }
883
884 /* Back up the PC if necessary. */
885 if (pc != sw_breakpoint_pc)
886 {
887 struct regcache *regcache
888 = get_thread_regcache (current_thread, 1);
889 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
890 }
891
892 /* Update this so we record the correct stop PC below. */
893 pc = sw_breakpoint_pc;
894 }
895 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
896 {
897 if (debug_threads)
898 {
899 struct thread_info *thr = get_lwp_thread (lwp);
900
901 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
902 target_pid_to_str (ptid_of (thr)));
903 }
904 }
905 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
906 {
907 if (debug_threads)
908 {
909 struct thread_info *thr = get_lwp_thread (lwp);
910
911 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
912 target_pid_to_str (ptid_of (thr)));
913 }
914 }
915 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
916 {
917 if (debug_threads)
918 {
919 struct thread_info *thr = get_lwp_thread (lwp);
920
921 debug_printf ("CSBB: %s stopped by trace\n",
922 target_pid_to_str (ptid_of (thr)));
923 }
924 }
925
926 lwp->stop_pc = pc;
927 current_thread = saved_thread;
928 return 1;
929 }
930
931 static struct lwp_info *
932 add_lwp (ptid_t ptid)
933 {
934 struct lwp_info *lwp;
935
936 lwp = XCNEW (struct lwp_info);
937
938 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
939
940 if (the_low_target.new_thread != NULL)
941 the_low_target.new_thread (lwp);
942
943 lwp->thread = add_thread (ptid, lwp);
944
945 return lwp;
946 }
947
948 /* Start an inferior process and return its pid.
949 ALLARGS is a vector of program-name and args. */
950
951 static int
952 linux_create_inferior (char *program, char **allargs)
953 {
954 struct lwp_info *new_lwp;
955 int pid;
956 ptid_t ptid;
957 struct cleanup *restore_personality
958 = maybe_disable_address_space_randomization (disable_randomization);
959
960 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
961 pid = vfork ();
962 #else
963 pid = fork ();
964 #endif
965 if (pid < 0)
966 perror_with_name ("fork");
967
968 if (pid == 0)
969 {
970 close_most_fds ();
971 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
972
973 setpgid (0, 0);
974
975 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
976 stdout to stderr so that inferior i/o doesn't corrupt the connection.
977 Also, redirect stdin to /dev/null. */
978 if (remote_connection_is_stdio ())
979 {
980 close (0);
981 open ("/dev/null", O_RDONLY);
982 dup2 (2, 1);
983 if (write (2, "stdin/stdout redirected\n",
984 sizeof ("stdin/stdout redirected\n") - 1) < 0)
985 {
986 /* Errors ignored. */;
987 }
988 }
989
990 execv (program, allargs);
991 if (errno == ENOENT)
992 execvp (program, allargs);
993
994 fprintf (stderr, "Cannot exec %s: %s.\n", program,
995 strerror (errno));
996 fflush (stderr);
997 _exit (0177);
998 }
999
1000 do_cleanups (restore_personality);
1001
1002 linux_add_process (pid, 0);
1003
1004 ptid = ptid_build (pid, pid, 0);
1005 new_lwp = add_lwp (ptid);
1006 new_lwp->must_set_ptrace_flags = 1;
1007
1008 return pid;
1009 }
1010
1011 /* Implement the post_create_inferior target_ops method. */
1012
1013 static void
1014 linux_post_create_inferior (void)
1015 {
1016 struct lwp_info *lwp = get_thread_lwp (current_thread);
1017
1018 linux_arch_setup ();
1019
1020 if (lwp->must_set_ptrace_flags)
1021 {
1022 struct process_info *proc = current_process ();
1023 int options = linux_low_ptrace_options (proc->attached);
1024
1025 linux_enable_event_reporting (lwpid_of (current_thread), options);
1026 lwp->must_set_ptrace_flags = 0;
1027 }
1028 }
1029
1030 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1031 error. */
1032
1033 int
1034 linux_attach_lwp (ptid_t ptid)
1035 {
1036 struct lwp_info *new_lwp;
1037 int lwpid = ptid_get_lwp (ptid);
1038
1039 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1040 != 0)
1041 return errno;
1042
1043 new_lwp = add_lwp (ptid);
1044
1045 /* We need to wait for SIGSTOP before being able to make the next
1046 ptrace call on this LWP. */
1047 new_lwp->must_set_ptrace_flags = 1;
1048
1049 if (linux_proc_pid_is_stopped (lwpid))
1050 {
1051 if (debug_threads)
1052 debug_printf ("Attached to a stopped process\n");
1053
1054 /* The process is definitely stopped. It is in a job control
1055 stop, unless the kernel predates the TASK_STOPPED /
1056 TASK_TRACED distinction, in which case it might be in a
1057 ptrace stop. Make sure it is in a ptrace stop; from there we
1058 can kill it, signal it, et cetera.
1059
1060 First make sure there is a pending SIGSTOP. Since we are
1061 already attached, the process can not transition from stopped
1062 to running without a PTRACE_CONT; so we know this signal will
1063 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1064 probably already in the queue (unless this kernel is old
1065 enough to use TASK_STOPPED for ptrace stops); but since
1066 SIGSTOP is not an RT signal, it can only be queued once. */
1067 kill_lwp (lwpid, SIGSTOP);
1068
1069 /* Finally, resume the stopped process. This will deliver the
1070 SIGSTOP (or a higher priority signal, just like normal
1071 PTRACE_ATTACH), which we'll catch later on. */
1072 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1073 }
1074
1075 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1076 brings it to a halt.
1077
1078 There are several cases to consider here:
1079
1080 1) gdbserver has already attached to the process and is being notified
1081 of a new thread that is being created.
1082 In this case we should ignore that SIGSTOP and resume the
1083 process. This is handled below by setting stop_expected = 1,
1084 and the fact that add_thread sets last_resume_kind ==
1085 resume_continue.
1086
1087 2) This is the first thread (the process thread), and we're attaching
1088 to it via attach_inferior.
1089 In this case we want the process thread to stop.
1090 This is handled by having linux_attach set last_resume_kind ==
1091 resume_stop after we return.
1092
1093 If the pid we are attaching to is also the tgid, we attach to and
1094 stop all the existing threads. Otherwise, we attach to pid and
1095 ignore any other threads in the same group as this pid.
1096
1097 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1098 existing threads.
1099 In this case we want the thread to stop.
1100 FIXME: This case is currently not properly handled.
1101 We should wait for the SIGSTOP but don't. Things work apparently
1102 because enough time passes between when we ptrace (ATTACH) and when
1103 gdb makes the next ptrace call on the thread.
1104
1105 On the other hand, if we are currently trying to stop all threads, we
1106 should treat the new thread as if we had sent it a SIGSTOP. This works
1107 because we are guaranteed that the add_lwp call above added us to the
1108 end of the list, and so the new thread has not yet reached
1109 wait_for_sigstop (but will). */
1110 new_lwp->stop_expected = 1;
1111
1112 return 0;
1113 }
1114
1115 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1116 already attached. Returns true if a new LWP is found, false
1117 otherwise. */
1118
1119 static int
1120 attach_proc_task_lwp_callback (ptid_t ptid)
1121 {
1122 /* Is this a new thread? */
1123 if (find_thread_ptid (ptid) == NULL)
1124 {
1125 int lwpid = ptid_get_lwp (ptid);
1126 int err;
1127
1128 if (debug_threads)
1129 debug_printf ("Found new lwp %d\n", lwpid);
1130
1131 err = linux_attach_lwp (ptid);
1132
1133 /* Be quiet if we simply raced with the thread exiting. EPERM
1134 is returned if the thread's task still exists, and is marked
1135 as exited or zombie, as well as other conditions, so in that
1136 case, confirm the status in /proc/PID/status. */
1137 if (err == ESRCH
1138 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1139 {
1140 if (debug_threads)
1141 {
1142 debug_printf ("Cannot attach to lwp %d: "
1143 "thread is gone (%d: %s)\n",
1144 lwpid, err, strerror (err));
1145 }
1146 }
1147 else if (err != 0)
1148 {
1149 warning (_("Cannot attach to lwp %d: %s"),
1150 lwpid,
1151 linux_ptrace_attach_fail_reason_string (ptid, err));
1152 }
1153
1154 return 1;
1155 }
1156 return 0;
1157 }
1158
1159 static void async_file_mark (void);
1160
1161 /* Attach to PID. If PID is the tgid, attach to it and all
1162 of its threads. */
1163
1164 static int
1165 linux_attach (unsigned long pid)
1166 {
1167 struct process_info *proc;
1168 struct thread_info *initial_thread;
1169 ptid_t ptid = ptid_build (pid, pid, 0);
1170 int err;
1171
1172 /* Attach to PID. We will check for other threads
1173 soon. */
1174 err = linux_attach_lwp (ptid);
1175 if (err != 0)
1176 error ("Cannot attach to process %ld: %s",
1177 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1178
1179 proc = linux_add_process (pid, 1);
1180
1181 /* Don't ignore the initial SIGSTOP if we just attached to this
1182 process. It will be collected by wait shortly. */
1183 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1184 initial_thread->last_resume_kind = resume_stop;
1185
1186 /* We must attach to every LWP. If /proc is mounted, use that to
1187 find them now. On the one hand, the inferior may be using raw
1188 clone instead of using pthreads. On the other hand, even if it
1189 is using pthreads, GDB may not be connected yet (thread_db needs
1190 to do symbol lookups, through qSymbol). Also, thread_db walks
1191 structures in the inferior's address space to find the list of
1192 threads/LWPs, and those structures may well be corrupted. Note
1193 that once thread_db is loaded, we'll still use it to list threads
1194 and associate pthread info with each LWP. */
1195 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
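/* Editor's simplified sketch of what linux_proc_attach_tgid_threads
   does on the other side (see nat/linux-procfs.c): list the tids
   under /proc/PID/task and hand each one to the callback.  The real
   implementation rescans until no new LWP turns up, since threads can
   be spawned while we're attaching; this single-pass version and its
   name are illustrative only.  */
#if 0
static void
example_attach_tgid_threads (int pid, int (*callback) (ptid_t ptid))
{
  char dirname[64];
  DIR *dir;
  struct dirent *dp;

  sprintf (dirname, "/proc/%d/task", pid);
  dir = opendir (dirname);
  if (dir == NULL)
    return;

  while ((dp = readdir (dir)) != NULL)
    {
      unsigned long lwp = strtoul (dp->d_name, NULL, 10);

      if (lwp != 0)		/* Skip "." and "..".  */
	callback (ptid_build (pid, lwp, 0));
    }
  closedir (dir);
}
#endif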
1196
1197 /* GDB will shortly read the xml target description for this
1198 process, to figure out the process' architecture. But the target
1199 description is only filled in when the first process/thread in
1200 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1201 that now, otherwise, if GDB is fast enough, it could read the
1202 target description _before_ that initial stop. */
1203 if (non_stop)
1204 {
1205 struct lwp_info *lwp;
1206 int wstat, lwpid;
1207 ptid_t pid_ptid = pid_to_ptid (pid);
1208
1209 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1210 &wstat, __WALL);
1211 gdb_assert (lwpid > 0);
1212
1213 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1214
1215 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1216 {
1217 lwp->status_pending_p = 1;
1218 lwp->status_pending = wstat;
1219 }
1220
1221 initial_thread->last_resume_kind = resume_continue;
1222
1223 async_file_mark ();
1224
1225 gdb_assert (proc->tdesc != NULL);
1226 }
1227
1228 return 0;
1229 }
1230
1231 struct counter
1232 {
1233 int pid;
1234 int count;
1235 };
1236
1237 static int
1238 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1239 {
1240 struct counter *counter = (struct counter *) args;
1241
1242 if (ptid_get_pid (entry->id) == counter->pid)
1243 {
1244 if (++counter->count > 1)
1245 return 1;
1246 }
1247
1248 return 0;
1249 }
1250
1251 static int
1252 last_thread_of_process_p (int pid)
1253 {
1254 struct counter counter = { pid , 0 };
1255
1256 return (find_inferior (&all_threads,
1257 second_thread_of_pid_p, &counter) == NULL);
1258 }
1259
1260 /* Kill LWP. */
1261
1262 static void
1263 linux_kill_one_lwp (struct lwp_info *lwp)
1264 {
1265 struct thread_info *thr = get_lwp_thread (lwp);
1266 int pid = lwpid_of (thr);
1267
1268 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1269 there is no signal context, and ptrace(PTRACE_KILL) (or
1270 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1271 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1272 alternative is to kill with SIGKILL. We only need one SIGKILL
1273 per process, not one for each thread. But since we still
1274 support debugging programs using raw clone without CLONE_THREAD,
1275 we send one for each thread. For years, we used PTRACE_KILL
1276 only, so we're being a bit paranoid about some old kernels where
1277 PTRACE_KILL might work better (dubious if there are any such, but
1278 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1279 second, and so we're fine everywhere. */
1280
1281 errno = 0;
1282 kill_lwp (pid, SIGKILL);
1283 if (debug_threads)
1284 {
1285 int save_errno = errno;
1286
1287 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1288 target_pid_to_str (ptid_of (thr)),
1289 save_errno ? strerror (save_errno) : "OK");
1290 }
1291
1292 errno = 0;
1293 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1294 if (debug_threads)
1295 {
1296 int save_errno = errno;
1297
1298 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1299 target_pid_to_str (ptid_of (thr)),
1300 save_errno ? strerror (save_errno) : "OK");
1301 }
1302 }
1303
1304 /* Kill LWP and wait for it to die. */
1305
1306 static void
1307 kill_wait_lwp (struct lwp_info *lwp)
1308 {
1309 struct thread_info *thr = get_lwp_thread (lwp);
1310 int pid = ptid_get_pid (ptid_of (thr));
1311 int lwpid = ptid_get_lwp (ptid_of (thr));
1312 int wstat;
1313 int res;
1314
1315 if (debug_threads)
1316 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1317
1318 do
1319 {
1320 linux_kill_one_lwp (lwp);
1321
1322 /* Make sure it died. Notes:
1323
1324 - The loop is most likely unnecessary.
1325
1326 - We don't use linux_wait_for_event as that could delete lwps
1327 while we're iterating over them. We're not interested in
1328 any pending status at this point, only in making sure all
1329 wait status on the kernel side are collected until the
1330 process is reaped.
1331
1332 - We don't use __WALL here as the __WALL emulation relies on
1333 SIGCHLD, and killing a stopped process doesn't generate
1334 one, nor an exit status.
1335 */
1336 res = my_waitpid (lwpid, &wstat, 0);
1337 if (res == -1 && errno == ECHILD)
1338 res = my_waitpid (lwpid, &wstat, __WCLONE);
1339 } while (res > 0 && WIFSTOPPED (wstat));
1340
1341 /* Even if it was stopped, the child may have already disappeared.
1342 E.g., if it was killed by SIGKILL. */
1343 if (res < 0 && errno != ECHILD)
1344 perror_with_name ("kill_wait_lwp");
1345 }
1346
1347 /* Callback for `find_inferior'. Kills an lwp of a given process,
1348 except the leader. */
1349
1350 static int
1351 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1352 {
1353 struct thread_info *thread = (struct thread_info *) entry;
1354 struct lwp_info *lwp = get_thread_lwp (thread);
1355 int pid = * (int *) args;
1356
1357 if (ptid_get_pid (entry->id) != pid)
1358 return 0;
1359
1360 /* We avoid killing the first thread here, because of a Linux kernel (at
1361 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1362 the children get a chance to be reaped, it will remain a zombie
1363 forever. */
1364
1365 if (lwpid_of (thread) == pid)
1366 {
1367 if (debug_threads)
1368 debug_printf ("lkop: is last of process %s\n",
1369 target_pid_to_str (entry->id));
1370 return 0;
1371 }
1372
1373 kill_wait_lwp (lwp);
1374 return 0;
1375 }
1376
1377 static int
1378 linux_kill (int pid)
1379 {
1380 struct process_info *process;
1381 struct lwp_info *lwp;
1382
1383 process = find_process_pid (pid);
1384 if (process == NULL)
1385 return -1;
1386
1387 /* If we're killing a running inferior, make sure it is stopped
1388 first, as PTRACE_KILL will not work otherwise. */
1389 stop_all_lwps (0, NULL);
1390
1391 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1392
1393 /* See the comment in kill_one_lwp_callback. We did not kill the first
1394 thread in the list, so do so now. */
1395 lwp = find_lwp_pid (pid_to_ptid (pid));
1396
1397 if (lwp == NULL)
1398 {
1399 if (debug_threads)
1400 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1401 pid);
1402 }
1403 else
1404 kill_wait_lwp (lwp);
1405
1406 the_target->mourn (process);
1407
1408 /* Since we presently can only stop all lwps of all processes, we
1409 need to unstop lwps of other processes. */
1410 unstop_all_lwps (0, NULL);
1411 return 0;
1412 }
1413
1414 /* Get pending signal of THREAD, for detaching purposes. This is the
1415 signal the thread last stopped for, which we need to deliver to the
1416 thread when detaching; otherwise, it would be suppressed/lost. */
1417
1418 static int
1419 get_detach_signal (struct thread_info *thread)
1420 {
1421 enum gdb_signal signo = GDB_SIGNAL_0;
1422 int status;
1423 struct lwp_info *lp = get_thread_lwp (thread);
1424
1425 if (lp->status_pending_p)
1426 status = lp->status_pending;
1427 else
1428 {
1429 /* If the thread had been suspended by gdbserver, and it stopped
1430 cleanly, then it'll have stopped with SIGSTOP. But we don't
1431 want to deliver that SIGSTOP. */
1432 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1433 || thread->last_status.value.sig == GDB_SIGNAL_0)
1434 return 0;
1435
1436 /* Otherwise, we may need to deliver the signal we
1437 intercepted. */
1438 status = lp->last_status;
1439 }
1440
1441 if (!WIFSTOPPED (status))
1442 {
1443 if (debug_threads)
1444 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1445 target_pid_to_str (ptid_of (thread)));
1446 return 0;
1447 }
1448
1449 /* Extended wait statuses aren't real SIGTRAPs. */
1450 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1451 {
1452 if (debug_threads)
1453 debug_printf ("GPS: lwp %s had stopped with extended "
1454 "status: no pending signal\n",
1455 target_pid_to_str (ptid_of (thread)));
1456 return 0;
1457 }
1458
1459 signo = gdb_signal_from_host (WSTOPSIG (status));
1460
1461 if (program_signals_p && !program_signals[signo])
1462 {
1463 if (debug_threads)
1464 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1465 target_pid_to_str (ptid_of (thread)),
1466 gdb_signal_to_string (signo));
1467 return 0;
1468 }
1469 else if (!program_signals_p
1470 /* If we have no way to know which signals GDB does not
1471 want to have passed to the program, assume
1472 SIGTRAP/SIGINT, which is GDB's default. */
1473 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1474 {
1475 if (debug_threads)
1476 debug_printf ("GPS: lwp %s had signal %s, "
1477 "but we don't know if we should pass it. "
1478 "Default to not.\n",
1479 target_pid_to_str (ptid_of (thread)),
1480 gdb_signal_to_string (signo));
1481 return 0;
1482 }
1483 else
1484 {
1485 if (debug_threads)
1486 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1487 target_pid_to_str (ptid_of (thread)),
1488 gdb_signal_to_string (signo));
1489
1490 return WSTOPSIG (status);
1491 }
1492 }
1493
1494 /* Detach from LWP. */
1495
1496 static void
1497 linux_detach_one_lwp (struct lwp_info *lwp)
1498 {
1499 struct thread_info *thread = get_lwp_thread (lwp);
1500 int sig;
1501 int lwpid;
1502
1503 /* If there is a pending SIGSTOP, get rid of it. */
1504 if (lwp->stop_expected)
1505 {
1506 if (debug_threads)
1507 debug_printf ("Sending SIGCONT to %s\n",
1508 target_pid_to_str (ptid_of (thread)));
1509
1510 kill_lwp (lwpid_of (thread), SIGCONT);
1511 lwp->stop_expected = 0;
1512 }
1513
1514 /* Pass on any pending signal for this thread. */
1515 sig = get_detach_signal (thread);
1516
1517 /* Preparing to resume may try to write registers, and fail if the
1518 lwp is zombie. If that happens, ignore the error. We'll handle
1519 it below, when detach fails with ESRCH. */
1520 TRY
1521 {
1522 /* Flush any pending changes to the process's registers. */
1523 regcache_invalidate_thread (thread);
1524
1525 /* Finally, let it resume. */
1526 if (the_low_target.prepare_to_resume != NULL)
1527 the_low_target.prepare_to_resume (lwp);
1528 }
1529 CATCH (ex, RETURN_MASK_ERROR)
1530 {
1531 if (!check_ptrace_stopped_lwp_gone (lwp))
1532 throw_exception (ex);
1533 }
1534 END_CATCH
1535
1536 lwpid = lwpid_of (thread);
1537 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1538 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1539 {
1540 int save_errno = errno;
1541
1542 /* We know the thread exists, so ESRCH must mean the lwp is
1543 zombie. This can happen if one of the already-detached
1544 threads exits the whole thread group. In that case we're
1545 still attached, and must reap the lwp. */
1546 if (save_errno == ESRCH)
1547 {
1548 int ret, status;
1549
1550 ret = my_waitpid (lwpid, &status, __WALL);
1551 if (ret == -1)
1552 {
1553 warning (_("Couldn't reap LWP %d while detaching: %s"),
1554 lwpid, strerror (errno));
1555 }
1556 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1557 {
1558 warning (_("Reaping LWP %d while detaching "
1559 "returned unexpected status 0x%x"),
1560 lwpid, status);
1561 }
1562 }
1563 else
1564 {
1565 error (_("Can't detach %s: %s"),
1566 target_pid_to_str (ptid_of (thread)),
1567 strerror (save_errno));
1568 }
1569 }
1570 else if (debug_threads)
1571 {
1572 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1573 target_pid_to_str (ptid_of (thread)),
1574 strsignal (sig));
1575 }
1576
1577 delete_lwp (lwp);
1578 }
1579
1580 /* Callback for find_inferior. Detaches from non-leader threads of a
1581 given process. */
1582
1583 static int
1584 linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
1585 {
1586 struct thread_info *thread = (struct thread_info *) entry;
1587 struct lwp_info *lwp = get_thread_lwp (thread);
1588 int pid = *(int *) args;
1589 int lwpid = lwpid_of (thread);
1590
1591 /* Skip other processes. */
1592 if (ptid_get_pid (entry->id) != pid)
1593 return 0;
1594
1595 /* We don't actually detach from the thread group leader just yet.
1596 If the thread group exits, we must reap the zombie clone lwps
1597 before we're able to reap the leader. */
1598 if (ptid_get_pid (entry->id) == lwpid)
1599 return 0;
1600
1601 linux_detach_one_lwp (lwp);
1602 return 0;
1603 }
1604
1605 static int
1606 linux_detach (int pid)
1607 {
1608 struct process_info *process;
1609 struct lwp_info *main_lwp;
1610
1611 process = find_process_pid (pid);
1612 if (process == NULL)
1613 return -1;
1614
1615 /* If a step-over is already in progress, let it finish first;
1616 otherwise nesting a stabilize_threads operation on top gets real
1617 messy. */
1618 complete_ongoing_step_over ();
1619
1620 /* Stop all threads before detaching. First, ptrace requires that
1621 the thread is stopped to successfully detach. Second, thread_db
1622 may need to uninstall thread event breakpoints from memory, which
1623 only works with a stopped process anyway. */
1624 stop_all_lwps (0, NULL);
1625
1626 #ifdef USE_THREAD_DB
1627 thread_db_detach (process);
1628 #endif
1629
1630 /* Stabilize threads (move out of jump pads). */
1631 stabilize_threads ();
1632
1633 /* Detach from the clone lwps first. If the thread group exits just
1634 while we're detaching, we must reap the clone lwps before we're
1635 able to reap the leader. */
1636 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1637
1638 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1639 linux_detach_one_lwp (main_lwp);
1640
1641 the_target->mourn (process);
1642
1643 /* Since we presently can only stop all lwps of all processes, we
1644 need to unstop lwps of other processes. */
1645 unstop_all_lwps (0, NULL);
1646 return 0;
1647 }
1648
1649 /* Remove all LWPs that belong to process PROC from the lwp list. */
1650
1651 static int
1652 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1653 {
1654 struct thread_info *thread = (struct thread_info *) entry;
1655 struct lwp_info *lwp = get_thread_lwp (thread);
1656 struct process_info *process = (struct process_info *) proc;
1657
1658 if (pid_of (thread) == pid_of (process))
1659 delete_lwp (lwp);
1660
1661 return 0;
1662 }
1663
1664 static void
1665 linux_mourn (struct process_info *process)
1666 {
1667 struct process_info_private *priv;
1668
1669 #ifdef USE_THREAD_DB
1670 thread_db_mourn (process);
1671 #endif
1672
1673 find_inferior (&all_threads, delete_lwp_callback, process);
1674
1675 /* Free all private data. */
1676 priv = process->priv;
1677 free (priv->arch_private);
1678 free (priv);
1679 process->priv = NULL;
1680
1681 remove_process (process);
1682 }
1683
1684 static void
1685 linux_join (int pid)
1686 {
1687 int status, ret;
1688
1689 do {
1690 ret = my_waitpid (pid, &status, 0);
1691 if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
1692 break;
1693 } while (ret != -1 || errno != ECHILD);
1694 }
1695
1696 /* Return nonzero if the given thread is still alive. */
1697 static int
1698 linux_thread_alive (ptid_t ptid)
1699 {
1700 struct lwp_info *lwp = find_lwp_pid (ptid);
1701
1702 /* We assume we always know if a thread exits. If a whole process
1703 exited but we still haven't been able to report it to GDB, we'll
1704 hold on to the last lwp of the dead process. */
1705 if (lwp != NULL)
1706 return !lwp_is_marked_dead (lwp);
1707 else
1708 return 0;
1709 }
1710
1711 /* Return 1 if this lwp still has an interesting status pending. If
1712 not (e.g., it had stopped for a breakpoint that is gone), return
1713 0. */
1714
1715 static int
1716 thread_still_has_status_pending_p (struct thread_info *thread)
1717 {
1718 struct lwp_info *lp = get_thread_lwp (thread);
1719
1720 if (!lp->status_pending_p)
1721 return 0;
1722
1723 if (thread->last_resume_kind != resume_stop
1724 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1725 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1726 {
1727 struct thread_info *saved_thread;
1728 CORE_ADDR pc;
1729 int discard = 0;
1730
1731 gdb_assert (lp->last_status != 0);
1732
1733 pc = get_pc (lp);
1734
1735 saved_thread = current_thread;
1736 current_thread = thread;
1737
1738 if (pc != lp->stop_pc)
1739 {
1740 if (debug_threads)
1741 debug_printf ("PC of %ld changed\n",
1742 lwpid_of (thread));
1743 discard = 1;
1744 }
1745
1746 #if !USE_SIGTRAP_SIGINFO
1747 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1748 && !(*the_low_target.breakpoint_at) (pc))
1749 {
1750 if (debug_threads)
1751 debug_printf ("previous SW breakpoint of %ld gone\n",
1752 lwpid_of (thread));
1753 discard = 1;
1754 }
1755 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1756 && !hardware_breakpoint_inserted_here (pc))
1757 {
1758 if (debug_threads)
1759 debug_printf ("previous HW breakpoint of %ld gone\n",
1760 lwpid_of (thread));
1761 discard = 1;
1762 }
1763 #endif
1764
1765 current_thread = saved_thread;
1766
1767 if (discard)
1768 {
1769 if (debug_threads)
1770 debug_printf ("discarding pending breakpoint status\n");
1771 lp->status_pending_p = 0;
1772 return 0;
1773 }
1774 }
1775
1776 return 1;
1777 }
1778
1779 /* Returns true if LWP is resumed from the client's perspective. */
1780
1781 static int
1782 lwp_resumed (struct lwp_info *lwp)
1783 {
1784 struct thread_info *thread = get_lwp_thread (lwp);
1785
1786 if (thread->last_resume_kind != resume_stop)
1787 return 1;
1788
1789 /* Did gdb send us a `vCont;t', but we haven't reported the
1790 corresponding stop to gdb yet? If so, the thread is still
1791 resumed/running from gdb's perspective. */
1792 if (thread->last_resume_kind == resume_stop
1793 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1794 return 1;
1795
1796 return 0;
1797 }
1798
1799 /* Return 1 if this lwp has an interesting status pending. */
1800 static int
1801 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1802 {
1803 struct thread_info *thread = (struct thread_info *) entry;
1804 struct lwp_info *lp = get_thread_lwp (thread);
1805 ptid_t ptid = * (ptid_t *) arg;
1806
1807 /* Check if we're only interested in events from a specific process
1808 or a specific LWP. */
1809 if (!ptid_match (ptid_of (thread), ptid))
1810 return 0;
1811
1812 if (!lwp_resumed (lp))
1813 return 0;
1814
1815 if (lp->status_pending_p
1816 && !thread_still_has_status_pending_p (thread))
1817 {
1818 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1819 return 0;
1820 }
1821
1822 return lp->status_pending_p;
1823 }
1824
1825 static int
1826 same_lwp (struct inferior_list_entry *entry, void *data)
1827 {
1828 ptid_t ptid = *(ptid_t *) data;
1829 int lwp;
1830
1831 if (ptid_get_lwp (ptid) != 0)
1832 lwp = ptid_get_lwp (ptid);
1833 else
1834 lwp = ptid_get_pid (ptid);
1835
1836 if (ptid_get_lwp (entry->id) == lwp)
1837 return 1;
1838
1839 return 0;
1840 }
1841
1842 struct lwp_info *
1843 find_lwp_pid (ptid_t ptid)
1844 {
1845 struct inferior_list_entry *thread
1846 = find_inferior (&all_threads, same_lwp, &ptid);
1847
1848 if (thread == NULL)
1849 return NULL;
1850
1851 return get_thread_lwp ((struct thread_info *) thread);
1852 }
1853
1854 /* Return the number of known LWPs in the tgid given by PID. */
1855
1856 static int
1857 num_lwps (int pid)
1858 {
1859 struct inferior_list_entry *inf, *tmp;
1860 int count = 0;
1861
1862 ALL_INFERIORS (&all_threads, inf, tmp)
1863 {
1864 if (ptid_get_pid (inf->id) == pid)
1865 count++;
1866 }
1867
1868 return count;
1869 }
1870
1871 /* The arguments passed to iterate_over_lwps. */
1872
1873 struct iterate_over_lwps_args
1874 {
1875 /* The FILTER argument passed to iterate_over_lwps. */
1876 ptid_t filter;
1877
1878 /* The CALLBACK argument passed to iterate_over_lwps. */
1879 iterate_over_lwps_ftype *callback;
1880
1881 /* The DATA argument passed to iterate_over_lwps. */
1882 void *data;
1883 };
1884
1885 /* Callback for find_inferior used by iterate_over_lwps to filter
1886 calls to the callback supplied to that function. Returning a
1887 nonzero value causes find_inferior to stop iterating and return
1888 the current inferior_list_entry. Returning zero indicates that
1889 find_inferior should continue iterating. */
1890
1891 static int
1892 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1893 {
1894 struct iterate_over_lwps_args *args
1895 = (struct iterate_over_lwps_args *) args_p;
1896
1897 if (ptid_match (entry->id, args->filter))
1898 {
1899 struct thread_info *thr = (struct thread_info *) entry;
1900 struct lwp_info *lwp = get_thread_lwp (thr);
1901
1902 return (*args->callback) (lwp, args->data);
1903 }
1904
1905 return 0;
1906 }
1907
1908 /* See nat/linux-nat.h. */
1909
1910 struct lwp_info *
1911 iterate_over_lwps (ptid_t filter,
1912 iterate_over_lwps_ftype callback,
1913 void *data)
1914 {
1915 struct iterate_over_lwps_args args = {filter, callback, data};
1916 struct inferior_list_entry *entry;
1917
1918 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1919 if (entry == NULL)
1920 return NULL;
1921
1922 return get_thread_lwp ((struct thread_info *) entry);
1923 }
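/* For illustration only (this sketch is not used by gdbserver
itself): an iterate_over_lwps callback returns nonzero to stop the
iteration and make iterate_over_lwps return that LWP, or zero to
keep iterating. A hypothetical predicate finding the first stopped
LWP of process PID would look like:

   static int
   lwp_is_stopped_cb (struct lwp_info *lwp, void *data)
   {
     return lwp->stopped;
   }

   struct lwp_info *stopped_lwp
     = iterate_over_lwps (pid_to_ptid (pid), lwp_is_stopped_cb, NULL);
*/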
1924
1925 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1926 their exits until all other threads in the group have exited. */
1927
1928 static void
1929 check_zombie_leaders (void)
1930 {
1931 struct process_info *proc, *tmp;
1932
1933 ALL_PROCESSES (proc, tmp)
1934 {
1935 pid_t leader_pid = pid_of (proc);
1936 struct lwp_info *leader_lp;
1937
1938 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1939
1940 if (debug_threads)
1941 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1942 "num_lwps=%d, zombie=%d\n",
1943 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1944 linux_proc_pid_is_zombie (leader_pid));
1945
1946 if (leader_lp != NULL && !leader_lp->stopped
1947 /* Check if there are other threads in the group, as we may
1948 have raced with the inferior simply exiting. */
1949 && !last_thread_of_process_p (leader_pid)
1950 && linux_proc_pid_is_zombie (leader_pid))
1951 {
1952 /* A leader zombie can mean one of two things:
1953
1954 - It exited, and there's an exit status pending, or only
1955 the leader exited (not the whole program). In the latter
1956 case, we can't waitpid the leader's exit status until all
1957 other threads are gone.
1958
1959 - There are 3 or more threads in the group, and a thread
1960 other than the leader exec'd. On an exec, the Linux
1961 kernel destroys all other threads (except the execing
1962 one) in the thread group, and resets the execing thread's
1963 tid to the tgid. No exit notification is sent for the
1964 execing thread -- from the ptracer's perspective, it
1965 appears as though the execing thread just vanishes.
1966 Until we reap all other threads except the leader and the
1967 execing thread, the leader will be zombie, and the
1968 execing thread will be in `D (disk sleep)'. As soon as
1969 all other threads are reaped, the execing thread changes
1970 its tid to the tgid, and the previous (zombie) leader
1971 vanishes, giving way to the "new" leader. We could try
1972 distinguishing the exit and exec cases, by waiting once
1973 more, and seeing if something comes out, but it doesn't
1974 sound useful. The previous leader _does_ go away, and
1975 we'll re-add the new one once we see the exec event
1976 (which is just the same as what would happen if the
1977 previous leader did exit voluntarily before some other
1978 thread execs). */
1979
1980 if (debug_threads)
1981 fprintf (stderr,
1982 "CZL: Thread group leader %d zombie "
1983 "(it exited, or another thread execd).\n",
1984 leader_pid);
1985
1986 delete_lwp (leader_lp);
1987 }
1988 }
1989 }
1990
1991 /* Callback for `find_inferior'. Returns the first LWP that is not
1992 stopped. ARG is a PTID filter. */
1993
1994 static int
1995 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1996 {
1997 struct thread_info *thr = (struct thread_info *) entry;
1998 struct lwp_info *lwp;
1999 ptid_t filter = *(ptid_t *) arg;
2000
2001 if (!ptid_match (ptid_of (thr), filter))
2002 return 0;
2003
2004 lwp = get_thread_lwp (thr);
2005 if (!lwp->stopped)
2006 return 1;
2007
2008 return 0;
2009 }
2010
2011 /* Increment LWP's suspend count. */
2012
2013 static void
2014 lwp_suspended_inc (struct lwp_info *lwp)
2015 {
2016 lwp->suspended++;
2017
2018 if (debug_threads && lwp->suspended > 4)
2019 {
2020 struct thread_info *thread = get_lwp_thread (lwp);
2021
2022 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2023 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2024 }
2025 }
2026
2027 /* Decrement LWP's suspend count. */
2028
2029 static void
2030 lwp_suspended_decr (struct lwp_info *lwp)
2031 {
2032 lwp->suspended--;
2033
2034 if (lwp->suspended < 0)
2035 {
2036 struct thread_info *thread = get_lwp_thread (lwp);
2037
2038 internal_error (__FILE__, __LINE__,
2039 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2040 lwp->suspended);
2041 }
2042 }
2043
2044 /* This function should only be called if the LWP got a SIGTRAP.
2045
2046 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2047 event was handled, 0 otherwise. */
2048
2049 static int
2050 handle_tracepoints (struct lwp_info *lwp)
2051 {
2052 struct thread_info *tinfo = get_lwp_thread (lwp);
2053 int tpoint_related_event = 0;
2054
2055 gdb_assert (lwp->suspended == 0);
2056
2057 /* If this tracepoint hit causes a tracing stop, we'll immediately
2058 uninsert tracepoints. To do this, we temporarily pause all
2059 threads, unpatch away, and then unpause threads. We need to make
2060 sure the unpausing doesn't resume LWP too. */
2061 lwp_suspended_inc (lwp);
2062
2063 /* And we need to be sure that any all-threads-stopping doesn't try
2064 to move threads out of the jump pads, as it could deadlock the
2065 inferior (LWP could be in the jump pad, maybe even holding the
2066 lock). */
2067
2068 /* Do any necessary step collect actions. */
2069 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2070
2071 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2072
2073 /* See if we just hit a tracepoint and do its main collect
2074 actions. */
2075 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2076
2077 lwp_suspended_decr (lwp);
2078
2079 gdb_assert (lwp->suspended == 0);
2080 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2081
2082 if (tpoint_related_event)
2083 {
2084 if (debug_threads)
2085 debug_printf ("got a tracepoint event\n");
2086 return 1;
2087 }
2088
2089 return 0;
2090 }
2091
2092 /* Convenience wrapper around fast_tracepoint_collecting. Returns
2093 nonzero if LWP is presently collecting a fast tracepoint. */
2094
2095 static int
2096 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2097 struct fast_tpoint_collect_status *status)
2098 {
2099 CORE_ADDR thread_area;
2100 struct thread_info *thread = get_lwp_thread (lwp);
2101
2102 if (the_low_target.get_thread_area == NULL)
2103 return 0;
2104
2105 /* Get the thread area address. This is used to recognize which
2106 thread is which when tracing with the in-process agent library.
2107 We don't read anything from the address, and treat it as opaque;
2108 it's the address itself that we assume is unique per-thread. */
2109 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2110 return 0;
2111
2112 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2113 }
2114
2115 /* We resume in the caller because we want to be able to pass
2116 lwp->status_pending as WSTAT, and we need to clear
2117 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2118 refuses to resume. */
2119
2120 static int
2121 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2122 {
2123 struct thread_info *saved_thread;
2124
2125 saved_thread = current_thread;
2126 current_thread = get_lwp_thread (lwp);
2127
2128 if ((wstat == NULL
2129 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2130 && supports_fast_tracepoints ()
2131 && agent_loaded_p ())
2132 {
2133 struct fast_tpoint_collect_status status;
2134 int r;
2135
2136 if (debug_threads)
2137 debug_printf ("Checking whether LWP %ld needs to move out of the "
2138 "jump pad.\n",
2139 lwpid_of (current_thread));
2140
2141 r = linux_fast_tracepoint_collecting (lwp, &status);
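/* R is the fast_tracepoint_collecting code: zero if LWP is not in
a jump pad, nonzero if it is; the r == 1 case below means the
relocated original instruction has not been executed yet. */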
2142
2143 if (wstat == NULL
2144 || (WSTOPSIG (*wstat) != SIGILL
2145 && WSTOPSIG (*wstat) != SIGFPE
2146 && WSTOPSIG (*wstat) != SIGSEGV
2147 && WSTOPSIG (*wstat) != SIGBUS))
2148 {
2149 lwp->collecting_fast_tracepoint = r;
2150
2151 if (r != 0)
2152 {
2153 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2154 {
2155 /* Haven't executed the original instruction yet.
2156 Set breakpoint there, and wait till it's hit,
2157 then single-step until exiting the jump pad. */
2158 lwp->exit_jump_pad_bkpt
2159 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2160 }
2161
2162 if (debug_threads)
2163 debug_printf ("Checking whether LWP %ld needs to move out of "
2164 "the jump pad...it does\n",
2165 lwpid_of (current_thread));
2166 current_thread = saved_thread;
2167
2168 return 1;
2169 }
2170 }
2171 else
2172 {
2173 /* If we get a synchronous signal while collecting, *and*
2174 while executing the (relocated) original instruction,
2175 reset the PC to point at the tpoint address, before
2176 reporting to GDB. Otherwise, it's an IPA lib bug: just
2177 report the signal to GDB, and pray for the best. */
2178
2179 lwp->collecting_fast_tracepoint = 0;
2180
2181 if (r != 0
2182 && (status.adjusted_insn_addr <= lwp->stop_pc
2183 && lwp->stop_pc < status.adjusted_insn_addr_end))
2184 {
2185 siginfo_t info;
2186 struct regcache *regcache;
2187
2188 /* The si_addr on a few signals references the address
2189 of the faulting instruction. Adjust that as
2190 well. */
2191 if ((WSTOPSIG (*wstat) == SIGILL
2192 || WSTOPSIG (*wstat) == SIGFPE
2193 || WSTOPSIG (*wstat) == SIGBUS
2194 || WSTOPSIG (*wstat) == SIGSEGV)
2195 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2196 (PTRACE_TYPE_ARG3) 0, &info) == 0
2197 /* Final check just to make sure we don't clobber
2198 the siginfo of non-kernel-sent signals. */
2199 && (uintptr_t) info.si_addr == lwp->stop_pc)
2200 {
2201 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2202 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2203 (PTRACE_TYPE_ARG3) 0, &info);
2204 }
2205
2206 regcache = get_thread_regcache (current_thread, 1);
2207 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2208 lwp->stop_pc = status.tpoint_addr;
2209
2210 /* Cancel any fast tracepoint lock this thread was
2211 holding. */
2212 force_unlock_trace_buffer ();
2213 }
2214
2215 if (lwp->exit_jump_pad_bkpt != NULL)
2216 {
2217 if (debug_threads)
2218 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2219 "stopping all threads momentarily.\n");
2220
2221 stop_all_lwps (1, lwp);
2222
2223 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2224 lwp->exit_jump_pad_bkpt = NULL;
2225
2226 unstop_all_lwps (1, lwp);
2227
2228 gdb_assert (lwp->suspended >= 0);
2229 }
2230 }
2231 }
2232
2233 if (debug_threads)
2234 debug_printf ("Checking whether LWP %ld needs to move out of the "
2235 "jump pad...no\n",
2236 lwpid_of (current_thread));
2237
2238 current_thread = saved_thread;
2239 return 0;
2240 }
2241
2242 /* Enqueue one signal in the "signals to report later when out of the
2243 jump pad" list. */
2244
2245 static void
2246 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2247 {
2248 struct pending_signals *p_sig;
2249 struct thread_info *thread = get_lwp_thread (lwp);
2250
2251 if (debug_threads)
2252 debug_printf ("Deferring signal %d for LWP %ld.\n",
2253 WSTOPSIG (*wstat), lwpid_of (thread));
2254
2255 if (debug_threads)
2256 {
2257 struct pending_signals *sig;
2258
2259 for (sig = lwp->pending_signals_to_report;
2260 sig != NULL;
2261 sig = sig->prev)
2262 debug_printf (" Already queued %d\n",
2263 sig->signal);
2264
2265 debug_printf (" (no more currently queued signals)\n");
2266 }
2267
2268 /* Don't enqueue non-RT signals if they are already in the deferred
2269 queue. (SIGSTOP is the easiest signal to see ending up here
2270 twice.) */
2271 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2272 {
2273 struct pending_signals *sig;
2274
2275 for (sig = lwp->pending_signals_to_report;
2276 sig != NULL;
2277 sig = sig->prev)
2278 {
2279 if (sig->signal == WSTOPSIG (*wstat))
2280 {
2281 if (debug_threads)
2282 debug_printf ("Not requeuing already queued non-RT signal %d"
2283 " for LWP %ld\n",
2284 sig->signal,
2285 lwpid_of (thread));
2286 return;
2287 }
2288 }
2289 }
2290
2291 p_sig = XCNEW (struct pending_signals);
2292 p_sig->prev = lwp->pending_signals_to_report;
2293 p_sig->signal = WSTOPSIG (*wstat);
2294
2295 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2296 &p_sig->info);
2297
2298 lwp->pending_signals_to_report = p_sig;
2299 }
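/* Note the list above is kept newest-first (P_SIG->prev points at
older entries); dequeue_one_deferred_signal below walks to the
tail, so overall the signals are re-reported in the order they
were received. */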
2300
2301 /* Dequeue one signal from the "signals to report later when out of
2302 the jump pad" list. */
2303
2304 static int
2305 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2306 {
2307 struct thread_info *thread = get_lwp_thread (lwp);
2308
2309 if (lwp->pending_signals_to_report != NULL)
2310 {
2311 struct pending_signals **p_sig;
2312
2313 p_sig = &lwp->pending_signals_to_report;
2314 while ((*p_sig)->prev != NULL)
2315 p_sig = &(*p_sig)->prev;
2316
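/* Synthesize a wait status as if the LWP had just stopped with
this signal. With glibc, W_STOPCODE (sig) is essentially
((sig) << 8) | 0x7f. */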
2317 *wstat = W_STOPCODE ((*p_sig)->signal);
2318 if ((*p_sig)->info.si_signo != 0)
2319 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2320 &(*p_sig)->info);
2321 free (*p_sig);
2322 *p_sig = NULL;
2323
2324 if (debug_threads)
2325 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2326 WSTOPSIG (*wstat), lwpid_of (thread));
2327
2328 if (debug_threads)
2329 {
2330 struct pending_signals *sig;
2331
2332 for (sig = lwp->pending_signals_to_report;
2333 sig != NULL;
2334 sig = sig->prev)
2335 debug_printf (" Still queued %d\n",
2336 sig->signal);
2337
2338 debug_printf (" (no more queued signals)\n");
2339 }
2340
2341 return 1;
2342 }
2343
2344 return 0;
2345 }
2346
2347 /* Fetch the possibly triggered data watchpoint info and store it in
2348 CHILD.
2349
2350 On some archs that use debug registers to set watchpoints, like
2351 x86, the way to know which watched address trapped is to check
2352 the register that is used to select which address to watch. The
2353 problem is that between setting the watchpoint
2354 and reading back which data address trapped, the user may change
2355 the set of watchpoints, and, as a consequence, GDB changes the
2356 debug registers in the inferior. To avoid reading back a stale
2357 stopped-data-address when that happens, we cache in LP the fact
2358 that a watchpoint trapped, and the corresponding data address, as
2359 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2360 registers meanwhile, we have the cached data we can rely on. */
2361
2362 static int
2363 check_stopped_by_watchpoint (struct lwp_info *child)
2364 {
2365 if (the_low_target.stopped_by_watchpoint != NULL)
2366 {
2367 struct thread_info *saved_thread;
2368
2369 saved_thread = current_thread;
2370 current_thread = get_lwp_thread (child);
2371
2372 if (the_low_target.stopped_by_watchpoint ())
2373 {
2374 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2375
2376 if (the_low_target.stopped_data_address != NULL)
2377 child->stopped_data_address
2378 = the_low_target.stopped_data_address ();
2379 else
2380 child->stopped_data_address = 0;
2381 }
2382
2383 current_thread = saved_thread;
2384 }
2385
2386 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2387 }
2388
2389 /* Return the ptrace options that we want to try to enable. */
2390
2391 static int
2392 linux_low_ptrace_options (int attached)
2393 {
2394 int options = 0;
2395
2396 if (!attached)
2397 options |= PTRACE_O_EXITKILL;
2398
2399 if (report_fork_events)
2400 options |= PTRACE_O_TRACEFORK;
2401
2402 if (report_vfork_events)
2403 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2404
2405 if (report_exec_events)
2406 options |= PTRACE_O_TRACEEXEC;
2407
2408 options |= PTRACE_O_TRACESYSGOOD;
2409
2410 return options;
2411 }
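/* For example, for a launched (non-attached) inferior with fork
event reporting enabled but vfork and exec reporting disabled,
this returns PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK
| PTRACE_O_TRACESYSGOOD. */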
2412
2413 /* Do low-level handling of the event, and check if we should go on
2414 and pass it to caller code. Return the affected LWP if we should,
2415 or NULL otherwise. */
2416
2417 static struct lwp_info *
2418 linux_low_filter_event (int lwpid, int wstat)
2419 {
2420 struct lwp_info *child;
2421 struct thread_info *thread;
2422 int have_stop_pc = 0;
2423
2424 child = find_lwp_pid (pid_to_ptid (lwpid));
2425
2426 /* Check for stop events reported by a process we didn't already
2427 know about - anything not already in our LWP list.
2428
2429 If we're expecting to receive stopped processes after
2430 fork, vfork, and clone events, then we'll just add the
2431 new one to our list and go back to waiting for the event
2432 to be reported - the stopped process might be returned
2433 from waitpid before or after the event is.
2434
2435 But note the case of a non-leader thread exec'ing after the
2436 leader has exited and gone from our lists (because
2437 check_zombie_leaders deleted it). The non-leader thread
2438 changes its tid to the tgid. */
2439
2440 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2441 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2442 {
2443 ptid_t child_ptid;
2444
2445 /* A multi-thread exec after we had seen the leader exiting. */
2446 if (debug_threads)
2447 {
2448 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2449 "after exec.\n", lwpid);
2450 }
2451
2452 child_ptid = ptid_build (lwpid, lwpid, 0);
2453 child = add_lwp (child_ptid);
2454 child->stopped = 1;
2455 current_thread = child->thread;
2456 }
2457
2458 /* If we didn't find a process, one of two things presumably happened:
2459 - A process we started and then detached from has exited. Ignore it.
2460 - A process we are controlling has forked and the new child's stop
2461 was reported to us by the kernel. Save its PID. */
2462 if (child == NULL && WIFSTOPPED (wstat))
2463 {
2464 add_to_pid_list (&stopped_pids, lwpid, wstat);
2465 return NULL;
2466 }
2467 else if (child == NULL)
2468 return NULL;
2469
2470 thread = get_lwp_thread (child);
2471
2472 child->stopped = 1;
2473
2474 child->last_status = wstat;
2475
2476 /* Check if the thread has exited. */
2477 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2478 {
2479 if (debug_threads)
2480 debug_printf ("LLFE: %d exited.\n", lwpid);
2481
2482 if (finish_step_over (child))
2483 {
2484 /* Unsuspend all other LWPs, and set them back running again. */
2485 unsuspend_all_lwps (child);
2486 }
2487
2488 /* If there is at least one more LWP, then the exit signal was
2489 not the end of the debugged application and should be
2490 ignored, unless GDB wants to hear about thread exits. */
2491 if (report_thread_events
2492 || last_thread_of_process_p (pid_of (thread)))
2493 {
2494 /* Since events are serialized to the GDB core, we can't
2495 report this one right now. Leave the status pending for
2496 the next time we're able to report it. */
2497 mark_lwp_dead (child, wstat);
2498 return child;
2499 }
2500 else
2501 {
2502 delete_lwp (child);
2503 return NULL;
2504 }
2505 }
2506
2507 gdb_assert (WIFSTOPPED (wstat));
2508
2509 if (WIFSTOPPED (wstat))
2510 {
2511 struct process_info *proc;
2512
2513 /* Architecture-specific setup after inferior is running. */
2514 proc = find_process_pid (pid_of (thread));
2515 if (proc->tdesc == NULL)
2516 {
2517 if (proc->attached)
2518 {
2519 /* This needs to happen after we have attached to the
2520 inferior and it is stopped for the first time, but
2521 before we access any inferior registers. */
2522 linux_arch_setup_thread (thread);
2523 }
2524 else
2525 {
2526 /* The process is started, but GDBserver will do
2527 architecture-specific setup after the program stops at
2528 the first instruction. */
2529 child->status_pending_p = 1;
2530 child->status_pending = wstat;
2531 return child;
2532 }
2533 }
2534 }
2535
2536 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2537 {
2538 struct process_info *proc = find_process_pid (pid_of (thread));
2539 int options = linux_low_ptrace_options (proc->attached);
2540
2541 linux_enable_event_reporting (lwpid, options);
2542 child->must_set_ptrace_flags = 0;
2543 }
2544
2545 /* Always update syscall_state, even if it will be filtered later. */
2546 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2547 {
2548 child->syscall_state
2549 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2550 ? TARGET_WAITKIND_SYSCALL_RETURN
2551 : TARGET_WAITKIND_SYSCALL_ENTRY);
2552 }
2553 else
2554 {
2555 /* Almost all other ptrace-stops are known to be outside of system
2556 calls, with further exceptions in handle_extended_wait. */
2557 child->syscall_state = TARGET_WAITKIND_IGNORE;
2558 }
2559
2560 /* Be careful to not overwrite stop_pc until save_stop_reason is
2561 called. */
2562 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2563 && linux_is_extended_waitstatus (wstat))
2564 {
2565 child->stop_pc = get_pc (child);
2566 if (handle_extended_wait (&child, wstat))
2567 {
2568 /* The event has been handled, so just return without
2569 reporting it. */
2570 return NULL;
2571 }
2572 }
2573
2574 if (linux_wstatus_maybe_breakpoint (wstat))
2575 {
2576 if (save_stop_reason (child))
2577 have_stop_pc = 1;
2578 }
2579
2580 if (!have_stop_pc)
2581 child->stop_pc = get_pc (child);
2582
2583 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2584 && child->stop_expected)
2585 {
2586 if (debug_threads)
2587 debug_printf ("Expected stop.\n");
2588 child->stop_expected = 0;
2589
2590 if (thread->last_resume_kind == resume_stop)
2591 {
2592 /* We want to report the stop to the core. Treat the
2593 SIGSTOP as a normal event. */
2594 if (debug_threads)
2595 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2596 target_pid_to_str (ptid_of (thread)));
2597 }
2598 else if (stopping_threads != NOT_STOPPING_THREADS)
2599 {
2600 /* Stopping threads. We don't want this SIGSTOP to end up
2601 pending. */
2602 if (debug_threads)
2603 debug_printf ("LLW: SIGSTOP caught for %s "
2604 "while stopping threads.\n",
2605 target_pid_to_str (ptid_of (thread)));
2606 return NULL;
2607 }
2608 else
2609 {
2610 /* This is a delayed SIGSTOP. Filter out the event. */
2611 if (debug_threads)
2612 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2613 child->stepping ? "step" : "continue",
2614 target_pid_to_str (ptid_of (thread)));
2615
2616 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2617 return NULL;
2618 }
2619 }
2620
2621 child->status_pending_p = 1;
2622 child->status_pending = wstat;
2623 return child;
2624 }
2625
2626 /* Return true if THREAD is doing hardware single step. */
2627
2628 static int
2629 maybe_hw_step (struct thread_info *thread)
2630 {
2631 if (can_hardware_single_step ())
2632 return 1;
2633 else
2634 {
2635 struct process_info *proc = get_thread_process (thread);
2636
2637 /* GDBserver must insert a reinsert breakpoint for software
2638 single-stepping. */
2639 gdb_assert (has_reinsert_breakpoints (proc));
2640 return 0;
2641 }
2642 }
2643
2644 /* Resume LWPs that are currently stopped without any pending status
2645 to report, but are resumed from the core's perspective. */
2646
2647 static void
2648 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2649 {
2650 struct thread_info *thread = (struct thread_info *) entry;
2651 struct lwp_info *lp = get_thread_lwp (thread);
2652
2653 if (lp->stopped
2654 && !lp->suspended
2655 && !lp->status_pending_p
2656 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2657 {
2658 int step = thread->last_resume_kind == resume_step;
2659
2660 if (debug_threads)
2661 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2662 target_pid_to_str (ptid_of (thread)),
2663 paddress (lp->stop_pc),
2664 step);
2665
2666 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2667 }
2668 }
2669
2670 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2671 match FILTER_PTID (leaving others pending). The PTIDs can be:
2672 minus_one_ptid, to specify any child; a pid PTID, specifying all
2673 lwps of a thread group; or a PTID representing a single lwp. Store
2674 the stop status through the status pointer WSTAT. OPTIONS is
2675 passed to the waitpid call. Return 0 if no event was found and
2676 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2677 was found. Return the PID of the stopped child otherwise. */
2678
2679 static int
2680 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2681 int *wstatp, int options)
2682 {
2683 struct thread_info *event_thread;
2684 struct lwp_info *event_child, *requested_child;
2685 sigset_t block_mask, prev_mask;
2686
2687 retry:
2688 /* N.B. event_thread points to the thread_info struct that contains
2689 event_child. Keep them in sync. */
2690 event_thread = NULL;
2691 event_child = NULL;
2692 requested_child = NULL;
2693
2694 /* Check for a lwp with a pending status. */
2695
2696 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2697 {
2698 event_thread = (struct thread_info *)
2699 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2700 if (event_thread != NULL)
2701 event_child = get_thread_lwp (event_thread);
2702 if (debug_threads && event_thread)
2703 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2704 }
2705 else if (!ptid_equal (filter_ptid, null_ptid))
2706 {
2707 requested_child = find_lwp_pid (filter_ptid);
2708
2709 if (stopping_threads == NOT_STOPPING_THREADS
2710 && requested_child->status_pending_p
2711 && requested_child->collecting_fast_tracepoint)
2712 {
2713 enqueue_one_deferred_signal (requested_child,
2714 &requested_child->status_pending);
2715 requested_child->status_pending_p = 0;
2716 requested_child->status_pending = 0;
2717 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2718 }
2719
2720 if (requested_child->suspended
2721 && requested_child->status_pending_p)
2722 {
2723 internal_error (__FILE__, __LINE__,
2724 "requesting an event out of a"
2725 " suspended child?");
2726 }
2727
2728 if (requested_child->status_pending_p)
2729 {
2730 event_child = requested_child;
2731 event_thread = get_lwp_thread (event_child);
2732 }
2733 }
2734
2735 if (event_child != NULL)
2736 {
2737 if (debug_threads)
2738 debug_printf ("Got an event from pending child %ld (%04x)\n",
2739 lwpid_of (event_thread), event_child->status_pending);
2740 *wstatp = event_child->status_pending;
2741 event_child->status_pending_p = 0;
2742 event_child->status_pending = 0;
2743 current_thread = event_thread;
2744 return lwpid_of (event_thread);
2745 }
2746
2747 /* But if we don't find a pending event, we'll have to wait.
2748
2749 We only enter this loop if no process has a pending wait status.
2750 Thus any action taken in response to a wait status inside this
2751 loop is responding as soon as we detect the status, not after any
2752 pending events. */
2753
2754 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2755 all signals while here. */
2756 sigfillset (&block_mask);
2757 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2758
2759 /* Always pull all events out of the kernel. We'll randomly select
2760 an event LWP out of all that have events, to prevent
2761 starvation. */
2762 while (event_child == NULL)
2763 {
2764 pid_t ret = 0;
2765
2766 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2767 quirks:
2768
2769 - If the thread group leader exits while other threads in the
2770 thread group still exist, waitpid(TGID, ...) hangs. That
2771 waitpid won't return an exit status until the other threads
2772 in the group are reaped.
2773
2774 - When a non-leader thread execs, that thread just vanishes
2775 without reporting an exit (so we'd hang if we waited for it
2776 explicitly in that case). The exec event is reported to
2777 the TGID pid. */
2778 errno = 0;
2779 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2780
2781 if (debug_threads)
2782 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2783 ret, errno ? strerror (errno) : "ERRNO-OK");
2784
2785 if (ret > 0)
2786 {
2787 if (debug_threads)
2788 {
2789 debug_printf ("LLW: waitpid %ld received %s\n",
2790 (long) ret, status_to_str (*wstatp));
2791 }
2792
2793 /* Filter all events. IOW, leave all events pending. We'll
2794 randomly select an event LWP out of all that have events
2795 below. */
2796 linux_low_filter_event (ret, *wstatp);
2797 /* Retry until nothing comes out of waitpid. A single
2798 SIGCHLD can indicate more than one child stopped. */
2799 continue;
2800 }
2801
2802 /* Now that we've pulled all events out of the kernel, resume
2803 LWPs that don't have an interesting event to report. */
2804 if (stopping_threads == NOT_STOPPING_THREADS)
2805 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2806
2807 /* ... and find an LWP with a status to report to the core, if
2808 any. */
2809 event_thread = (struct thread_info *)
2810 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2811 if (event_thread != NULL)
2812 {
2813 event_child = get_thread_lwp (event_thread);
2814 *wstatp = event_child->status_pending;
2815 event_child->status_pending_p = 0;
2816 event_child->status_pending = 0;
2817 break;
2818 }
2819
2820 /* Check for zombie thread group leaders. Those can't be reaped
2821 until all other threads in the thread group are. */
2822 check_zombie_leaders ();
2823
2824 /* If there are no resumed children left in the set of LWPs we
2825 want to wait for, bail. We can't just block in
2826 waitpid/sigsuspend, because lwps might have been left stopped
2827 in trace-stop state, and we'd be stuck forever waiting for
2828 their status to change (which would only happen if we resumed
2829 them). Even if WNOHANG is set, this return code is preferred
2830 over 0 (below), as it is more detailed. */
2831 if ((find_inferior (&all_threads,
2832 not_stopped_callback,
2833 &wait_ptid) == NULL))
2834 {
2835 if (debug_threads)
2836 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2837 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2838 return -1;
2839 }
2840
2841 /* No interesting event to report to the caller. */
2842 if ((options & WNOHANG))
2843 {
2844 if (debug_threads)
2845 debug_printf ("WNOHANG set, no event found\n");
2846
2847 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2848 return 0;
2849 }
2850
2851 /* Block until we get an event reported with SIGCHLD. */
2852 if (debug_threads)
2853 debug_printf ("sigsuspend'ing\n");
2854
2855 sigsuspend (&prev_mask);
2856 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2857 goto retry;
2858 }
2859
2860 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2861
2862 current_thread = event_thread;
2863
2864 return lwpid_of (event_thread);
2865 }
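/* The loop above is the classic race-free SIGCHLD wait. In outline
(a sketch eliding the gdbserver-specific filtering; record_event
and have_event are hypothetical names):

   sigfillset (&block_mask);
   sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
   for (;;)
     {
       while (waitpid (-1, &w, WNOHANG) > 0)
         record_event (w);
       if (have_event ())
         break;
       sigsuspend (&prev_mask);  -- atomically unblock and sleep
     }
   sigprocmask (SIG_SETMASK, &prev_mask, NULL);

Because sigsuspend installs PREV_MASK and sleeps atomically, a
SIGCHLD delivered between the last waitpid and the sigsuspend wakes
the sigsuspend instead of being lost. */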
2866
2867 /* Wait for an event from child(ren) PTID. PTIDs can be:
2868 minus_one_ptid, to specify any child; a pid PTID, specifying all
2869 lwps of a thread group; or a PTID representing a single lwp. Store
2870 the stop status through the status pointer WSTAT. OPTIONS is
2871 passed to the waitpid call. Return 0 if no event was found and
2872 OPTIONS contains WNOHANG. Return -1 if no unwaited-for child
2873 was found. Return the PID of the stopped child otherwise. */
2874
2875 static int
2876 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2877 {
2878 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2879 }
2880
2881 /* Count the LWPs that have had events. */
2882
2883 static int
2884 count_events_callback (struct inferior_list_entry *entry, void *data)
2885 {
2886 struct thread_info *thread = (struct thread_info *) entry;
2887 struct lwp_info *lp = get_thread_lwp (thread);
2888 int *count = (int *) data;
2889
2890 gdb_assert (count != NULL);
2891
2892 /* Count only resumed LWPs that have an event pending. */
2893 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2894 && lp->status_pending_p)
2895 (*count)++;
2896
2897 return 0;
2898 }
2899
2900 /* Select the LWP (if any) that is currently being single-stepped. */
2901
2902 static int
2903 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2904 {
2905 struct thread_info *thread = (struct thread_info *) entry;
2906 struct lwp_info *lp = get_thread_lwp (thread);
2907
2908 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2909 && thread->last_resume_kind == resume_step
2910 && lp->status_pending_p)
2911 return 1;
2912 else
2913 return 0;
2914 }
2915
2916 /* Select the Nth LWP that has had an event. */
2917
2918 static int
2919 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2920 {
2921 struct thread_info *thread = (struct thread_info *) entry;
2922 struct lwp_info *lp = get_thread_lwp (thread);
2923 int *selector = (int *) data;
2924
2925 gdb_assert (selector != NULL);
2926
2927 /* Select only resumed LWPs that have an event pending. */
2928 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2929 && lp->status_pending_p)
2930 if ((*selector)-- == 0)
2931 return 1;
2932
2933 return 0;
2934 }
2935
2936 /* Select one LWP out of those that have events pending. */
2937
2938 static void
2939 select_event_lwp (struct lwp_info **orig_lp)
2940 {
2941 int num_events = 0;
2942 int random_selector;
2943 struct thread_info *event_thread = NULL;
2944
2945 /* In all-stop, give preference to the LWP that is being
2946 single-stepped. There will be at most one, and it's the LWP that
2947 the core is most interested in. If we didn't do this, then we'd
2948 have to handle pending step SIGTRAPs somehow in case the core
2949 later continues the previously-stepped thread, otherwise we'd
2950 report the pending SIGTRAP, and the core, not having stepped the
2951 thread, wouldn't understand what the trap was for, and therefore
2952 would report it to the user as a random signal. */
2953 if (!non_stop)
2954 {
2955 event_thread
2956 = (struct thread_info *) find_inferior (&all_threads,
2957 select_singlestep_lwp_callback,
2958 NULL);
2959 if (event_thread != NULL)
2960 {
2961 if (debug_threads)
2962 debug_printf ("SEL: Select single-step %s\n",
2963 target_pid_to_str (ptid_of (event_thread)));
2964 }
2965 }
2966 if (event_thread == NULL)
2967 {
2968 /* No single-stepping LWP. Select one at random, out of those
2969 which have had events. */
2970
2971 /* First see how many events we have. */
2972 find_inferior (&all_threads, count_events_callback, &num_events);
2973 gdb_assert (num_events > 0);
2974
2975 /* Now randomly pick a LWP out of those that have had
2976 events. */
2977 random_selector = (int)
2978 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
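/* Dividing by RAND_MAX + 1.0 rather than RAND_MAX maps rand ()'s
[0, RAND_MAX] range onto [0, num_events) (approximately uniformly)
while keeping the result strictly below NUM_EVENTS. */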
2979
2980 if (debug_threads && num_events > 1)
2981 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2982 num_events, random_selector);
2983
2984 event_thread
2985 = (struct thread_info *) find_inferior (&all_threads,
2986 select_event_lwp_callback,
2987 &random_selector);
2988 }
2989
2990 if (event_thread != NULL)
2991 {
2992 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2993
2994 /* Switch the event LWP. */
2995 *orig_lp = event_lp;
2996 }
2997 }
2998
2999 /* Decrement the suspend count of an LWP. */
3000
3001 static int
3002 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
3003 {
3004 struct thread_info *thread = (struct thread_info *) entry;
3005 struct lwp_info *lwp = get_thread_lwp (thread);
3006
3007 /* Ignore EXCEPT. */
3008 if (lwp == except)
3009 return 0;
3010
3011 lwp_suspended_decr (lwp);
3012 return 0;
3013 }
3014
3015 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3016 non-NULL. */
3017
3018 static void
3019 unsuspend_all_lwps (struct lwp_info *except)
3020 {
3021 find_inferior (&all_threads, unsuspend_one_lwp, except);
3022 }
3023
3024 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3025 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3026 void *data);
3027 static int lwp_running (struct inferior_list_entry *entry, void *data);
3028 static ptid_t linux_wait_1 (ptid_t ptid,
3029 struct target_waitstatus *ourstatus,
3030 int target_options);
3031
3032 /* Stabilize threads (move out of jump pads).
3033
3034 If a thread is midway collecting a fast tracepoint, we need to
3035 finish the collection and move it out of the jump pad before
3036 reporting the signal.
3037
3038 This avoids recursion while collecting (when a signal arrives
3039 midway, and the signal handler itself collects), which would trash
3040 the trace buffer. In case the user set a breakpoint in a signal
3041 handler, this avoids the backtrace showing the jump pad, etc.
3042 Most importantly, there are certain things we can't do safely if
3043 threads are stopped in a jump pad (or in its callee's). For
3044 example:
3045
3046 - starting a new trace run. A thread still collecting the
3047 previous run could trash the trace buffer when resumed. The trace
3048 buffer control structures would have been reset but the thread had
3049 no way to tell. The thread could even be midway through memcpy'ing
3050 to the buffer, which would mean that when resumed, it would clobber
3051 the trace buffer that had been set up for a new run.
3052
3053 - we can't rewrite/reuse the jump pads for new tracepoints
3054 safely. Say you do tstart while a thread is stopped midway
3055 through collecting. When the thread is later resumed, it finishes
3056 the collection, and returns to the jump pad, to execute the original
3057 instruction that was under the tracepoint jump at the time the
3058 older run had been started. If the jump pad had since been
3059 rewritten for something else in the new run, the thread would now
3060 execute the wrong / random instructions. */
3061
3062 static void
3063 linux_stabilize_threads (void)
3064 {
3065 struct thread_info *saved_thread;
3066 struct thread_info *thread_stuck;
3067
3068 thread_stuck
3069 = (struct thread_info *) find_inferior (&all_threads,
3070 stuck_in_jump_pad_callback,
3071 NULL);
3072 if (thread_stuck != NULL)
3073 {
3074 if (debug_threads)
3075 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3076 lwpid_of (thread_stuck));
3077 return;
3078 }
3079
3080 saved_thread = current_thread;
3081
3082 stabilizing_threads = 1;
3083
3084 /* Kick 'em all. */
3085 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3086
3087 /* Loop until all are stopped out of the jump pads. */
3088 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3089 {
3090 struct target_waitstatus ourstatus;
3091 struct lwp_info *lwp;
3092 int wstat;
3093
3094 /* Note that we go through the full wait event loop. While
3095 moving threads out of the jump pads, we need to be able to step
3096 over internal breakpoints and such. */
3097 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3098
3099 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3100 {
3101 lwp = get_thread_lwp (current_thread);
3102
3103 /* Lock it. */
3104 lwp_suspended_inc (lwp);
3105
3106 if (ourstatus.value.sig != GDB_SIGNAL_0
3107 || current_thread->last_resume_kind == resume_stop)
3108 {
3109 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3110 enqueue_one_deferred_signal (lwp, &wstat);
3111 }
3112 }
3113 }
3114
3115 unsuspend_all_lwps (NULL);
3116
3117 stabilizing_threads = 0;
3118
3119 current_thread = saved_thread;
3120
3121 if (debug_threads)
3122 {
3123 thread_stuck
3124 = (struct thread_info *) find_inferior (&all_threads,
3125 stuck_in_jump_pad_callback,
3126 NULL);
3127 if (thread_stuck != NULL)
3128 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3129 lwpid_of (thread_stuck));
3130 }
3131 }
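/* In outline, the stabilization above is (a summary, not additional
logic):

   if (some thread is stuck in a jump pad)
     return;  -- we can't stabilize safely
   kick every thread inside a jump pad out of it
     (move_out_of_jump_pad_callback);
   while (some LWP is still running)
     wait; suspend whatever stopped; defer any interesting signal;
   unsuspend all LWPs.

The deferred signals are reported by later linux_wait_1 calls, once
their threads are out of the jump pads. */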
3132
3133 /* Convenience function that is called when the kernel reports an
3134 event that is not passed out to GDB. */
3135
3136 static ptid_t
3137 ignore_event (struct target_waitstatus *ourstatus)
3138 {
3139 /* If we got an event, there may still be others, as a single
3140 SIGCHLD can indicate more than one child stopped. This forces
3141 another target_wait call. */
3142 async_file_mark ();
3143
3144 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3145 return null_ptid;
3146 }
3147
3148 /* Convenience function that is called when the kernel reports an exit
3149 event. This decides whether to report the event to GDB as a
3150 process exit event, a thread exit event, or to suppress the
3151 event. */
3152
3153 static ptid_t
3154 filter_exit_event (struct lwp_info *event_child,
3155 struct target_waitstatus *ourstatus)
3156 {
3157 struct thread_info *thread = get_lwp_thread (event_child);
3158 ptid_t ptid = ptid_of (thread);
3159
3160 if (!last_thread_of_process_p (pid_of (thread)))
3161 {
3162 if (report_thread_events)
3163 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3164 else
3165 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3166
3167 delete_lwp (event_child);
3168 }
3169 return ptid;
3170 }
3171
3172 /* Returns 1 if GDB is interested in any event_child syscalls. */
3173
3174 static int
3175 gdb_catching_syscalls_p (struct lwp_info *event_child)
3176 {
3177 struct thread_info *thread = get_lwp_thread (event_child);
3178 struct process_info *proc = get_thread_process (thread);
3179
3180 return !VEC_empty (int, proc->syscalls_to_catch);
3181 }
3182
3183 /* Returns 1 if GDB is interested in the event_child syscall.
3184 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3185
3186 static int
3187 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3188 {
3189 int i, iter;
3190 int sysno;
3191 struct thread_info *thread = get_lwp_thread (event_child);
3192 struct process_info *proc = get_thread_process (thread);
3193
3194 if (VEC_empty (int, proc->syscalls_to_catch))
3195 return 0;
3196
3197 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3198 return 1;
3199
3200 get_syscall_trapinfo (event_child, &sysno);
3201 for (i = 0;
3202 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3203 i++)
3204 if (iter == sysno)
3205 return 1;
3206
3207 return 0;
3208 }
3209
3210 /* Wait for an event from the inferior process, and return its status. */
3211
3212 static ptid_t
3213 linux_wait_1 (ptid_t ptid,
3214 struct target_waitstatus *ourstatus, int target_options)
3215 {
3216 int w;
3217 struct lwp_info *event_child;
3218 int options;
3219 int pid;
3220 int step_over_finished;
3221 int bp_explains_trap;
3222 int maybe_internal_trap;
3223 int report_to_gdb;
3224 int trace_event;
3225 int in_step_range;
3226 int any_resumed;
3227
3228 if (debug_threads)
3229 {
3230 debug_enter ();
3231 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3232 }
3233
3234 /* Translate generic target options into linux options. */
3235 options = __WALL;
3236 if (target_options & TARGET_WNOHANG)
3237 options |= WNOHANG;
3238
3239 bp_explains_trap = 0;
3240 trace_event = 0;
3241 in_step_range = 0;
3242 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3243
3244 /* Find a resumed LWP, if any. */
3245 if (find_inferior (&all_threads,
3246 status_pending_p_callback,
3247 &minus_one_ptid) != NULL)
3248 any_resumed = 1;
3249 else if ((find_inferior (&all_threads,
3250 not_stopped_callback,
3251 &minus_one_ptid) != NULL))
3252 any_resumed = 1;
3253 else
3254 any_resumed = 0;
3255
3256 if (ptid_equal (step_over_bkpt, null_ptid))
3257 pid = linux_wait_for_event (ptid, &w, options);
3258 else
3259 {
3260 if (debug_threads)
3261 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3262 target_pid_to_str (step_over_bkpt));
3263 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3264 }
3265
3266 if (pid == 0 || (pid == -1 && !any_resumed))
3267 {
3268 gdb_assert (target_options & TARGET_WNOHANG);
3269
3270 if (debug_threads)
3271 {
3272 debug_printf ("linux_wait_1 ret = null_ptid, "
3273 "TARGET_WAITKIND_IGNORE\n");
3274 debug_exit ();
3275 }
3276
3277 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3278 return null_ptid;
3279 }
3280 else if (pid == -1)
3281 {
3282 if (debug_threads)
3283 {
3284 debug_printf ("linux_wait_1 ret = null_ptid, "
3285 "TARGET_WAITKIND_NO_RESUMED\n");
3286 debug_exit ();
3287 }
3288
3289 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3290 return null_ptid;
3291 }
3292
3293 event_child = get_thread_lwp (current_thread);
3294
3295 /* linux_wait_for_event only returns an exit status for the last
3296 child of a process. Report it. */
3297 if (WIFEXITED (w) || WIFSIGNALED (w))
3298 {
3299 if (WIFEXITED (w))
3300 {
3301 ourstatus->kind = TARGET_WAITKIND_EXITED;
3302 ourstatus->value.integer = WEXITSTATUS (w);
3303
3304 if (debug_threads)
3305 {
3306 debug_printf ("linux_wait_1 ret = %s, exited with "
3307 "retcode %d\n",
3308 target_pid_to_str (ptid_of (current_thread)),
3309 WEXITSTATUS (w));
3310 debug_exit ();
3311 }
3312 }
3313 else
3314 {
3315 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3316 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3317
3318 if (debug_threads)
3319 {
3320 debug_printf ("linux_wait_1 ret = %s, terminated with "
3321 "signal %d\n",
3322 target_pid_to_str (ptid_of (current_thread)),
3323 WTERMSIG (w));
3324 debug_exit ();
3325 }
3326 }
3327
3328 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3329 return filter_exit_event (event_child, ourstatus);
3330
3331 return ptid_of (current_thread);
3332 }
3333
3334 /* If a step-over executes a breakpoint instruction, then in the
3335 case of a hardware single step it means a gdb/gdbserver breakpoint
3336 had been planted on top of a permanent breakpoint, while in the
3337 case of a software single step it may just mean that gdbserver hit
3338 the reinsert breakpoint. The PC has been adjusted by
3339 save_stop_reason to point at the breakpoint address.
3340 So in the case of a hardware single step, advance the PC manually
3341 past the breakpoint; in the case of a software single step, advance
3342 it only if we are not hitting the reinsert_breakpoint.
3343 This prevents a program from trapping a permanent breakpoint
3344 forever. */
3345 if (!ptid_equal (step_over_bkpt, null_ptid)
3346 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3347 && (event_child->stepping
3348 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3349 {
3350 int increment_pc = 0;
3351 int breakpoint_kind = 0;
3352 CORE_ADDR stop_pc = event_child->stop_pc;
3353
3354 breakpoint_kind
3355 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3356 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3357
3358 if (debug_threads)
3359 {
3360 debug_printf ("step-over for %s executed software breakpoint\n",
3361 target_pid_to_str (ptid_of (current_thread)));
3362 }
3363
3364 if (increment_pc != 0)
3365 {
3366 struct regcache *regcache
3367 = get_thread_regcache (current_thread, 1);
3368
3369 event_child->stop_pc += increment_pc;
3370 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3371
3372 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3373 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3374 }
3375 }
3376
3377 /* If this event was not handled before, and is not a SIGTRAP, we
3378 report it. SIGILL and SIGSEGV are also treated as traps in case
3379 a breakpoint is inserted at the current PC. If this target does
3380 not support internal breakpoints at all, we also report the
3381 SIGTRAP without further processing; it's of no concern to us. */
3382 maybe_internal_trap
3383 = (supports_breakpoints ()
3384 && (WSTOPSIG (w) == SIGTRAP
3385 || ((WSTOPSIG (w) == SIGILL
3386 || WSTOPSIG (w) == SIGSEGV)
3387 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3388
3389 if (maybe_internal_trap)
3390 {
3391 /* Handle anything that requires bookkeeping before deciding to
3392 report the event or continue waiting. */
3393
3394 /* First check if we can explain the SIGTRAP with an internal
3395 breakpoint, or if we should possibly report the event to GDB.
3396 Do this before anything that may remove or insert a
3397 breakpoint. */
3398 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3399
3400 /* We have a SIGTRAP, possibly a step-over dance has just
3401 finished. If so, tweak the state machine accordingly,
3402 reinsert breakpoints and delete any reinsert (software
3403 single-step) breakpoints. */
3404 step_over_finished = finish_step_over (event_child);
3405
3406 /* Now invoke the callbacks of any internal breakpoints there. */
3407 check_breakpoints (event_child->stop_pc);
3408
3409 /* Handle tracepoint data collecting. This may overflow the
3410 trace buffer, and cause a tracing stop, removing
3411 breakpoints. */
3412 trace_event = handle_tracepoints (event_child);
3413
3414 if (bp_explains_trap)
3415 {
3416 if (debug_threads)
3417 debug_printf ("Hit a gdbserver breakpoint.\n");
3418 }
3419 }
3420 else
3421 {
3422 /* We have some other signal, possibly a step-over dance was in
3423 progress, and it should be cancelled too. */
3424 step_over_finished = finish_step_over (event_child);
3425 }
3426
3427 /* We have all the data we need. Either report the event to GDB, or
3428 resume threads and keep waiting for more. */
3429
3430 /* If we're collecting a fast tracepoint, finish the collection and
3431 move out of the jump pad before delivering a signal. See
3432 linux_stabilize_threads. */
3433
3434 if (WIFSTOPPED (w)
3435 && WSTOPSIG (w) != SIGTRAP
3436 && supports_fast_tracepoints ()
3437 && agent_loaded_p ())
3438 {
3439 if (debug_threads)
3440 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3441 "to defer or adjust it.\n",
3442 WSTOPSIG (w), lwpid_of (current_thread));
3443
3444 /* Allow debugging the jump pad itself. */
3445 if (current_thread->last_resume_kind != resume_step
3446 && maybe_move_out_of_jump_pad (event_child, &w))
3447 {
3448 enqueue_one_deferred_signal (event_child, &w);
3449
3450 if (debug_threads)
3451 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3452 WSTOPSIG (w), lwpid_of (current_thread));
3453
3454 linux_resume_one_lwp (event_child, 0, 0, NULL);
3455
3456 return ignore_event (ourstatus);
3457 }
3458 }
3459
3460 if (event_child->collecting_fast_tracepoint)
3461 {
3462 if (debug_threads)
3463 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3464 "Check if we're already there.\n",
3465 lwpid_of (current_thread),
3466 event_child->collecting_fast_tracepoint);
3467
3468 trace_event = 1;
3469
3470 event_child->collecting_fast_tracepoint
3471 = linux_fast_tracepoint_collecting (event_child, NULL);
3472
3473 if (event_child->collecting_fast_tracepoint != 1)
3474 {
3475 /* No longer need this breakpoint. */
3476 if (event_child->exit_jump_pad_bkpt != NULL)
3477 {
3478 if (debug_threads)
3479 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3480 "stopping all threads momentarily.\n");
3481
3482 /* Other running threads could hit this breakpoint.
3483 We don't handle moribund locations like GDB does,
3484 instead we always pause all threads when removing
3485 breakpoints, so that any step-over or
3486 decr_pc_after_break adjustment is always taken
3487 care of while the breakpoint is still
3488 inserted. */
3489 stop_all_lwps (1, event_child);
3490
3491 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3492 event_child->exit_jump_pad_bkpt = NULL;
3493
3494 unstop_all_lwps (1, event_child);
3495
3496 gdb_assert (event_child->suspended >= 0);
3497 }
3498 }
3499
3500 if (event_child->collecting_fast_tracepoint == 0)
3501 {
3502 if (debug_threads)
3503 debug_printf ("fast tracepoint finished "
3504 "collecting successfully.\n");
3505
3506 /* We may have a deferred signal to report. */
3507 if (dequeue_one_deferred_signal (event_child, &w))
3508 {
3509 if (debug_threads)
3510 debug_printf ("dequeued one signal.\n");
3511 }
3512 else
3513 {
3514 if (debug_threads)
3515 debug_printf ("no deferred signals.\n");
3516
3517 if (stabilizing_threads)
3518 {
3519 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3520 ourstatus->value.sig = GDB_SIGNAL_0;
3521
3522 if (debug_threads)
3523 {
3524 debug_printf ("linux_wait_1 ret = %s, stopped "
3525 "while stabilizing threads\n",
3526 target_pid_to_str (ptid_of (current_thread)));
3527 debug_exit ();
3528 }
3529
3530 return ptid_of (current_thread);
3531 }
3532 }
3533 }
3534 }
3535
3536 /* Check whether GDB would be interested in this event. */
3537
3538 /* Check if GDB is interested in this syscall. */
3539 if (WIFSTOPPED (w)
3540 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3541 && !gdb_catch_this_syscall_p (event_child))
3542 {
3543 if (debug_threads)
3544 {
3545 debug_printf ("Ignored syscall for LWP %ld.\n",
3546 lwpid_of (current_thread));
3547 }
3548
3549 linux_resume_one_lwp (event_child, event_child->stepping,
3550 0, NULL);
3551 return ignore_event (ourstatus);
3552 }
3553
3554 /* If GDB is not interested in this signal, don't stop other
3555 threads, and don't report it to GDB. Just resume the inferior
3556 right away. We do this for threading-related signals as well as
3557 any that GDB specifically requested we ignore. But never ignore
3558 SIGSTOP if we sent it ourselves, and do not ignore signals when
3559 stepping - they may require special handling to skip the signal
3560 handler. Also never ignore signals that could be caused by a
3561 breakpoint. */
3562 if (WIFSTOPPED (w)
3563 && current_thread->last_resume_kind != resume_step
3564 && (
3565 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3566 (current_process ()->priv->thread_db != NULL
3567 && (WSTOPSIG (w) == __SIGRTMIN
3568 || WSTOPSIG (w) == __SIGRTMIN + 1))
3569 ||
3570 #endif
3571 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3572 && !(WSTOPSIG (w) == SIGSTOP
3573 && current_thread->last_resume_kind == resume_stop)
3574 && !linux_wstatus_maybe_breakpoint (w))))
3575 {
3576 siginfo_t info, *info_p;
3577
3578 if (debug_threads)
3579 debug_printf ("Ignored signal %d for LWP %ld.\n",
3580 WSTOPSIG (w), lwpid_of (current_thread));
3581
3582 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3583 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3584 info_p = &info;
3585 else
3586 info_p = NULL;
3587
3588 if (step_over_finished)
3589 {
3590 /* We cancelled this thread's step-over above. We still
3591 need to unsuspend all other LWPs, and set them back
3592 running again while the signal handler runs. */
3593 unsuspend_all_lwps (event_child);
3594
3595 /* Enqueue the pending signal info so that proceed_all_lwps
3596 doesn't lose it. */
3597 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3598
3599 proceed_all_lwps ();
3600 }
3601 else
3602 {
3603 linux_resume_one_lwp (event_child, event_child->stepping,
3604 WSTOPSIG (w), info_p);
3605 }
3606 return ignore_event (ourstatus);
3607 }
3608
3609 /* Note that all addresses are always "out of the step range" when
3610 there's no range to begin with. */
3611 in_step_range = lwp_in_step_range (event_child);
3612
3613 /* If GDB wanted this thread to single step, and the thread is out
3614 of the step range, we always want to report the SIGTRAP, and let
3615 GDB handle it. Watchpoints should always be reported. So should
3616 signals we can't explain. A SIGTRAP we can't explain could be a
3617 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3618 do, we'd be able to handle GDB breakpoints on top of internal
3619 breakpoints, by handling the internal breakpoint and still
3620 reporting the event to GDB. If we don't, we're out of luck, GDB
3621 won't see the breakpoint hit. If we see a single-step event but
3622 the thread should be continuing, don't pass the trap to gdb.
3623 That indicates that we had previously finished a single-step but
3624 left the single-step pending -- see
3625 complete_ongoing_step_over. */
3626 report_to_gdb = (!maybe_internal_trap
3627 || (current_thread->last_resume_kind == resume_step
3628 && !in_step_range)
3629 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3630 || (!in_step_range
3631 && !bp_explains_trap
3632 && !trace_event
3633 && !step_over_finished
3634 && !(current_thread->last_resume_kind == resume_continue
3635 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3636 || (gdb_breakpoint_here (event_child->stop_pc)
3637 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3638 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3639 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3640
3641 run_breakpoint_commands (event_child->stop_pc);
3642
3643 /* We found no reason GDB would want us to stop. We either hit one
3644 of our own breakpoints, or finished an internal step GDB
3645 shouldn't know about. */
3646 if (!report_to_gdb)
3647 {
3648 if (debug_threads)
3649 {
3650 if (bp_explains_trap)
3651 debug_printf ("Hit a gdbserver breakpoint.\n");
3652 if (step_over_finished)
3653 debug_printf ("Step-over finished.\n");
3654 if (trace_event)
3655 debug_printf ("Tracepoint event.\n");
3656 if (lwp_in_step_range (event_child))
3657 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3658 paddress (event_child->stop_pc),
3659 paddress (event_child->step_range_start),
3660 paddress (event_child->step_range_end));
3661 }
3662
3663 /* We're not reporting this breakpoint to GDB, so apply the
3664 decr_pc_after_break adjustment to the inferior's regcache
3665 ourselves. */
3666
3667 if (the_low_target.set_pc != NULL)
3668 {
3669 struct regcache *regcache
3670 = get_thread_regcache (current_thread, 1);
3671 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3672 }
3673
3674 /* We may have finished stepping over a breakpoint. If so,
3675 we've stopped and suspended all LWPs momentarily except the
3676 stepping one. This is where we resume them all again. We're
3677 going to keep waiting, so use proceed, which handles stepping
3678 over the next breakpoint. */
3679 if (debug_threads)
3680 debug_printf ("proceeding all threads.\n");
3681
3682 if (step_over_finished)
3683 unsuspend_all_lwps (event_child);
3684
3685 proceed_all_lwps ();
3686 return ignore_event (ourstatus);
3687 }
3688
3689 if (debug_threads)
3690 {
3691 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3692 {
3693 char *str;
3694
3695 str = target_waitstatus_to_string (&event_child->waitstatus);
3696 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3697 lwpid_of (get_lwp_thread (event_child)), str);
3698 xfree (str);
3699 }
3700 if (current_thread->last_resume_kind == resume_step)
3701 {
3702 if (event_child->step_range_start == event_child->step_range_end)
3703 debug_printf ("GDB wanted to single-step, reporting event.\n");
3704 else if (!lwp_in_step_range (event_child))
3705 debug_printf ("Out of step range, reporting event.\n");
3706 }
3707 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3708 debug_printf ("Stopped by watchpoint.\n");
3709 else if (gdb_breakpoint_here (event_child->stop_pc))
3710 debug_printf ("Stopped by GDB breakpoint.\n");
3711
3712 debug_printf ("Hit a non-gdbserver trap event.\n");
3713 }
3714
3715 /* Alright, we're going to report a stop. */
3716
3717 if (!stabilizing_threads)
3718 {
3719 /* In all-stop, stop all threads. */
3720 if (!non_stop)
3721 stop_all_lwps (0, NULL);
3722
3723 /* If we're not waiting for a specific LWP, choose an event LWP
3724 from among those that have had events. Giving equal priority
3725 to all LWPs that have had events helps prevent
3726 starvation. */
3727 if (ptid_equal (ptid, minus_one_ptid))
3728 {
3729 event_child->status_pending_p = 1;
3730 event_child->status_pending = w;
3731
3732 select_event_lwp (&event_child);
3733
3734 /* current_thread and event_child must stay in sync. */
3735 current_thread = get_lwp_thread (event_child);
3736
3737 event_child->status_pending_p = 0;
3738 w = event_child->status_pending;
3739 }
3740
3741 if (step_over_finished)
3742 {
3743 if (!non_stop)
3744 {
3745 /* If we were doing a step-over, all other threads but
3746 the stepping one had been paused in start_step_over,
3747 with their suspend counts incremented. We don't want
3748 to do a full unstop/unpause, because we're in
3749 all-stop mode (so we want threads stopped), but we
3750 still need to unsuspend the other threads, to
3751 decrement their `suspended' count back. */
3752 unsuspend_all_lwps (event_child);
3753 }
3754 else
3755 {
3756 /* If we just finished a step-over, then all threads had
3757 been momentarily paused. In all-stop, that's fine,
3758 we want threads stopped by now anyway. In non-stop,
3759 we need to re-resume threads that GDB wanted to be
3760 running. */
3761 unstop_all_lwps (1, event_child);
3762 }
3763 }
3764
3765 /* Stabilize threads (move out of jump pads). */
3766 if (!non_stop)
3767 stabilize_threads ();
3768 }
3769 else
3770 {
3771 /* If we just finished a step-over, then all threads had been
3772 momentarily paused. In all-stop, that's fine, we want
3773 threads stopped by now anyway. In non-stop, we need to
3774 re-resume threads that GDB wanted to be running. */
3775 if (step_over_finished)
3776 unstop_all_lwps (1, event_child);
3777 }
3778
3779 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3780 {
3781 /* If the reported event is an exit, fork, vfork or exec, let
3782 GDB know. */
3783 *ourstatus = event_child->waitstatus;
3784 /* Clear the event lwp's waitstatus since we handled it already. */
3785 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3786 }
3787 else
3788 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3789
3790 /* Now that we've selected our final event LWP, un-adjust its PC if
3791 it was a software breakpoint, and the client doesn't know we can
3792 adjust the breakpoint ourselves. */
3793 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3794 && !swbreak_feature)
3795 {
3796 int decr_pc = the_low_target.decr_pc_after_break;
3797
3798 if (decr_pc != 0)
3799 {
3800 struct regcache *regcache
3801 = get_thread_regcache (current_thread, 1);
3802 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3803 }
3804 }
3805
3806 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3807 {
3808 get_syscall_trapinfo (event_child,
3809 &ourstatus->value.syscall_number);
3810 ourstatus->kind = event_child->syscall_state;
3811 }
3812 else if (current_thread->last_resume_kind == resume_stop
3813 && WSTOPSIG (w) == SIGSTOP)
3814 {
3815 /* A thread that has been requested to stop by GDB with vCont;t,
3816 and it stopped cleanly, so report it as SIG0. The use of
3817 SIGSTOP is an implementation detail. */
3818 ourstatus->value.sig = GDB_SIGNAL_0;
3819 }
3820 else if (current_thread->last_resume_kind == resume_stop
3821 && WSTOPSIG (w) != SIGSTOP)
3822 {
3823 /* A thread that has been requested to stop by GDB with vCont;t,
3824 but it stopped for other reasons. */
3825 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3826 }
3827 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3828 {
3829 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3830 }
3831
3832 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3833
3834 if (debug_threads)
3835 {
3836 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3837 target_pid_to_str (ptid_of (current_thread)),
3838 ourstatus->kind, ourstatus->value.sig);
3839 debug_exit ();
3840 }
3841
3842 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3843 return filter_exit_event (event_child, ourstatus);
3844
3845 return ptid_of (current_thread);
3846 }
3847
3848 /* Get rid of any pending event in the pipe. */
3849 static void
3850 async_file_flush (void)
3851 {
3852 int ret;
3853 char buf;
3854
3855 do
3856 ret = read (linux_event_pipe[0], &buf, 1);
3857 while (ret >= 0 || (ret == -1 && errno == EINTR));
3858 }
3859
3860 /* Put something in the pipe, so the event loop wakes up. */
3861 static void
3862 async_file_mark (void)
3863 {
3864 int ret;
3865
3866 async_file_flush ();
3867
3868 do
3869 ret = write (linux_event_pipe[1], "+", 1);
3870 while (ret == 0 || (ret == -1 && errno == EINTR));
3871
3872 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3873 be awakened anyway. */
3874 }
3875
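/* Wrapper around linux_wait_1: flush the async event pipe, loop until
   linux_wait_1 reports an event (unless TARGET_WNOHANG was requested),
   and re-mark the pipe afterwards, since a single SIGCHLD may signal
   more than one child stop.  */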
3876 static ptid_t
3877 linux_wait (ptid_t ptid,
3878 struct target_waitstatus *ourstatus, int target_options)
3879 {
3880 ptid_t event_ptid;
3881
3882 /* Flush the async file first. */
3883 if (target_is_async_p ())
3884 async_file_flush ();
3885
3886 do
3887 {
3888 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3889 }
3890 while ((target_options & TARGET_WNOHANG) == 0
3891 && ptid_equal (event_ptid, null_ptid)
3892 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3893
3894 /* If at least one stop was reported, there may be more. A single
3895 SIGCHLD can signal more than one child stop. */
3896 if (target_is_async_p ()
3897 && (target_options & TARGET_WNOHANG) != 0
3898 && !ptid_equal (event_ptid, null_ptid))
3899 async_file_mark ();
3900
3901 return event_ptid;
3902 }
3903
3904 /* Send a signal to an LWP, using tkill so it is delivered to that specific thread rather than to the process as a whole. */
3905
3906 static int
3907 kill_lwp (unsigned long lwpid, int signo)
3908 {
3909 int ret;
3910
3911 errno = 0;
3912 ret = syscall (__NR_tkill, lwpid, signo);
3913 if (errno == ENOSYS)
3914 {
3915 /* If tkill fails, then we are not using nptl threads, a
3916 configuration we no longer support. */
3917 perror_with_name (("tkill"));
3918 }
3919 return ret;
3920 }
3921
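/* Exported wrapper: ask LWP to stop by queueing a SIGSTOP for it.  */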
3922 void
3923 linux_stop_lwp (struct lwp_info *lwp)
3924 {
3925 send_sigstop (lwp);
3926 }
3927
3928 static void
3929 send_sigstop (struct lwp_info *lwp)
3930 {
3931 int pid;
3932
3933 pid = lwpid_of (get_lwp_thread (lwp));
3934
3935 /* If we already have a pending stop signal for this process, don't
3936 send another. */
3937 if (lwp->stop_expected)
3938 {
3939 if (debug_threads)
3940 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3941
3942 return;
3943 }
3944
3945 if (debug_threads)
3946 debug_printf ("Sending sigstop to lwp %d\n", pid);
3947
3948 lwp->stop_expected = 1;
3949 kill_lwp (pid, SIGSTOP);
3950 }
3951
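/* find_inferior callback: send a SIGSTOP to the LWP behind ENTRY,
   unless it is EXCEPT or is already stopped.  Always returns 0 so
   that the iteration visits every thread.  */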
3952 static int
3953 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3954 {
3955 struct thread_info *thread = (struct thread_info *) entry;
3956 struct lwp_info *lwp = get_thread_lwp (thread);
3957
3958 /* Ignore EXCEPT. */
3959 if (lwp == except)
3960 return 0;
3961
3962 if (lwp->stopped)
3963 return 0;
3964
3965 send_sigstop (lwp);
3966 return 0;
3967 }
3968
3969 /* Increment the suspend count of an LWP, and stop it, if not stopped
3970 yet. */
3971 static int
3972 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3973 void *except)
3974 {
3975 struct thread_info *thread = (struct thread_info *) entry;
3976 struct lwp_info *lwp = get_thread_lwp (thread);
3977
3978 /* Ignore EXCEPT. */
3979 if (lwp == except)
3980 return 0;
3981
3982 lwp_suspended_inc (lwp);
3983
3984 return send_sigstop_callback (entry, except);
3985 }
3986
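/* Record that LWP exited or was killed with wait status WSTAT.  The
   status is stashed both as a pending status and as a waitstatus, and
   the LWP is flagged stopped so that no further attempts are made to
   stop it.  */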
3987 static void
3988 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3989 {
3990 /* Store the exit status for later. */
3991 lwp->status_pending_p = 1;
3992 lwp->status_pending = wstat;
3993
3994 /* Store in waitstatus as well, as there's nothing else to process
3995 for this event. */
3996 if (WIFEXITED (wstat))
3997 {
3998 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3999 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4000 }
4001 else if (WIFSIGNALED (wstat))
4002 {
4003 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4004 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4005 }
4006
4007 /* Prevent trying to stop it. */
4008 lwp->stopped = 1;
4009
4010 /* No further stops are expected from a dead lwp. */
4011 lwp->stop_expected = 0;
4012 }
4013
4014 /* Return true if LWP has exited already, and has a pending exit event
4015 to report to GDB. */
4016
4017 static int
4018 lwp_is_marked_dead (struct lwp_info *lwp)
4019 {
4020 return (lwp->status_pending_p
4021 && (WIFEXITED (lwp->status_pending)
4022 || WIFSIGNALED (lwp->status_pending)));
4023 }
4024
4025 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4026
4027 static void
4028 wait_for_sigstop (void)
4029 {
4030 struct thread_info *saved_thread;
4031 ptid_t saved_tid;
4032 int wstat;
4033 int ret;
4034
4035 saved_thread = current_thread;
4036 if (saved_thread != NULL)
4037 saved_tid = saved_thread->entry.id;
4038 else
4039 saved_tid = null_ptid; /* avoid bogus unused warning */
4040
4041 if (debug_threads)
4042 debug_printf ("wait_for_sigstop: pulling events\n");
4043
4044 /* Passing NULL_PTID as filter indicates we want all events to be
4045 left pending. Eventually this returns when there are no
4046 unwaited-for children left. */
4047 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4048 &wstat, __WALL);
4049 gdb_assert (ret == -1);
4050
4051 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4052 current_thread = saved_thread;
4053 else
4054 {
4055 if (debug_threads)
4056 debug_printf ("Previously current thread died.\n");
4057
4058 /* We can't change the current inferior behind GDB's back,
4059 otherwise, a subsequent command may apply to the wrong
4060 process. */
4061 current_thread = NULL;
4062 }
4063 }
4064
4065 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4066 move it out, because we need to report the stop event to GDB. For
4067 example, if the user puts a breakpoint in the jump pad, it's
4068 because she wants to debug it. */
4069
4070 static int
4071 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4072 {
4073 struct thread_info *thread = (struct thread_info *) entry;
4074 struct lwp_info *lwp = get_thread_lwp (thread);
4075
4076 if (lwp->suspended != 0)
4077 {
4078 internal_error (__FILE__, __LINE__,
4079 "LWP %ld is suspended, suspended=%d\n",
4080 lwpid_of (thread), lwp->suspended);
4081 }
4082 gdb_assert (lwp->stopped);
4083
4084 /* Allow debugging the jump pad, gdb_collect, etc. */
4085 return (supports_fast_tracepoints ()
4086 && agent_loaded_p ()
4087 && (gdb_breakpoint_here (lwp->stop_pc)
4088 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4089 || thread->last_resume_kind == resume_step)
4090 && linux_fast_tracepoint_collecting (lwp, NULL));
4091 }
4092
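/* Per-thread callback: if this LWP stopped inside the fast tracepoint
   jump pad and no stop event has to be reported to GDB (no GDB
   breakpoint here, no watchpoint hit, not single-stepping), defer any
   pending signal and set the LWP running towards the pad's exit;
   otherwise leave it where it is and bump its suspend count.  */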
4093 static void
4094 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4095 {
4096 struct thread_info *thread = (struct thread_info *) entry;
4097 struct thread_info *saved_thread;
4098 struct lwp_info *lwp = get_thread_lwp (thread);
4099 int *wstat;
4100
4101 if (lwp->suspended != 0)
4102 {
4103 internal_error (__FILE__, __LINE__,
4104 "LWP %ld is suspended, suspended=%d\n",
4105 lwpid_of (thread), lwp->suspended);
4106 }
4107 gdb_assert (lwp->stopped);
4108
4109 /* For gdb_breakpoint_here. */
4110 saved_thread = current_thread;
4111 current_thread = thread;
4112
4113 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4114
4115 /* Allow debugging the jump pad, gdb_collect, etc. */
4116 if (!gdb_breakpoint_here (lwp->stop_pc)
4117 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4118 && thread->last_resume_kind != resume_step
4119 && maybe_move_out_of_jump_pad (lwp, wstat))
4120 {
4121 if (debug_threads)
4122 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4123 lwpid_of (thread));
4124
4125 if (wstat)
4126 {
4127 lwp->status_pending_p = 0;
4128 enqueue_one_deferred_signal (lwp, wstat);
4129
4130 if (debug_threads)
4131 debug_printf ("Signal %d for LWP %ld deferred "
4132 "(in jump pad)\n",
4133 WSTOPSIG (*wstat), lwpid_of (thread));
4134 }
4135
4136 linux_resume_one_lwp (lwp, 0, 0, NULL);
4137 }
4138 else
4139 lwp_suspended_inc (lwp);
4140
4141 current_thread = saved_thread;
4142 }
4143
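/* find_inferior callback: return nonzero if the LWP behind ENTRY is
   alive and running, i.e. neither stopped nor marked dead.  */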
4144 static int
4145 lwp_running (struct inferior_list_entry *entry, void *data)
4146 {
4147 struct thread_info *thread = (struct thread_info *) entry;
4148 struct lwp_info *lwp = get_thread_lwp (thread);
4149
4150 if (lwp_is_marked_dead (lwp))
4151 return 0;
4152 if (lwp->stopped)
4153 return 0;
4154 return 1;
4155 }
4156
4157 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4158 If SUSPEND, then also increase the suspend count of every LWP,
4159 except EXCEPT. */
4160
4161 static void
4162 stop_all_lwps (int suspend, struct lwp_info *except)
4163 {
4164 /* Should not be called recursively. */
4165 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4166
4167 if (debug_threads)
4168 {
4169 debug_enter ();
4170 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4171 suspend ? "stop-and-suspend" : "stop",
4172 except != NULL
4173 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4174 : "none");
4175 }
4176
4177 stopping_threads = (suspend
4178 ? STOPPING_AND_SUSPENDING_THREADS
4179 : STOPPING_THREADS);
4180
4181 if (suspend)
4182 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4183 else
4184 find_inferior (&all_threads, send_sigstop_callback, except);
4185 wait_for_sigstop ();
4186 stopping_threads = NOT_STOPPING_THREADS;
4187
4188 if (debug_threads)
4189 {
4190 debug_printf ("stop_all_lwps done, setting stopping_threads "
4191 "back to !stopping\n");
4192 debug_exit ();
4193 }
4194 }
4195
4196 /* Enqueue one signal in the chain of signals which need to be
4197 delivered to this process on next resume. */
4198
4199 static void
4200 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4201 {
4202 struct pending_signals *p_sig = XNEW (struct pending_signals);
4203
4204 p_sig->prev = lwp->pending_signals;
4205 p_sig->signal = signal;
4206 if (info == NULL)
4207 memset (&p_sig->info, 0, sizeof (siginfo_t));
4208 else
4209 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4210 lwp->pending_signals = p_sig;
4211 }
4212
4213 /* Install breakpoints for software single stepping. */
4214
4215 static void
4216 install_software_single_step_breakpoints (struct lwp_info *lwp)
4217 {
4218 int i;
4219 CORE_ADDR pc;
4220 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4221 VEC (CORE_ADDR) *next_pcs = NULL;
4222 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4223
4224 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4225
4226 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4227 set_reinsert_breakpoint (pc);
4228
4229 do_cleanups (old_chain);
4230 }
4231
4232 /* Single step via hardware or software single step.
4233 Return 1 if hardware single-stepping, 0 if software single-stepping
4234 or if single-stepping is not supported. */
4235
4236 static int
4237 single_step (struct lwp_info* lwp)
4238 {
4239 int step = 0;
4240
4241 if (can_hardware_single_step ())
4242 {
4243 step = 1;
4244 }
4245 else if (can_software_single_step ())
4246 {
4247 install_software_single_step_breakpoints (lwp);
4248 step = 0;
4249 }
4250 else
4251 {
4252 if (debug_threads)
4253 debug_printf ("stepping is not implemented on this target\n");
4254 }
4255
4256 return step;
4257 }
4258
4259 /* The signal can be delivered to the inferior if we are not trying to
4260 finish a fast tracepoint collect. Since a signal can be delivered
4261 during the step-over, the program may enter the signal handler and
4262 trap again after returning from it. We can live with the spurious
4263 double traps. */
4264
4265 static int
4266 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4267 {
4268 return !lwp->collecting_fast_tracepoint;
4269 }
4270
4271 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4272 SIGNAL is nonzero, give it that signal. */
4273
4274 static void
4275 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4276 int step, int signal, siginfo_t *info)
4277 {
4278 struct thread_info *thread = get_lwp_thread (lwp);
4279 struct thread_info *saved_thread;
4280 int fast_tp_collecting;
4281 int ptrace_request;
4282 struct process_info *proc = get_thread_process (thread);
4283
4284 /* Note that the target description may not be initialised
4285 (proc->tdesc == NULL) at this point, because the program hasn't
4286 stopped at its first instruction yet; GDBserver is still skipping
4287 the extra traps from the wrapper program (see option --wrapper).
4288 Code in this function that requires register access must therefore
4289 be guarded by a check on proc->tdesc. */
4290
4291 if (lwp->stopped == 0)
4292 return;
4293
4294 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4295
4296 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4297
4298 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4299
4300 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4301 user used the "jump" command, or "set $pc = foo"). */
4302 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4303 {
4304 /* Collecting 'while-stepping' actions doesn't make sense
4305 anymore. */
4306 release_while_stepping_state_list (thread);
4307 }
4308
4309 /* If we have pending signals or status, and a new signal, enqueue the
4310 signal. Also enqueue the signal if it can't be delivered to the
4311 inferior right now. */
4312 if (signal != 0
4313 && (lwp->status_pending_p
4314 || lwp->pending_signals != NULL
4315 || !lwp_signal_can_be_delivered (lwp)))
4316 {
4317 enqueue_pending_signal (lwp, signal, info);
4318
4319 /* Postpone any pending signal. It was enqueued above. */
4320 signal = 0;
4321 }
4322
4323 if (lwp->status_pending_p)
4324 {
4325 if (debug_threads)
4326 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4327 " has pending status\n",
4328 lwpid_of (thread), step ? "step" : "continue",
4329 lwp->stop_expected ? "expected" : "not expected");
4330 return;
4331 }
4332
4333 saved_thread = current_thread;
4334 current_thread = thread;
4335
4336 /* This bit needs some thinking about. If we get a signal that
4337 we must report while a single-step reinsert is still pending,
4338 we often end up resuming the thread. It might be better to
4339 (ew) allow a stack of pending events; then we could be sure that
4340 the reinsert happened right away and not lose any signals.
4341
4342 Making this stack would also shrink the window in which breakpoints are
4343 uninserted (see comment in linux_wait_for_lwp) but not enough for
4344 complete correctness, so it won't solve that problem. It may be
4345 worthwhile just to solve this one, however. */
4346 if (lwp->bp_reinsert != 0)
4347 {
4348 if (debug_threads)
4349 debug_printf (" pending reinsert at 0x%s\n",
4350 paddress (lwp->bp_reinsert));
4351
4352 if (can_hardware_single_step ())
4353 {
4354 if (fast_tp_collecting == 0)
4355 {
4356 if (step == 0)
4357 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4358 if (lwp->suspended)
4359 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4360 lwp->suspended);
4361 }
4362 }
4363
4364 step = maybe_hw_step (thread);
4365 }
4366 else
4367 {
4368 /* If the thread isn't doing step-over, there shouldn't be any
4369 reinsert breakpoints. */
4370 gdb_assert (!has_reinsert_breakpoints (proc));
4371 }
4372
4373 if (fast_tp_collecting == 1)
4374 {
4375 if (debug_threads)
4376 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4377 " (exit-jump-pad-bkpt)\n",
4378 lwpid_of (thread));
4379 }
4380 else if (fast_tp_collecting == 2)
4381 {
4382 if (debug_threads)
4383 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4384 " single-stepping\n",
4385 lwpid_of (thread));
4386
4387 if (can_hardware_single_step ())
4388 step = 1;
4389 else
4390 {
4391 internal_error (__FILE__, __LINE__,
4392 "moving out of jump pad single-stepping"
4393 " not implemented on this target");
4394 }
4395 }
4396
4397 /* If we have while-stepping actions in this thread, set it stepping.
4398 If we have a signal to deliver, it may or may not be set to
4399 SIG_IGN, we don't know. Assume so, and allow collecting
4400 while-stepping into a signal handler. A possible smart thing to
4401 do would be to set an internal breakpoint at the signal return
4402 address, continue, and carry on catching this while-stepping
4403 action only when that breakpoint is hit. A future
4404 enhancement. */
4405 if (thread->while_stepping != NULL)
4406 {
4407 if (debug_threads)
4408 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4409 lwpid_of (thread));
4410
4411 step = single_step (lwp);
4412 }
4413
4414 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4415 {
4416 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4417
4418 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4419
4420 if (debug_threads)
4421 {
4422 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4423 (long) lwp->stop_pc);
4424 }
4425 }
4426
4427 /* If we have pending signals, consume one if it can be delivered to
4428 the inferior. */
4429 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4430 {
4431 struct pending_signals **p_sig;
4432
4433 p_sig = &lwp->pending_signals;
4434 while ((*p_sig)->prev != NULL)
4435 p_sig = &(*p_sig)->prev;
4436
4437 signal = (*p_sig)->signal;
4438 if ((*p_sig)->info.si_signo != 0)
4439 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4440 &(*p_sig)->info);
4441
4442 free (*p_sig);
4443 *p_sig = NULL;
4444 }
4445
4446 if (debug_threads)
4447 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4448 lwpid_of (thread), step ? "step" : "continue", signal,
4449 lwp->stop_expected ? "expected" : "not expected");
4450
4451 if (the_low_target.prepare_to_resume != NULL)
4452 the_low_target.prepare_to_resume (lwp);
4453
4454 regcache_invalidate_thread (thread);
4455 errno = 0;
4456 lwp->stepping = step;
4457 if (step)
4458 ptrace_request = PTRACE_SINGLESTEP;
4459 else if (gdb_catching_syscalls_p (lwp))
4460 ptrace_request = PTRACE_SYSCALL;
4461 else
4462 ptrace_request = PTRACE_CONT;
4463 ptrace (ptrace_request,
4464 lwpid_of (thread),
4465 (PTRACE_TYPE_ARG3) 0,
4466 /* Coerce to a uintptr_t first to avoid potential gcc warning
4467 of coercing an 8 byte integer to a 4 byte pointer. */
4468 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4469
4470 current_thread = saved_thread;
4471 if (errno)
4472 perror_with_name ("resuming thread");
4473
4474 /* Successfully resumed. Clear state that no longer makes sense,
4475 and mark the LWP as running. Must not do this before resuming
4476 otherwise if that fails other code will be confused. E.g., we'd
4477 later try to stop the LWP and hang forever waiting for a stop
4478 status. Note that we must not throw after this is cleared,
4479 otherwise handle_zombie_lwp_error would get confused. */
4480 lwp->stopped = 0;
4481 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4482 }
4483
4484 /* Called when we try to resume a stopped LWP and that errors out. If
4485 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4486 or about to become), discard the error, clear any pending status
4487 the LWP may have, and return true (we'll collect the exit status
4488 soon enough). Otherwise, return false. */
4489
4490 static int
4491 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4492 {
4493 struct thread_info *thread = get_lwp_thread (lp);
4494
4495 /* If we get an error after resuming the LWP successfully, we'd
4496 confuse !T state for the LWP being gone. */
4497 gdb_assert (lp->stopped);
4498
4499 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4500 because even if ptrace failed with ESRCH, the tracee may be "not
4501 yet fully dead", but already refusing ptrace requests. In that
4502 case the tracee has 'R (Running)' state for a little bit
4503 (observed in Linux 3.18). See also the note on ESRCH in the
4504 ptrace(2) man page. Instead, check whether the LWP has any state
4505 other than ptrace-stopped. */
4506
4507 /* Don't assume anything if /proc/PID/status can't be read. */
4508 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4509 {
4510 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4511 lp->status_pending_p = 0;
4512 return 1;
4513 }
4514 return 0;
4515 }
4516
4517 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4518 disappears while we try to resume it. */
4519
4520 static void
4521 linux_resume_one_lwp (struct lwp_info *lwp,
4522 int step, int signal, siginfo_t *info)
4523 {
4524 TRY
4525 {
4526 linux_resume_one_lwp_throw (lwp, step, signal, info);
4527 }
4528 CATCH (ex, RETURN_MASK_ERROR)
4529 {
4530 if (!check_ptrace_stopped_lwp_gone (lwp))
4531 throw_exception (ex);
4532 }
4533 END_CATCH
4534 }
4535
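/* Bundles GDB's resume requests so they can be handed to
   linux_set_resume_request through find_inferior's single
   void * argument.  */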
4536 struct thread_resume_array
4537 {
4538 struct thread_resume *resume;
4539 size_t n;
4540 };
4541
4542 /* This function is called once per thread via find_inferior.
4543 ARG is a pointer to a thread_resume_array struct.
4544 We look up the thread specified by ENTRY in ARG, and mark the thread
4545 with a pointer to the appropriate resume request.
4546
4547 This algorithm is O(threads * resume elements), but the number of
4548 resume elements is small (and will remain small at least until GDB
4549 supports thread suspension). */
4550
4551 static int
4552 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4553 {
4554 struct thread_info *thread = (struct thread_info *) entry;
4555 struct lwp_info *lwp = get_thread_lwp (thread);
4556 int ndx;
4557 struct thread_resume_array *r;
4558
4559 r = (struct thread_resume_array *) arg;
4560
4561 for (ndx = 0; ndx < r->n; ndx++)
4562 {
4563 ptid_t ptid = r->resume[ndx].thread;
4564 if (ptid_equal (ptid, minus_one_ptid)
4565 || ptid_equal (ptid, entry->id)
4566 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4567 of PID'. */
4568 || (ptid_get_pid (ptid) == pid_of (thread)
4569 && (ptid_is_pid (ptid)
4570 || ptid_get_lwp (ptid) == -1)))
4571 {
4572 if (r->resume[ndx].kind == resume_stop
4573 && thread->last_resume_kind == resume_stop)
4574 {
4575 if (debug_threads)
4576 debug_printf ("already %s LWP %ld at GDB's request\n",
4577 (thread->last_status.kind
4578 == TARGET_WAITKIND_STOPPED)
4579 ? "stopped"
4580 : "stopping",
4581 lwpid_of (thread));
4582
4583 continue;
4584 }
4585
4586 lwp->resume = &r->resume[ndx];
4587 thread->last_resume_kind = lwp->resume->kind;
4588
4589 lwp->step_range_start = lwp->resume->step_range_start;
4590 lwp->step_range_end = lwp->resume->step_range_end;
4591
4592 /* If we had a deferred signal to report, dequeue one now.
4593 This can happen if LWP gets more than one signal while
4594 trying to get out of a jump pad. */
4595 if (lwp->stopped
4596 && !lwp->status_pending_p
4597 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4598 {
4599 lwp->status_pending_p = 1;
4600
4601 if (debug_threads)
4602 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4603 "leaving status pending.\n",
4604 WSTOPSIG (lwp->status_pending),
4605 lwpid_of (thread));
4606 }
4607
4608 return 0;
4609 }
4610 }
4611
4612 /* No resume action for this thread. */
4613 lwp->resume = NULL;
4614
4615 return 0;
4616 }
4617
4618 /* find_inferior callback for linux_resume.
4619 Set *FLAG_P if this lwp has an interesting status pending. */
4620
4621 static int
4622 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4623 {
4624 struct thread_info *thread = (struct thread_info *) entry;
4625 struct lwp_info *lwp = get_thread_lwp (thread);
4626
4627 /* LWPs which will not be resumed are not interesting, because
4628 we might not wait for them next time through linux_wait. */
4629 if (lwp->resume == NULL)
4630 return 0;
4631
4632 if (thread_still_has_status_pending_p (thread))
4633 * (int *) flag_p = 1;
4634
4635 return 0;
4636 }
4637
4638 /* Return 1 if this lwp that GDB wants running is stopped at an
4639 internal breakpoint that we need to step over. It assumes that any
4640 required STOP_PC adjustment has already been propagated to the
4641 inferior's regcache. */
4642
4643 static int
4644 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4645 {
4646 struct thread_info *thread = (struct thread_info *) entry;
4647 struct lwp_info *lwp = get_thread_lwp (thread);
4648 struct thread_info *saved_thread;
4649 CORE_ADDR pc;
4650 struct process_info *proc = get_thread_process (thread);
4651
4652 /* GDBserver is skipping the extra traps from the wrapper program;
4653 no step-over is needed. */
4654 if (proc->tdesc == NULL)
4655 return 0;
4656
4657 /* LWPs which will not be resumed are not interesting, because we
4658 might not wait for them next time through linux_wait. */
4659
4660 if (!lwp->stopped)
4661 {
4662 if (debug_threads)
4663 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4664 lwpid_of (thread));
4665 return 0;
4666 }
4667
4668 if (thread->last_resume_kind == resume_stop)
4669 {
4670 if (debug_threads)
4671 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4672 " stopped\n",
4673 lwpid_of (thread));
4674 return 0;
4675 }
4676
4677 gdb_assert (lwp->suspended >= 0);
4678
4679 if (lwp->suspended)
4680 {
4681 if (debug_threads)
4682 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4683 lwpid_of (thread));
4684 return 0;
4685 }
4686
4687 if (lwp->status_pending_p)
4688 {
4689 if (debug_threads)
4690 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4691 " status.\n",
4692 lwpid_of (thread));
4693 return 0;
4694 }
4695
4696 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4697 or we have. */
4698 pc = get_pc (lwp);
4699
4700 /* If the PC has changed since we stopped, then don't do anything,
4701 and let the breakpoint/tracepoint be hit. This happens if, for
4702 instance, GDB handled the decr_pc_after_break subtraction itself,
4703 GDB is OOL stepping this thread, or the user has issued a "jump"
4704 command, or poked the thread's registers herself. */
4705 if (pc != lwp->stop_pc)
4706 {
4707 if (debug_threads)
4708 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4709 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4710 lwpid_of (thread),
4711 paddress (lwp->stop_pc), paddress (pc));
4712 return 0;
4713 }
4714
4715 /* On software single step target, resume the inferior with signal
4716 rather than stepping over. */
4717 if (can_software_single_step ()
4718 && lwp->pending_signals != NULL
4719 && lwp_signal_can_be_delivered (lwp))
4720 {
4721 if (debug_threads)
4722 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4723 " signals.\n",
4724 lwpid_of (thread));
4725
4726 return 0;
4727 }
4728
4729 saved_thread = current_thread;
4730 current_thread = thread;
4731
4732 /* We can only step over breakpoints we know about. */
4733 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4734 {
4735 /* Don't step over a breakpoint that GDB expects to hit
4736 though. If the condition is being evaluated on the target's side
4737 and it evaluates to false, step over this breakpoint as well. */
4738 if (gdb_breakpoint_here (pc)
4739 && gdb_condition_true_at_breakpoint (pc)
4740 && gdb_no_commands_at_breakpoint (pc))
4741 {
4742 if (debug_threads)
4743 debug_printf ("Need step over [LWP %ld]? yes, but found"
4744 " GDB breakpoint at 0x%s; skipping step over\n",
4745 lwpid_of (thread), paddress (pc));
4746
4747 current_thread = saved_thread;
4748 return 0;
4749 }
4750 else
4751 {
4752 if (debug_threads)
4753 debug_printf ("Need step over [LWP %ld]? yes, "
4754 "found breakpoint at 0x%s\n",
4755 lwpid_of (thread), paddress (pc));
4756
4757 /* We've found an lwp that needs stepping over --- return 1 so
4758 that find_inferior stops looking. */
4759 current_thread = saved_thread;
4760
4761 return 1;
4762 }
4763 }
4764
4765 current_thread = saved_thread;
4766
4767 if (debug_threads)
4768 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4769 " at 0x%s\n",
4770 lwpid_of (thread), paddress (pc));
4771
4772 return 0;
4773 }
4774
4775 /* Start a step-over operation on LWP. When LWP is stopped at a
4776 breakpoint, to make progress we need to move the breakpoint out
4777 of the way. If we let other threads run while we do that, they may
4778 pass by the breakpoint location and miss hitting it. To avoid
4779 that, a step-over momentarily stops all threads while LWP is
4780 single-stepped by either hardware or software while the breakpoint
4781 is temporarily uninserted from the inferior. When the single-step
4782 finishes, we reinsert the breakpoint, and let all threads that are
4783 supposed to be running, run again. */
4784
4785 static int
4786 start_step_over (struct lwp_info *lwp)
4787 {
4788 struct thread_info *thread = get_lwp_thread (lwp);
4789 struct thread_info *saved_thread;
4790 CORE_ADDR pc;
4791 int step;
4792
4793 if (debug_threads)
4794 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4795 lwpid_of (thread));
4796
4797 stop_all_lwps (1, lwp);
4798
4799 if (lwp->suspended != 0)
4800 {
4801 internal_error (__FILE__, __LINE__,
4802 "LWP %ld suspended=%d\n", lwpid_of (thread),
4803 lwp->suspended);
4804 }
4805
4806 if (debug_threads)
4807 debug_printf ("Done stopping all threads for step-over.\n");
4808
4809 /* Note, we should always reach here with an already adjusted PC,
4810 either by GDB (if we're resuming due to GDB's request), or by our
4811 caller, if we just finished handling an internal breakpoint GDB
4812 shouldn't care about. */
4813 pc = get_pc (lwp);
4814
4815 saved_thread = current_thread;
4816 current_thread = thread;
4817
4818 lwp->bp_reinsert = pc;
4819 uninsert_breakpoints_at (pc);
4820 uninsert_fast_tracepoint_jumps_at (pc);
4821
4822 step = single_step (lwp);
4823
4824 current_thread = saved_thread;
4825
4826 linux_resume_one_lwp (lwp, step, 0, NULL);
4827
4828 /* Require next event from this LWP. */
4829 step_over_bkpt = thread->entry.id;
4830 return 1;
4831 }
4832
4833 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4834 start_step_over, if still there, and delete any reinsert
4835 breakpoints we've set, on non-hardware single-step targets. */
4836
4837 static int
4838 finish_step_over (struct lwp_info *lwp)
4839 {
4840 if (lwp->bp_reinsert != 0)
4841 {
4842 struct thread_info *saved_thread = current_thread;
4843
4844 if (debug_threads)
4845 debug_printf ("Finished step over.\n");
4846
4847 current_thread = get_lwp_thread (lwp);
4848
4849 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4850 may be no breakpoint to reinsert there by now. */
4851 reinsert_breakpoints_at (lwp->bp_reinsert);
4852 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4853
4854 lwp->bp_reinsert = 0;
4855
4856 /* Delete any software-single-step reinsert breakpoints. No
4857 longer needed. We don't have to worry about other threads
4858 hitting this trap, and later not being able to explain it,
4859 because we were stepping over a breakpoint, and we hold all
4860 threads but LWP stopped while doing that. */
4861 if (!can_hardware_single_step ())
4862 {
4863 gdb_assert (has_reinsert_breakpoints (current_process ()));
4864 delete_reinsert_breakpoints ();
4865 }
4866
4867 step_over_bkpt = null_ptid;
4868 current_thread = saved_thread;
4869 return 1;
4870 }
4871 else
4872 return 0;
4873 }
4874
4875 /* If there's a step over in progress, wait until all threads stop
4876 (that is, until the stepping thread finishes its step), and
4877 unsuspend all lwps. The stepping thread ends with its status
4878 pending, which is processed later when we get back to processing
4879 events. */
4880
4881 static void
4882 complete_ongoing_step_over (void)
4883 {
4884 if (!ptid_equal (step_over_bkpt, null_ptid))
4885 {
4886 struct lwp_info *lwp;
4887 int wstat;
4888 int ret;
4889
4890 if (debug_threads)
4891 debug_printf ("detach: step over in progress, finish it first\n");
4892
4893 /* Passing NULL_PTID as filter indicates we want all events to
4894 be left pending. Eventually this returns when there are no
4895 unwaited-for children left. */
4896 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4897 &wstat, __WALL);
4898 gdb_assert (ret == -1);
4899
4900 lwp = find_lwp_pid (step_over_bkpt);
4901 if (lwp != NULL)
4902 finish_step_over (lwp);
4903 step_over_bkpt = null_ptid;
4904 unsuspend_all_lwps (lwp);
4905 }
4906 }
4907
4908 /* This function is called once per thread. We check the thread's resume
4909 request, which will tell us whether to resume, step, or leave the thread
4910 stopped; and what signal, if any, it should be sent.
4911
4912 For threads which we aren't explicitly told otherwise, we preserve
4913 the stepping flag; this is used for stepping over gdbserver-placed
4914 breakpoints.
4915
4916 If pending_flags was set in any thread, we queue any needed
4917 signals, since we won't actually resume. We already have a pending
4918 event to report, so we don't need to preserve any step requests;
4919 they should be re-issued if necessary. */
4920
4921 static int
4922 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4923 {
4924 struct thread_info *thread = (struct thread_info *) entry;
4925 struct lwp_info *lwp = get_thread_lwp (thread);
4926 int step;
4927 int leave_all_stopped = * (int *) arg;
4928 int leave_pending;
4929
4930 if (lwp->resume == NULL)
4931 return 0;
4932
4933 if (lwp->resume->kind == resume_stop)
4934 {
4935 if (debug_threads)
4936 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4937
4938 if (!lwp->stopped)
4939 {
4940 if (debug_threads)
4941 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4942
4943 /* Stop the thread, and wait for the event asynchronously,
4944 through the event loop. */
4945 send_sigstop (lwp);
4946 }
4947 else
4948 {
4949 if (debug_threads)
4950 debug_printf ("already stopped LWP %ld\n",
4951 lwpid_of (thread));
4952
4953 /* The LWP may have been stopped in an internal event that
4954 was not meant to be notified back to GDB (e.g., gdbserver
4955 breakpoint), so we should be reporting a stop event in
4956 this case too. */
4957
4958 /* If the thread already has a pending SIGSTOP, this is a
4959 no-op. Otherwise, something later will presumably resume
4960 the thread and this will cause it to cancel any pending
4961 operation, due to last_resume_kind == resume_stop. If
4962 the thread already has a pending status to report, we
4963 will still report it the next time we wait - see
4964 status_pending_p_callback. */
4965
4966 /* If we already have a pending signal to report, then
4967 there's no need to queue a SIGSTOP, as this means we're
4968 midway through moving the LWP out of the jumppad, and we
4969 will report the pending signal as soon as that is
4970 finished. */
4971 if (lwp->pending_signals_to_report == NULL)
4972 send_sigstop (lwp);
4973 }
4974
4975 /* For stop requests, we're done. */
4976 lwp->resume = NULL;
4977 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4978 return 0;
4979 }
4980
4981 /* If this thread which is about to be resumed has a pending status,
4982 then don't resume it - we can just report the pending status.
4983 Likewise if it is suspended, because e.g., another thread is
4984 stepping past a breakpoint. Make sure to queue any signals that
4985 would otherwise be sent. In all-stop mode, we do this decision
4986 based on if *any* thread has a pending status. If there's a
4987 thread that needs the step-over-breakpoint dance, then don't
4988 resume any other thread but that particular one. */
4989 leave_pending = (lwp->suspended
4990 || lwp->status_pending_p
4991 || leave_all_stopped);
4992
4993 if (!leave_pending)
4994 {
4995 if (debug_threads)
4996 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4997
4998 step = (lwp->resume->kind == resume_step);
4999 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
5000 }
5001 else
5002 {
5003 if (debug_threads)
5004 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5005
5006 /* If we have a new signal, enqueue the signal. */
5007 if (lwp->resume->sig != 0)
5008 {
5009 struct pending_signals *p_sig = XCNEW (struct pending_signals);
5010
5011 p_sig->prev = lwp->pending_signals;
5012 p_sig->signal = lwp->resume->sig;
5013
5014 /* If this is the same signal we were previously stopped by,
5015 make sure to queue its siginfo. We can ignore the return
5016 value of ptrace; if it fails, we'll skip
5017 PTRACE_SETSIGINFO. */
5018 if (WIFSTOPPED (lwp->last_status)
5019 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
5020 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
5021 &p_sig->info);
5022
5023 lwp->pending_signals = p_sig;
5024 }
5025 }
5026
5027 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5028 lwp->resume = NULL;
5029 return 0;
5030 }
5031
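/* Resume execution of the inferior's threads as requested in
   RESUME_INFO.  Records each thread's resume request, then decides
   globally: if any to-be-resumed thread has a pending status, or one
   is stopped at a breakpoint needing a step-over, leave everything
   stopped (queuing signals) and, in the latter case, start the
   step-over; otherwise resume the threads individually.  */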
5032 static void
5033 linux_resume (struct thread_resume *resume_info, size_t n)
5034 {
5035 struct thread_resume_array array = { resume_info, n };
5036 struct thread_info *need_step_over = NULL;
5037 int any_pending;
5038 int leave_all_stopped;
5039
5040 if (debug_threads)
5041 {
5042 debug_enter ();
5043 debug_printf ("linux_resume:\n");
5044 }
5045
5046 find_inferior (&all_threads, linux_set_resume_request, &array);
5047
5048 /* If there is a thread which would otherwise be resumed, which has
5049 a pending status, then don't resume any threads - we can just
5050 report the pending status. Make sure to queue any signals that
5051 would otherwise be sent. In non-stop mode, we'll apply this
5052 logic to each thread individually. We consume all pending events
5053 before considering whether to start a step-over (in all-stop). */
5054 any_pending = 0;
5055 if (!non_stop)
5056 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5057
5058 /* If there is a thread which would otherwise be resumed, which is
5059 stopped at a breakpoint that needs stepping over, then don't
5060 resume any threads - have it step over the breakpoint with all
5061 other threads stopped, then resume all threads again. Make sure
5062 to queue any signals that would otherwise be delivered or
5063 queued. */
5064 if (!any_pending && supports_breakpoints ())
5065 need_step_over
5066 = (struct thread_info *) find_inferior (&all_threads,
5067 need_step_over_p, NULL);
5068
5069 leave_all_stopped = (need_step_over != NULL || any_pending);
5070
5071 if (debug_threads)
5072 {
5073 if (need_step_over != NULL)
5074 debug_printf ("Not resuming all, need step over\n");
5075 else if (any_pending)
5076 debug_printf ("Not resuming, all-stop and found "
5077 "an LWP with pending status\n");
5078 else
5079 debug_printf ("Resuming, no pending status or step over needed\n");
5080 }
5081
5082 /* Even if we're leaving threads stopped, queue all signals we'd
5083 otherwise deliver. */
5084 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5085
5086 if (need_step_over)
5087 start_step_over (get_thread_lwp (need_step_over));
5088
5089 if (debug_threads)
5090 {
5091 debug_printf ("linux_resume done\n");
5092 debug_exit ();
5093 }
5094
5095 /* We may have events that were pending that can/should be sent to
5096 the client now. Trigger a linux_wait call. */
5097 if (target_is_async_p ())
5098 async_file_mark ();
5099 }
5100
5101 /* This function is called once per thread. We check the thread's
5102 last resume request, which will tell us whether to resume, step, or
5103 leave the thread stopped. Any signal the client requested to be
5104 delivered has already been enqueued at this point.
5105
5106 If any thread that GDB wants running is stopped at an internal
5107 breakpoint that needs stepping over, we start a step-over operation
5108 on that particular thread, and leave all others stopped. */
5109
5110 static int
5111 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5112 {
5113 struct thread_info *thread = (struct thread_info *) entry;
5114 struct lwp_info *lwp = get_thread_lwp (thread);
5115 int step;
5116
5117 if (lwp == except)
5118 return 0;
5119
5120 if (debug_threads)
5121 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5122
5123 if (!lwp->stopped)
5124 {
5125 if (debug_threads)
5126 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5127 return 0;
5128 }
5129
5130 if (thread->last_resume_kind == resume_stop
5131 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5132 {
5133 if (debug_threads)
5134 debug_printf (" client wants LWP %ld to remain stopped\n",
5135 lwpid_of (thread));
5136 return 0;
5137 }
5138
5139 if (lwp->status_pending_p)
5140 {
5141 if (debug_threads)
5142 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5143 lwpid_of (thread));
5144 return 0;
5145 }
5146
5147 gdb_assert (lwp->suspended >= 0);
5148
5149 if (lwp->suspended)
5150 {
5151 if (debug_threads)
5152 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5153 return 0;
5154 }
5155
5156 if (thread->last_resume_kind == resume_stop
5157 && lwp->pending_signals_to_report == NULL
5158 && lwp->collecting_fast_tracepoint == 0)
5159 {
5160 /* We haven't reported this LWP as stopped yet (otherwise, the
5161 last_status.kind check above would catch it, and we wouldn't
5162 reach here). This LWP may have been momentarily paused by a
5163 stop_all_lwps call while handling, for example, another LWP's
5164 step-over. In that case, the pending expected SIGSTOP signal
5165 that was queued at vCont;t handling time will have already
5166 been consumed by wait_for_sigstop, and so we need to requeue
5167 another one here. Note that if the LWP already has a SIGSTOP
5168 pending, this is a no-op. */
5169
5170 if (debug_threads)
5171 debug_printf ("Client wants LWP %ld to stop. "
5172 "Making sure it has a SIGSTOP pending\n",
5173 lwpid_of (thread));
5174
5175 send_sigstop (lwp);
5176 }
5177
5178 if (thread->last_resume_kind == resume_step)
5179 {
5180 if (debug_threads)
5181 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5182 lwpid_of (thread));
5183 step = 1;
5184 }
5185 else if (lwp->bp_reinsert != 0)
5186 {
5187 if (debug_threads)
5188 debug_printf (" stepping LWP %ld, reinsert set\n",
5189 lwpid_of (thread));
5190
5191 step = maybe_hw_step (thread);
5192 }
5193 else
5194 step = 0;
5195
5196 linux_resume_one_lwp (lwp, step, 0, NULL);
5197 return 0;
5198 }
5199
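/* Like proceed_one_lwp, but first decrement the LWP's suspend count.
   Used by unstop_all_lwps when undoing a stop_all_lwps call that also
   suspended the threads.  */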
5200 static int
5201 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5202 {
5203 struct thread_info *thread = (struct thread_info *) entry;
5204 struct lwp_info *lwp = get_thread_lwp (thread);
5205
5206 if (lwp == except)
5207 return 0;
5208
5209 lwp_suspended_decr (lwp);
5210
5211 return proceed_one_lwp (entry, except);
5212 }
5213
5214 /* When we finish a step-over, set threads running again. If there's
5215 another thread that may need a step-over, now's the time to start
5216 it. Eventually, we'll move all threads past their breakpoints. */
5217
5218 static void
5219 proceed_all_lwps (void)
5220 {
5221 struct thread_info *need_step_over;
5222
5223 /* If there is a thread which would otherwise be resumed, which is
5224 stopped at a breakpoint that needs stepping over, then don't
5225 resume any threads - have it step over the breakpoint with all
5226 other threads stopped, then resume all threads again. */
5227
5228 if (supports_breakpoints ())
5229 {
5230 need_step_over
5231 = (struct thread_info *) find_inferior (&all_threads,
5232 need_step_over_p, NULL);
5233
5234 if (need_step_over != NULL)
5235 {
5236 if (debug_threads)
5237 debug_printf ("proceed_all_lwps: found "
5238 "thread %ld needing a step-over\n",
5239 lwpid_of (need_step_over));
5240
5241 start_step_over (get_thread_lwp (need_step_over));
5242 return;
5243 }
5244 }
5245
5246 if (debug_threads)
5247 debug_printf ("Proceeding, no step-over needed\n");
5248
5249 find_inferior (&all_threads, proceed_one_lwp, NULL);
5250 }
5251
5252 /* Stopped LWPs that the client wanted to be running, that don't have
5253 pending statuses, are set to run again, except for EXCEPT, if not
5254 NULL. This undoes a stop_all_lwps call. */
5255
5256 static void
5257 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5258 {
5259 if (debug_threads)
5260 {
5261 debug_enter ();
5262 if (except)
5263 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5264 lwpid_of (get_lwp_thread (except)));
5265 else
5266 debug_printf ("unstopping all lwps\n");
5267 }
5268
5269 if (unsuspend)
5270 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5271 else
5272 find_inferior (&all_threads, proceed_one_lwp, except);
5273
5274 if (debug_threads)
5275 {
5276 debug_printf ("unstop_all_lwps done\n");
5277 debug_exit ();
5278 }
5279 }
5280
5281
5282 #ifdef HAVE_LINUX_REGSETS
5283
5284 #define use_linux_regsets 1
5285
5286 /* Returns true if REGSET has been disabled. */
5287
5288 static int
5289 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5290 {
5291 return (info->disabled_regsets != NULL
5292 && info->disabled_regsets[regset - info->regsets]);
5293 }
5294
5295 /* Disable REGSET. */
5296
5297 static void
5298 disable_regset (struct regsets_info *info, struct regset_info *regset)
5299 {
5300 int dr_offset;
5301
5302 dr_offset = regset - info->regsets;
5303 if (info->disabled_regsets == NULL)
5304 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5305 info->disabled_regsets[dr_offset] = 1;
5306 }
5307
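/* Fetch registers using the regset ptrace requests, trying each known
   regset in turn and disabling any the kernel rejects with EIO.
   Returns 0 if the general registers were among the regsets fetched,
   1 if the caller still needs to get them some other way.  */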
5308 static int
5309 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5310 struct regcache *regcache)
5311 {
5312 struct regset_info *regset;
5313 int saw_general_regs = 0;
5314 int pid;
5315 struct iovec iov;
5316
5317 pid = lwpid_of (current_thread);
5318 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5319 {
5320 void *buf, *data;
5321 int nt_type, res;
5322
5323 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5324 continue;
5325
5326 buf = xmalloc (regset->size);
5327
5328 nt_type = regset->nt_type;
5329 if (nt_type)
5330 {
5331 iov.iov_base = buf;
5332 iov.iov_len = regset->size;
5333 data = (void *) &iov;
5334 }
5335 else
5336 data = buf;
5337
5338 #ifndef __sparc__
5339 res = ptrace (regset->get_request, pid,
5340 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5341 #else
5342 res = ptrace (regset->get_request, pid, data, nt_type);
5343 #endif
5344 if (res < 0)
5345 {
5346 if (errno == EIO)
5347 {
5348 /* If we get EIO on a regset, do not try it again for
5349 this process mode. */
5350 disable_regset (regsets_info, regset);
5351 }
5352 else if (errno == ENODATA)
5353 {
5354 /* ENODATA may be returned if the regset is currently
5355 not "active". This can happen in normal operation,
5356 so suppress the warning in this case. */
5357 }
5358 else
5359 {
5360 char s[256];
5361 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5362 pid);
5363 perror (s);
5364 }
5365 }
5366 else
5367 {
5368 if (regset->type == GENERAL_REGS)
5369 saw_general_regs = 1;
5370 regset->store_function (regcache, buf);
5371 }
5372 free (buf);
5373 }
5374 if (saw_general_regs)
5375 return 0;
5376 else
5377 return 1;
5378 }
5379
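/* Store registers using the regset ptrace requests.  Each regset is
   first read back from the kernel, overlaid with the cached register
   contents, and written out again, so kernel-only fields are
   preserved.  Returns 0 if the general registers were among the
   regsets written, 1 otherwise.  */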
5380 static int
5381 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5382 struct regcache *regcache)
5383 {
5384 struct regset_info *regset;
5385 int saw_general_regs = 0;
5386 int pid;
5387 struct iovec iov;
5388
5389 pid = lwpid_of (current_thread);
5390 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5391 {
5392 void *buf, *data;
5393 int nt_type, res;
5394
5395 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5396 || regset->fill_function == NULL)
5397 continue;
5398
5399 buf = xmalloc (regset->size);
5400
5401 /* First fill the buffer with the current register set contents,
5402 in case there are any items in the kernel's regset that are
5403 not in gdbserver's regcache. */
5404
5405 nt_type = regset->nt_type;
5406 if (nt_type)
5407 {
5408 iov.iov_base = buf;
5409 iov.iov_len = regset->size;
5410 data = (void *) &iov;
5411 }
5412 else
5413 data = buf;
5414
5415 #ifndef __sparc__
5416 res = ptrace (regset->get_request, pid,
5417 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5418 #else
5419 res = ptrace (regset->get_request, pid, data, nt_type);
5420 #endif
5421
5422 if (res == 0)
5423 {
5424 /* Then overlay our cached registers on that. */
5425 regset->fill_function (regcache, buf);
5426
5427 /* Only now do we write the register set. */
5428 #ifndef __sparc__
5429 res = ptrace (regset->set_request, pid,
5430 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5431 #else
5432 res = ptrace (regset->set_request, pid, data, nt_type);
5433 #endif
5434 }
5435
5436 if (res < 0)
5437 {
5438 if (errno == EIO)
5439 {
5440 /* If we get EIO on a regset, do not try it again for
5441 this process mode. */
5442 disable_regset (regsets_info, regset);
5443 }
5444 else if (errno == ESRCH)
5445 {
5446 /* At this point, ESRCH should mean the process is
5447 already gone, in which case we simply ignore attempts
5448 to change its registers. See also the related
5449 comment in linux_resume_one_lwp. */
5450 free (buf);
5451 return 0;
5452 }
5453 else
5454 {
5455 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5456 }
5457 }
5458 else if (regset->type == GENERAL_REGS)
5459 saw_general_regs = 1;
5460 free (buf);
5461 }
5462 if (saw_general_regs)
5463 return 0;
5464 else
5465 return 1;
5466 }
5467
5468 #else /* !HAVE_LINUX_REGSETS */
5469
5470 #define use_linux_regsets 0
5471 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5472 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5473
5474 #endif
5475
5476 /* Return 1 if register REGNO is supported by one of the regset ptrace
5477 calls or 0 if it has to be transferred individually. */
5478
5479 static int
5480 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5481 {
5482 unsigned char mask = 1 << (regno % 8);
5483 size_t index = regno / 8;
5484
5485 return (use_linux_regsets
5486 && (regs_info->regset_bitmap == NULL
5487 || (regs_info->regset_bitmap[index] & mask) != 0));
5488 }
5489
5490 #ifdef HAVE_LINUX_USRREGS
5491
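/* Return the PTRACE_PEEKUSER/POKEUSER offset of register REGNUM, as
   recorded in USRREGS->regmap.  A value of -1 means the register is
   not accessible this way.  */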
5492 static int
5493 register_addr (const struct usrregs_info *usrregs, int regnum)
5494 {
5495 int addr;
5496
5497 if (regnum < 0 || regnum >= usrregs->num_regs)
5498 error ("Invalid register number %d.", regnum);
5499
5500 addr = usrregs->regmap[regnum];
5501
5502 return addr;
5503 }
5504
5505 /* Fetch one register. */
5506 static void
5507 fetch_register (const struct usrregs_info *usrregs,
5508 struct regcache *regcache, int regno)
5509 {
5510 CORE_ADDR regaddr;
5511 int i, size;
5512 char *buf;
5513 int pid;
5514
5515 if (regno >= usrregs->num_regs)
5516 return;
5517 if ((*the_low_target.cannot_fetch_register) (regno))
5518 return;
5519
5520 regaddr = register_addr (usrregs, regno);
5521 if (regaddr == -1)
5522 return;
5523
5524 size = ((register_size (regcache->tdesc, regno)
5525 + sizeof (PTRACE_XFER_TYPE) - 1)
5526 & -sizeof (PTRACE_XFER_TYPE));
5527 buf = (char *) alloca (size);
5528
5529 pid = lwpid_of (current_thread);
5530 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5531 {
5532 errno = 0;
5533 *(PTRACE_XFER_TYPE *) (buf + i) =
5534 ptrace (PTRACE_PEEKUSER, pid,
5535 /* Coerce to a uintptr_t first to avoid potential gcc warning
5536 about coercing an 8 byte integer to a 4 byte pointer. */
5537 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5538 regaddr += sizeof (PTRACE_XFER_TYPE);
5539 if (errno != 0)
5540 error ("reading register %d: %s", regno, strerror (errno));
5541 }
5542
5543 if (the_low_target.supply_ptrace_register)
5544 the_low_target.supply_ptrace_register (regcache, regno, buf);
5545 else
5546 supply_register (regcache, regno, buf);
5547 }
5548
5549 /* Store one register. */
5550 static void
5551 store_register (const struct usrregs_info *usrregs,
5552 struct regcache *regcache, int regno)
5553 {
5554 CORE_ADDR regaddr;
5555 int i, size;
5556 char *buf;
5557 int pid;
5558
5559 if (regno >= usrregs->num_regs)
5560 return;
5561 if ((*the_low_target.cannot_store_register) (regno))
5562 return;
5563
5564 regaddr = register_addr (usrregs, regno);
5565 if (regaddr == -1)
5566 return;
5567
5568 size = ((register_size (regcache->tdesc, regno)
5569 + sizeof (PTRACE_XFER_TYPE) - 1)
5570 & -sizeof (PTRACE_XFER_TYPE));
5571 buf = (char *) alloca (size);
5572 memset (buf, 0, size);
5573
5574 if (the_low_target.collect_ptrace_register)
5575 the_low_target.collect_ptrace_register (regcache, regno, buf);
5576 else
5577 collect_register (regcache, regno, buf);
5578
5579 pid = lwpid_of (current_thread);
5580 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5581 {
5582 errno = 0;
5583 ptrace (PTRACE_POKEUSER, pid,
5584 /* Coerce to a uintptr_t first to avoid potential gcc warning
5585 about coercing an 8 byte integer to a 4 byte pointer. */
5586 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5587 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5588 if (errno != 0)
5589 {
5590 /* At this point, ESRCH should mean the process is
5591 already gone, in which case we simply ignore attempts
5592 to change its registers. See also the related
5593 comment in linux_resume_one_lwp. */
5594 if (errno == ESRCH)
5595 return;
5596
5597 if ((*the_low_target.cannot_store_register) (regno) == 0)
5598 error ("writing register %d: %s", regno, strerror (errno));
5599 }
5600 regaddr += sizeof (PTRACE_XFER_TYPE);
5601 }
5602 }
5603
5604 /* Fetch all registers, or just one, from the child process.
5605 If REGNO is -1, do this for all registers, skipping any that are
5606 assumed to have been retrieved by regsets_fetch_inferior_registers,
5607 unless ALL is non-zero.
5608 Otherwise, REGNO specifies which register (so we can save time). */
5609 static void
5610 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5611 struct regcache *regcache, int regno, int all)
5612 {
5613 struct usrregs_info *usr = regs_info->usrregs;
5614
5615 if (regno == -1)
5616 {
5617 for (regno = 0; regno < usr->num_regs; regno++)
5618 if (all || !linux_register_in_regsets (regs_info, regno))
5619 fetch_register (usr, regcache, regno);
5620 }
5621 else
5622 fetch_register (usr, regcache, regno);
5623 }
5624
5625 /* Store our register values back into the inferior.
5626 If REGNO is -1, do this for all registers, skipping any that are
5627 assumed to have been saved by regsets_store_inferior_registers,
5628 unless ALL is non-zero.
5629 Otherwise, REGNO specifies which register (so we can save time). */
5630 static void
5631 usr_store_inferior_registers (const struct regs_info *regs_info,
5632 struct regcache *regcache, int regno, int all)
5633 {
5634 struct usrregs_info *usr = regs_info->usrregs;
5635
5636 if (regno == -1)
5637 {
5638 for (regno = 0; regno < usr->num_regs; regno++)
5639 if (all || !linux_register_in_regsets (regs_info, regno))
5640 store_register (usr, regcache, regno);
5641 }
5642 else
5643 store_register (usr, regcache, regno);
5644 }
5645
5646 #else /* !HAVE_LINUX_USRREGS */
5647
5648 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5649 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5650
5651 #endif
5652
5653
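/* Fetch registers from the inferior into REGCACHE.  REGNO of -1 means
   fetch all registers; otherwise fetch just REGNO.  Regsets are tried
   first, with PTRACE_PEEKUSER transfers as a fallback for anything a
   regset did not provide.  */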
5654 static void
5655 linux_fetch_registers (struct regcache *regcache, int regno)
5656 {
5657 int use_regsets;
5658 int all = 0;
5659 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5660
5661 if (regno == -1)
5662 {
5663 if (the_low_target.fetch_register != NULL
5664 && regs_info->usrregs != NULL)
5665 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5666 (*the_low_target.fetch_register) (regcache, regno);
5667
5668 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5669 if (regs_info->usrregs != NULL)
5670 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5671 }
5672 else
5673 {
5674 if (the_low_target.fetch_register != NULL
5675 && (*the_low_target.fetch_register) (regcache, regno))
5676 return;
5677
5678 use_regsets = linux_register_in_regsets (regs_info, regno);
5679 if (use_regsets)
5680 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5681 regcache);
5682 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5683 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5684 }
5685 }
5686
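/* Store registers from REGCACHE back into the inferior.  REGNO of -1
   means store all registers; otherwise store just REGNO.  As with
   fetching, regsets are tried first, with PTRACE_POKEUSER transfers
   as a fallback.  */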
5687 static void
5688 linux_store_registers (struct regcache *regcache, int regno)
5689 {
5690 int use_regsets;
5691 int all = 0;
5692 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5693
5694 if (regno == -1)
5695 {
5696 all = regsets_store_inferior_registers (regs_info->regsets_info,
5697 regcache);
5698 if (regs_info->usrregs != NULL)
5699 usr_store_inferior_registers (regs_info, regcache, regno, all);
5700 }
5701 else
5702 {
5703 use_regsets = linux_register_in_regsets (regs_info, regno);
5704 if (use_regsets)
5705 all = regsets_store_inferior_registers (regs_info->regsets_info,
5706 regcache);
5707 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5708 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5709 }
5710 }
5711
5712
5713 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5714 to debugger memory starting at MYADDR. */
5715
5716 static int
5717 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5718 {
5719 int pid = lwpid_of (current_thread);
5720 register PTRACE_XFER_TYPE *buffer;
5721 register CORE_ADDR addr;
5722 register int count;
5723 char filename[64];
5724 register int i;
5725 int ret;
5726 int fd;
5727
5728 /* Try using /proc. Don't bother for one word. */
5729 if (len >= 3 * sizeof (long))
5730 {
5731 int bytes;
5732
5733 /* We could keep this file open and cache it - possibly one per
5734 thread. That requires some juggling, but is even faster. */
5735 sprintf (filename, "/proc/%d/mem", pid);
5736 fd = open (filename, O_RDONLY | O_LARGEFILE);
5737 if (fd == -1)
5738 goto no_proc;
5739
5740 /* If pread64 is available, use it. It's faster if the kernel
5741 supports it (only one syscall), and it's 64-bit safe even on
5742 32-bit platforms (for instance, SPARC debugging a SPARC64
5743 application). */
5744 #ifdef HAVE_PREAD64
5745 bytes = pread64 (fd, myaddr, len, memaddr);
5746 #else
5747 bytes = -1;
5748 if (lseek (fd, memaddr, SEEK_SET) != -1)
5749 bytes = read (fd, myaddr, len);
5750 #endif
5751
5752 close (fd);
5753 if (bytes == len)
5754 return 0;
5755
5756 /* Some data was read; we'll try to get the rest with ptrace. */
5757 if (bytes > 0)
5758 {
5759 memaddr += bytes;
5760 myaddr += bytes;
5761 len -= bytes;
5762 }
5763 }
5764
5765 no_proc:
5766 /* Round starting address down to longword boundary. */
5767 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5768 /* Round ending address up; get number of longwords that makes. */
5769 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5770 / sizeof (PTRACE_XFER_TYPE));
5771 /* Allocate buffer of that many longwords. */
5772 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5773
5774 /* Read all the longwords. */
5775 errno = 0;
5776 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5777 {
5778 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5779 about coercing an 8 byte integer to a 4 byte pointer. */
5780 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5781 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5782 (PTRACE_TYPE_ARG4) 0);
5783 if (errno)
5784 break;
5785 }
5786 ret = errno;
5787
5788 /* Copy appropriate bytes out of the buffer. */
5789 if (i > 0)
5790 {
5791 i *= sizeof (PTRACE_XFER_TYPE);
5792 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5793 memcpy (myaddr,
5794 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5795 i < len ? i : len);
5796 }
5797
5798 return ret;
5799 }
5800
5801 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5802 memory at MEMADDR. On failure (cannot write to the inferior)
5803 returns the value of errno. Always succeeds if LEN is zero. */
5804
5805 static int
5806 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5807 {
5808 register int i;
5809 /* Round starting address down to longword boundary. */
5810 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5811 /* Round ending address up; get number of longwords that makes. */
5812 register int count
5813 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5814 / sizeof (PTRACE_XFER_TYPE);
5815
5816 /* Allocate buffer of that many longwords. */
5817 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5818
5819 int pid = lwpid_of (current_thread);
5820
5821 if (len == 0)
5822 {
5823 /* Zero length write always succeeds. */
5824 return 0;
5825 }
5826
5827 if (debug_threads)
5828 {
5829 /* Dump up to four bytes. */
5830 char str[4 * 2 + 1];
5831 char *p = str;
5832 int dump = len < 4 ? len : 4;
5833
5834 for (i = 0; i < dump; i++)
5835 {
5836 sprintf (p, "%02x", myaddr[i]);
5837 p += 2;
5838 }
5839 *p = '\0';
5840
5841 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5842 str, (long) memaddr, pid);
5843 }
5844
5845 /* Fill start and end extra bytes of buffer with existing memory data. */
5846
5847 errno = 0;
5848 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5849 about coercing an 8 byte integer to a 4 byte pointer. */
5850 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5851 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5852 (PTRACE_TYPE_ARG4) 0);
5853 if (errno)
5854 return errno;
5855
5856 if (count > 1)
5857 {
5858 errno = 0;
5859 buffer[count - 1]
5860 = ptrace (PTRACE_PEEKTEXT, pid,
5861 /* Coerce to a uintptr_t first to avoid potential gcc warning
5862 about coercing an 8 byte integer to a 4 byte pointer. */
5863 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5864 * sizeof (PTRACE_XFER_TYPE)),
5865 (PTRACE_TYPE_ARG4) 0);
5866 if (errno)
5867 return errno;
5868 }
5869
5870 /* Copy data to be written over corresponding part of buffer. */
5871
5872 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5873 myaddr, len);
5874
5875 /* Write the entire buffer. */
5876
5877 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5878 {
5879 errno = 0;
5880 ptrace (PTRACE_POKETEXT, pid,
5881 /* Coerce to a uintptr_t first to avoid potential gcc warning
5882 about coercing an 8 byte integer to a 4 byte pointer. */
5883 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5884 (PTRACE_TYPE_ARG4) buffer[i]);
5885 if (errno)
5886 return errno;
5887 }
5888
5889 return 0;
5890 }
5891
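/* Target hook called when new symbols are available.  If thread_db
   support is compiled in, use the occasion to initialize it for the
   current process, unless that has already been done.  */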
5892 static void
5893 linux_look_up_symbols (void)
5894 {
5895 #ifdef USE_THREAD_DB
5896 struct process_info *proc = current_process ();
5897
5898 if (proc->priv->thread_db != NULL)
5899 return;
5900
5901 thread_db_init ();
5902 #endif
5903 }
5904
5905 static void
5906 linux_request_interrupt (void)
5907 {
5908 extern unsigned long signal_pid;
5909
5910 /* Send a SIGINT to the process group. This acts just like the user
5911 typed a ^C on the controlling terminal. */
5912 kill (-signal_pid, SIGINT);
5913 }
5914
5915 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5916 to debugger memory starting at MYADDR. */
5917
5918 static int
5919 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5920 {
5921 char filename[PATH_MAX];
5922 int fd, n;
5923 int pid = lwpid_of (current_thread);
5924
5925 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5926
5927 fd = open (filename, O_RDONLY);
5928 if (fd < 0)
5929 return -1;
5930
5931 if (offset != (CORE_ADDR) 0
5932 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5933 n = -1;
5934 else
5935 n = read (fd, myaddr, len);
5936
5937 close (fd);
5938
5939 return n;
5940 }
5941
5942 /* These breakpoint- and watchpoint-related wrapper functions simply
5943 pass on the function call if the target has registered a
5944 corresponding function. */
5945
5946 static int
5947 linux_supports_z_point_type (char z_type)
5948 {
5949 return (the_low_target.supports_z_point_type != NULL
5950 && the_low_target.supports_z_point_type (z_type));
5951 }
5952
5953 static int
5954 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5955 int size, struct raw_breakpoint *bp)
5956 {
5957 if (type == raw_bkpt_type_sw)
5958 return insert_memory_breakpoint (bp);
5959 else if (the_low_target.insert_point != NULL)
5960 return the_low_target.insert_point (type, addr, size, bp);
5961 else
5962 /* Unsupported (see target.h). */
5963 return 1;
5964 }
5965
5966 static int
5967 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5968 int size, struct raw_breakpoint *bp)
5969 {
5970 if (type == raw_bkpt_type_sw)
5971 return remove_memory_breakpoint (bp);
5972 else if (the_low_target.remove_point != NULL)
5973 return the_low_target.remove_point (type, addr, size, bp);
5974 else
5975 /* Unsupported (see target.h). */
5976 return 1;
5977 }
5978
5979 /* Implement the to_stopped_by_sw_breakpoint target_ops
5980 method. */
5981
5982 static int
5983 linux_stopped_by_sw_breakpoint (void)
5984 {
5985 struct lwp_info *lwp = get_thread_lwp (current_thread);
5986
5987 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5988 }
5989
5990 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5991 method. */
5992
5993 static int
5994 linux_supports_stopped_by_sw_breakpoint (void)
5995 {
5996 return USE_SIGTRAP_SIGINFO;
5997 }
5998
5999 /* Implement the to_stopped_by_hw_breakpoint target_ops
6000 method. */
6001
6002 static int
6003 linux_stopped_by_hw_breakpoint (void)
6004 {
6005 struct lwp_info *lwp = get_thread_lwp (current_thread);
6006
6007 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6008 }
6009
6010 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6011 method. */
6012
6013 static int
6014 linux_supports_stopped_by_hw_breakpoint (void)
6015 {
6016 return USE_SIGTRAP_SIGINFO;
6017 }
6018
6019 /* Implement the supports_hardware_single_step target_ops method. */
6020
6021 static int
6022 linux_supports_hardware_single_step (void)
6023 {
6024 return can_hardware_single_step ();
6025 }
6026
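/* Implement the supports_software_single_step target_ops method. */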
6027 static int
6028 linux_supports_software_single_step (void)
6029 {
6030 return can_software_single_step ();
6031 }
6032
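/* Implement the to_stopped_by_watchpoint target_ops method. */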
6033 static int
6034 linux_stopped_by_watchpoint (void)
6035 {
6036 struct lwp_info *lwp = get_thread_lwp (current_thread);
6037
6038 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6039 }
6040
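/* Implement the to_stopped_data_address target_ops method. */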
6041 static CORE_ADDR
6042 linux_stopped_data_address (void)
6043 {
6044 struct lwp_info *lwp = get_thread_lwp (current_thread);
6045
6046 return lwp->stopped_data_address;
6047 }
6048
6049 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6050 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6051 && defined(PT_TEXT_END_ADDR)
6052
6053 /* This is only used for targets that define PT_TEXT_ADDR,
6054 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6055 the target has different ways of acquiring this information, like
6056 loadmaps. */
6057
6058 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6059 to tell gdb about. */
6060
6061 static int
6062 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6063 {
6064 unsigned long text, text_end, data;
6065 int pid = lwpid_of (current_thread);
6066
6067 errno = 0;
6068
6069 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6070 (PTRACE_TYPE_ARG4) 0);
6071 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6072 (PTRACE_TYPE_ARG4) 0);
6073 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6074 (PTRACE_TYPE_ARG4) 0);
6075
6076 if (errno == 0)
6077 {
6078 /* Both text and data offsets produced at compile-time (and so
6079 used by gdb) are relative to the beginning of the program,
6080 with the data segment immediately following the text segment.
6081 However, the actual runtime layout in memory may put the data
6082 somewhere else, so when we send gdb a data base-address, we
6083 use the real data base address and subtract the compile-time
6084 data base-address from it (which is just the length of the
6085 text segment). BSS immediately follows data in both
6086 cases. */
6087 *text_p = text;
6088 *data_p = data - (text_end - text);
6089
6090 return 1;
6091 }
6092 return 0;
6093 }
6094 #endif
6095
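/* Construct a qXfer:osdata:read reply by delegating to the common
   GNU/Linux osdata code.  The osdata object is read-only, so WRITEBUF
   is ignored.  */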
6096 static int
6097 linux_qxfer_osdata (const char *annex,
6098 unsigned char *readbuf, unsigned const char *writebuf,
6099 CORE_ADDR offset, int len)
6100 {
6101 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6102 }
6103
6104 /* Convert a native/host siginfo object into/from the siginfo in the
6105 layout of the inferior's architecture. */
6106
6107 static void
6108 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6109 {
6110 int done = 0;
6111
6112 if (the_low_target.siginfo_fixup != NULL)
6113 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6114
6115 /* If there was no callback, or the callback didn't do anything,
6116 then just do a straight memcpy. */
6117 if (!done)
6118 {
6119 if (direction == 1)
6120 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6121 else
6122 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6123 }
6124 }
6125
6126 static int
6127 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6128 unsigned const char *writebuf, CORE_ADDR offset, int len)
6129 {
6130 int pid;
6131 siginfo_t siginfo;
6132 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6133
6134 if (current_thread == NULL)
6135 return -1;
6136
6137 pid = lwpid_of (current_thread);
6138
6139 if (debug_threads)
6140 debug_printf ("%s siginfo for lwp %d.\n",
6141 readbuf != NULL ? "Reading" : "Writing",
6142 pid);
6143
6144 if (offset >= sizeof (siginfo))
6145 return -1;
6146
6147 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6148 return -1;
6149
6150 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6151 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6152 inferior with a 64-bit GDBSERVER should look the same as debugging it
6153 with a 32-bit GDBSERVER, we need to convert it. */
6154 siginfo_fixup (&siginfo, inf_siginfo, 0);
6155
6156 if (offset + len > sizeof (siginfo))
6157 len = sizeof (siginfo) - offset;
6158
6159 if (readbuf != NULL)
6160 memcpy (readbuf, inf_siginfo + offset, len);
6161 else
6162 {
6163 memcpy (inf_siginfo + offset, writebuf, len);
6164
6165 /* Convert back to ptrace layout before flushing it out. */
6166 siginfo_fixup (&siginfo, inf_siginfo, 1);
6167
6168 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6169 return -1;
6170 }
6171
6172 return len;
6173 }
6174
6175 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6176 it lets us notice when children change state; and it serves as the
6177 handler for the sigsuspend in my_waitpid. */
6178
6179 static void
6180 sigchld_handler (int signo)
6181 {
6182 int old_errno = errno;
6183
6184 if (debug_threads)
6185 {
6186 do
6187 {
6188 /* fprintf is not async-signal-safe, so call write
6189 directly. */
6190 if (write (2, "sigchld_handler\n",
6191 sizeof ("sigchld_handler\n") - 1) < 0)
6192 break; /* just ignore */
6193 } while (0);
6194 }
6195
6196 if (target_is_async_p ())
6197 async_file_mark (); /* Trigger a linux_wait. */
6198
6199 errno = old_errno;
6200 }
6201
6202 static int
6203 linux_supports_non_stop (void)
6204 {
6205 return 1;
6206 }
6207
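/* Put the target into or out of async mode.  With SIGCHLD temporarily
   blocked, create or tear down the event pipe that sigchld_handler
   uses to wake up the event loop.  Return the previous setting.  */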
6208 static int
6209 linux_async (int enable)
6210 {
6211 int previous = target_is_async_p ();
6212
6213 if (debug_threads)
6214 debug_printf ("linux_async (%d), previous=%d\n",
6215 enable, previous);
6216
6217 if (previous != enable)
6218 {
6219 sigset_t mask;
6220 sigemptyset (&mask);
6221 sigaddset (&mask, SIGCHLD);
6222
6223 sigprocmask (SIG_BLOCK, &mask, NULL);
6224
6225 if (enable)
6226 {
6227 if (pipe (linux_event_pipe) == -1)
6228 {
6229 linux_event_pipe[0] = -1;
6230 linux_event_pipe[1] = -1;
6231 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6232
6233 warning ("creating event pipe failed.");
6234 return previous;
6235 }
6236
6237 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6238 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6239
6240 /* Register the event loop handler. */
6241 add_file_handler (linux_event_pipe[0],
6242 handle_target_event, NULL);
6243
6244 /* Always trigger a linux_wait. */
6245 async_file_mark ();
6246 }
6247 else
6248 {
6249 delete_file_handler (linux_event_pipe[0]);
6250
6251 close (linux_event_pipe[0]);
6252 close (linux_event_pipe[1]);
6253 linux_event_pipe[0] = -1;
6254 linux_event_pipe[1] = -1;
6255 }
6256
6257 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6258 }
6259
6260 return previous;
6261 }
6262
6263 static int
6264 linux_start_non_stop (int nonstop)
6265 {
6266 /* Register with or unregister from the event loop accordingly. */
6267 linux_async (nonstop);
6268
6269 if (target_is_async_p () != (nonstop != 0))
6270 return -1;
6271
6272 return 0;
6273 }
6274
6275 static int
6276 linux_supports_multi_process (void)
6277 {
6278 return 1;
6279 }
6280
6281 /* Check if fork events are supported. */
6282
6283 static int
6284 linux_supports_fork_events (void)
6285 {
6286 return linux_supports_tracefork ();
6287 }
6288
6289 /* Check if vfork events are supported. */
6290
6291 static int
6292 linux_supports_vfork_events (void)
6293 {
6294 return linux_supports_tracefork ();
6295 }
6296
6297 /* Check if exec events are supported. */
6298
6299 static int
6300 linux_supports_exec_events (void)
6301 {
6302 return linux_supports_traceexec ();
6303 }
6304
6305 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6306 options for the specified lwp. */
6307
6308 static int
6309 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6310 void *args)
6311 {
6312 struct thread_info *thread = (struct thread_info *) entry;
6313 struct lwp_info *lwp = get_thread_lwp (thread);
6314
6315 if (!lwp->stopped)
6316 {
6317 /* Stop the lwp so we can modify its ptrace options. */
6318 lwp->must_set_ptrace_flags = 1;
6319 linux_stop_lwp (lwp);
6320 }
6321 else
6322 {
6323 /* Already stopped; go ahead and set the ptrace options. */
6324 struct process_info *proc = find_process_pid (pid_of (thread));
6325 int options = linux_low_ptrace_options (proc->attached);
6326
6327 linux_enable_event_reporting (lwpid_of (thread), options);
6328 lwp->must_set_ptrace_flags = 0;
6329 }
6330
6331 return 0;
6332 }
6333
6334 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6335 ptrace flags for all inferiors. This is in case the new GDB connection
6336 doesn't support the same set of events that the previous one did. */
6337
6338 static void
6339 linux_handle_new_gdb_connection (void)
6340 {
6341 pid_t pid;
6342
6343 /* Request that all the lwps reset their ptrace options. */
6344 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6345 }
6346
6347 static int
6348 linux_supports_disable_randomization (void)
6349 {
6350 #ifdef HAVE_PERSONALITY
6351 return 1;
6352 #else
6353 return 0;
6354 #endif
6355 }
6356
6357 static int
6358 linux_supports_agent (void)
6359 {
6360 return 1;
6361 }
6362
6363 static int
6364 linux_supports_range_stepping (void)
6365 {
6366 if (*the_low_target.supports_range_stepping == NULL)
6367 return 0;
6368
6369 return (*the_low_target.supports_range_stepping) ();
6370 }
6371
6372 /* Enumerate spufs IDs for process PID. */
6373 static int
6374 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6375 {
6376 int pos = 0;
6377 int written = 0;
6378 char path[128];
6379 DIR *dir;
6380 struct dirent *entry;
6381
6382 sprintf (path, "/proc/%ld/fd", pid);
6383 dir = opendir (path);
6384 if (!dir)
6385 return -1;
6386
6387 rewinddir (dir);
6388 while ((entry = readdir (dir)) != NULL)
6389 {
6390 struct stat st;
6391 struct statfs stfs;
6392 int fd;
6393
6394 fd = atoi (entry->d_name);
6395 if (!fd)
6396 continue;
6397
6398 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6399 if (stat (path, &st) != 0)
6400 continue;
6401 if (!S_ISDIR (st.st_mode))
6402 continue;
6403
6404 if (statfs (path, &stfs) != 0)
6405 continue;
6406 if (stfs.f_type != SPUFS_MAGIC)
6407 continue;
6408
6409 if (pos >= offset && pos + 4 <= offset + len)
6410 {
6411 *(unsigned int *)(buf + pos - offset) = fd;
6412 written += 4;
6413 }
6414 pos += 4;
6415 }
6416
6417 closedir (dir);
6418 return written;
6419 }
6420
6421 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6422 object type, using the /proc file system. */
6423 static int
6424 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6425 unsigned const char *writebuf,
6426 CORE_ADDR offset, int len)
6427 {
6428 long pid = lwpid_of (current_thread);
6429 char buf[128];
6430 int fd = 0;
6431 int ret = 0;
6432
6433 if (!writebuf && !readbuf)
6434 return -1;
6435
6436 if (!*annex)
6437 {
6438 if (!readbuf)
6439 return -1;
6440 else
6441 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6442 }
6443
6444 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6445 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6446 if (fd <= 0)
6447 return -1;
6448
6449 if (offset != 0
6450 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6451 {
6452 close (fd);
6453 return 0;
6454 }
6455
6456 if (writebuf)
6457 ret = write (fd, writebuf, (size_t) len);
6458 else
6459 ret = read (fd, readbuf, (size_t) len);
6460
6461 close (fd);
6462 return ret;
6463 }
6464
6465 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6466 struct target_loadseg
6467 {
6468 /* Core address to which the segment is mapped. */
6469 Elf32_Addr addr;
6470 /* VMA recorded in the program header. */
6471 Elf32_Addr p_vaddr;
6472 /* Size of this segment in memory. */
6473 Elf32_Word p_memsz;
6474 };
6475
6476 # if defined PT_GETDSBT
6477 struct target_loadmap
6478 {
6479 /* Protocol version number, must be zero. */
6480 Elf32_Word version;
6481 /* Pointer to the DSBT table, its size, and the DSBT index. */
6482 unsigned *dsbt_table;
6483 unsigned dsbt_size, dsbt_index;
6484 /* Number of segments in this map. */
6485 Elf32_Word nsegs;
6486 /* The actual memory map. */
6487 struct target_loadseg segs[/*nsegs*/];
6488 };
6489 # define LINUX_LOADMAP PT_GETDSBT
6490 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6491 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6492 # else
6493 struct target_loadmap
6494 {
6495 /* Protocol version number, must be zero. */
6496 Elf32_Half version;
6497 /* Number of segments in this map. */
6498 Elf32_Half nsegs;
6499 /* The actual memory map. */
6500 struct target_loadseg segs[/*nsegs*/];
6501 };
6502 # define LINUX_LOADMAP PTRACE_GETFDPIC
6503 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6504 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6505 # endif
6506
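/* Implement the read_loadmap target op.  Fetch the FDPIC/DSBT load
   map of either the executable (ANNEX "exec") or the interpreter
   (ANNEX "interp") via ptrace, and copy up to LEN bytes starting at
   OFFSET into MYADDR.  Return the number of bytes copied, or -1 on
   error.  */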
6507 static int
6508 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6509 unsigned char *myaddr, unsigned int len)
6510 {
6511 int pid = lwpid_of (current_thread);
6512 int addr = -1;
6513 struct target_loadmap *data = NULL;
6514 unsigned int actual_length, copy_length;
6515
6516 if (strcmp (annex, "exec") == 0)
6517 addr = (int) LINUX_LOADMAP_EXEC;
6518 else if (strcmp (annex, "interp") == 0)
6519 addr = (int) LINUX_LOADMAP_INTERP;
6520 else
6521 return -1;
6522
6523 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6524 return -1;
6525
6526 if (data == NULL)
6527 return -1;
6528
6529 actual_length = sizeof (struct target_loadmap)
6530 + sizeof (struct target_loadseg) * data->nsegs;
6531
6532 if (offset < 0 || offset > actual_length)
6533 return -1;
6534
6535 copy_length = actual_length - offset < len ? actual_length - offset : len;
6536 memcpy (myaddr, (char *) data + offset, copy_length);
6537 return copy_length;
6538 }
6539 #else
6540 # define linux_read_loadmap NULL
6541 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6542
6543 static void
6544 linux_process_qsupported (char **features, int count)
6545 {
6546 if (the_low_target.process_qsupported != NULL)
6547 the_low_target.process_qsupported (features, count);
6548 }
6549
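/* Catching syscalls requires a low target hook to fetch the syscall
   trap info, plus PTRACE_O_TRACESYSGOOD support in the kernel.  */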
6550 static int
6551 linux_supports_catch_syscall (void)
6552 {
6553 return (the_low_target.get_syscall_trapinfo != NULL
6554 && linux_supports_tracesysgood ());
6555 }
6556
6557 static int
6558 linux_get_ipa_tdesc_idx (void)
6559 {
6560 if (the_low_target.get_ipa_tdesc_idx == NULL)
6561 return 0;
6562
6563 return (*the_low_target.get_ipa_tdesc_idx) ();
6564 }
6565
6566 static int
6567 linux_supports_tracepoints (void)
6568 {
6569 if (*the_low_target.supports_tracepoints == NULL)
6570 return 0;
6571
6572 return (*the_low_target.supports_tracepoints) ();
6573 }
6574
6575 static CORE_ADDR
6576 linux_read_pc (struct regcache *regcache)
6577 {
6578 if (the_low_target.get_pc == NULL)
6579 return 0;
6580
6581 return (*the_low_target.get_pc) (regcache);
6582 }
6583
6584 static void
6585 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6586 {
6587 gdb_assert (the_low_target.set_pc != NULL);
6588
6589 (*the_low_target.set_pc) (regcache, pc);
6590 }
6591
6592 static int
6593 linux_thread_stopped (struct thread_info *thread)
6594 {
6595 return get_thread_lwp (thread)->stopped;
6596 }
6597
6598 /* This exposes stop-all-threads functionality to other modules. */
6599
6600 static void
6601 linux_pause_all (int freeze)
6602 {
6603 stop_all_lwps (freeze, NULL);
6604 }
6605
6606 /* This exposes unstop-all-threads functionality to other gdbserver
6607 modules. */
6608
6609 static void
6610 linux_unpause_all (int unfreeze)
6611 {
6612 unstop_all_lwps (unfreeze, NULL);
6613 }
6614
6615 static int
6616 linux_prepare_to_access_memory (void)
6617 {
6618 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6619 running LWP. */
6620 if (non_stop)
6621 linux_pause_all (1);
6622 return 0;
6623 }
6624
6625 static void
6626 linux_done_accessing_memory (void)
6627 {
6628 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6629 running LWP. */
6630 if (non_stop)
6631 linux_unpause_all (1);
6632 }
6633
6634 static int
6635 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6636 CORE_ADDR collector,
6637 CORE_ADDR lockaddr,
6638 ULONGEST orig_size,
6639 CORE_ADDR *jump_entry,
6640 CORE_ADDR *trampoline,
6641 ULONGEST *trampoline_size,
6642 unsigned char *jjump_pad_insn,
6643 ULONGEST *jjump_pad_insn_size,
6644 CORE_ADDR *adjusted_insn_addr,
6645 CORE_ADDR *adjusted_insn_addr_end,
6646 char *err)
6647 {
6648 return (*the_low_target.install_fast_tracepoint_jump_pad)
6649 (tpoint, tpaddr, collector, lockaddr, orig_size,
6650 jump_entry, trampoline, trampoline_size,
6651 jjump_pad_insn, jjump_pad_insn_size,
6652 adjusted_insn_addr, adjusted_insn_addr_end,
6653 err);
6654 }
6655
6656 static struct emit_ops *
6657 linux_emit_ops (void)
6658 {
6659 if (the_low_target.emit_ops != NULL)
6660 return (*the_low_target.emit_ops) ();
6661 else
6662 return NULL;
6663 }
6664
6665 static int
6666 linux_get_min_fast_tracepoint_insn_len (void)
6667 {
6668 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6669 }
6670
6671 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6672
6673 static int
6674 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6675 CORE_ADDR *phdr_memaddr, int *num_phdr)
6676 {
6677 char filename[PATH_MAX];
6678 int fd;
6679 const int auxv_size = is_elf64
6680 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6681 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6682
6683 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6684
6685 fd = open (filename, O_RDONLY);
6686 if (fd < 0)
6687 return 1;
6688
6689 *phdr_memaddr = 0;
6690 *num_phdr = 0;
6691 while (read (fd, buf, auxv_size) == auxv_size
6692 && (*phdr_memaddr == 0 || *num_phdr == 0))
6693 {
6694 if (is_elf64)
6695 {
6696 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6697
6698 switch (aux->a_type)
6699 {
6700 case AT_PHDR:
6701 *phdr_memaddr = aux->a_un.a_val;
6702 break;
6703 case AT_PHNUM:
6704 *num_phdr = aux->a_un.a_val;
6705 break;
6706 }
6707 }
6708 else
6709 {
6710 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6711
6712 switch (aux->a_type)
6713 {
6714 case AT_PHDR:
6715 *phdr_memaddr = aux->a_un.a_val;
6716 break;
6717 case AT_PHNUM:
6718 *num_phdr = aux->a_un.a_val;
6719 break;
6720 }
6721 }
6722 }
6723
6724 close (fd);
6725
6726 if (*phdr_memaddr == 0 || *num_phdr == 0)
6727 {
6728 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6729 "phdr_memaddr = %ld, phdr_num = %d",
6730 (long) *phdr_memaddr, *num_phdr);
6731 return 2;
6732 }
6733
6734 return 0;
6735 }
6736
6737 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6738
6739 static CORE_ADDR
6740 get_dynamic (const int pid, const int is_elf64)
6741 {
6742 CORE_ADDR phdr_memaddr, relocation;
6743 int num_phdr, i;
6744 unsigned char *phdr_buf;
6745 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6746
6747 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6748 return 0;
6749
6750 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6751 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6752
6753 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6754 return 0;
6755
6756 /* Compute relocation: it is expected to be 0 for "regular" executables,
6757 non-zero for PIE ones. */
6758 relocation = -1;
6759 for (i = 0; relocation == -1 && i < num_phdr; i++)
6760 if (is_elf64)
6761 {
6762 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6763
6764 if (p->p_type == PT_PHDR)
6765 relocation = phdr_memaddr - p->p_vaddr;
6766 }
6767 else
6768 {
6769 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6770
6771 if (p->p_type == PT_PHDR)
6772 relocation = phdr_memaddr - p->p_vaddr;
6773 }
6774
6775 if (relocation == -1)
6776 {
6777 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6778 all real-world executables, including PIE executables, always have
6779 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6780 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6781 provides DT_DEBUG anyway (fpc binaries are statically linked).
6782
6783 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6784
6785 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6786
6787 return 0;
6788 }
6789
6790 for (i = 0; i < num_phdr; i++)
6791 {
6792 if (is_elf64)
6793 {
6794 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6795
6796 if (p->p_type == PT_DYNAMIC)
6797 return p->p_vaddr + relocation;
6798 }
6799 else
6800 {
6801 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6802
6803 if (p->p_type == PT_DYNAMIC)
6804 return p->p_vaddr + relocation;
6805 }
6806 }
6807
6808 return 0;
6809 }
6810
6811 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6812 can be 0 if the inferior does not yet have the library list initialized.
6813 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6814 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6815
6816 static CORE_ADDR
6817 get_r_debug (const int pid, const int is_elf64)
6818 {
6819 CORE_ADDR dynamic_memaddr;
6820 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6821 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6822 CORE_ADDR map = -1;
6823
6824 dynamic_memaddr = get_dynamic (pid, is_elf64);
6825 if (dynamic_memaddr == 0)
6826 return map;
6827
6828 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6829 {
6830 if (is_elf64)
6831 {
6832 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6833 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6834 union
6835 {
6836 Elf64_Xword map;
6837 unsigned char buf[sizeof (Elf64_Xword)];
6838 }
6839 rld_map;
6840 #endif
6841 #ifdef DT_MIPS_RLD_MAP
6842 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6843 {
6844 if (linux_read_memory (dyn->d_un.d_val,
6845 rld_map.buf, sizeof (rld_map.buf)) == 0)
6846 return rld_map.map;
6847 else
6848 break;
6849 }
6850 #endif /* DT_MIPS_RLD_MAP */
6851 #ifdef DT_MIPS_RLD_MAP_REL
6852 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6853 {
6854 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6855 rld_map.buf, sizeof (rld_map.buf)) == 0)
6856 return rld_map.map;
6857 else
6858 break;
6859 }
6860 #endif /* DT_MIPS_RLD_MAP_REL */
6861
6862 if (dyn->d_tag == DT_DEBUG && map == -1)
6863 map = dyn->d_un.d_val;
6864
6865 if (dyn->d_tag == DT_NULL)
6866 break;
6867 }
6868 else
6869 {
6870 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6871 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6872 union
6873 {
6874 Elf32_Word map;
6875 unsigned char buf[sizeof (Elf32_Word)];
6876 }
6877 rld_map;
6878 #endif
6879 #ifdef DT_MIPS_RLD_MAP
6880 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6881 {
6882 if (linux_read_memory (dyn->d_un.d_val,
6883 rld_map.buf, sizeof (rld_map.buf)) == 0)
6884 return rld_map.map;
6885 else
6886 break;
6887 }
6888 #endif /* DT_MIPS_RLD_MAP */
6889 #ifdef DT_MIPS_RLD_MAP_REL
6890 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6891 {
6892 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6893 rld_map.buf, sizeof (rld_map.buf)) == 0)
6894 return rld_map.map;
6895 else
6896 break;
6897 }
6898 #endif /* DT_MIPS_RLD_MAP_REL */
6899
6900 if (dyn->d_tag == DT_DEBUG && map == -1)
6901 map = dyn->d_un.d_val;
6902
6903 if (dyn->d_tag == DT_NULL)
6904 break;
6905 }
6906
6907 dynamic_memaddr += dyn_size;
6908 }
6909
6910 return map;
6911 }
6912
6913 /* Read one pointer from MEMADDR in the inferior. */
6914
6915 static int
6916 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6917 {
6918 int ret;
6919
6920 /* Go through a union so this works on either big or little endian
6921 hosts, when the inferior's pointer size is smaller than the size
6922 of CORE_ADDR. It is assumed that the inferior's endianness is the
6923 same as the superior's. */
6924 union
6925 {
6926 CORE_ADDR core_addr;
6927 unsigned int ui;
6928 unsigned char uc;
6929 } addr;
6930
6931 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6932 if (ret == 0)
6933 {
6934 if (ptr_size == sizeof (CORE_ADDR))
6935 *ptr = addr.core_addr;
6936 else if (ptr_size == sizeof (unsigned int))
6937 *ptr = addr.ui;
6938 else
6939 gdb_assert_not_reached ("unhandled pointer size");
6940 }
6941 return ret;
6942 }
6943
6944 struct link_map_offsets
6945 {
6946 /* Offset and size of r_debug.r_version. */
6947 int r_version_offset;
6948
6949 /* Offset and size of r_debug.r_map. */
6950 int r_map_offset;
6951
6952 /* Offset to l_addr field in struct link_map. */
6953 int l_addr_offset;
6954
6955 /* Offset to l_name field in struct link_map. */
6956 int l_name_offset;
6957
6958 /* Offset to l_ld field in struct link_map. */
6959 int l_ld_offset;
6960
6961 /* Offset to l_next field in struct link_map. */
6962 int l_next_offset;
6963
6964 /* Offset to l_prev field in struct link_map. */
6965 int l_prev_offset;
6966 };
6967
6968 /* Construct qXfer:libraries-svr4:read reply. */
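/* For instance, a reply listing a single shared library might look
   like this (all addresses are illustrative):

   <library-list-svr4 version="1.0" main-lm="0x...">
     <library name="/lib/libfoo.so" lm="0x..." l_addr="0x..." l_ld="0x..."/>
   </library-list-svr4>  */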
6969
6970 static int
6971 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6972 unsigned const char *writebuf,
6973 CORE_ADDR offset, int len)
6974 {
6975 char *document;
6976 unsigned document_len;
6977 struct process_info_private *const priv = current_process ()->priv;
6978 char filename[PATH_MAX];
6979 int pid, is_elf64;
6980
6981 static const struct link_map_offsets lmo_32bit_offsets =
6982 {
6983 0, /* r_version offset. */
6984 4, /* r_debug.r_map offset. */
6985 0, /* l_addr offset in link_map. */
6986 4, /* l_name offset in link_map. */
6987 8, /* l_ld offset in link_map. */
6988 12, /* l_next offset in link_map. */
6989 16 /* l_prev offset in link_map. */
6990 };
6991
6992 static const struct link_map_offsets lmo_64bit_offsets =
6993 {
6994 0, /* r_version offset. */
6995 8, /* r_debug.r_map offset. */
6996 0, /* l_addr offset in link_map. */
6997 8, /* l_name offset in link_map. */
6998 16, /* l_ld offset in link_map. */
6999 24, /* l_next offset in link_map. */
7000 32 /* l_prev offset in link_map. */
7001 };
7002 const struct link_map_offsets *lmo;
7003 unsigned int machine;
7004 int ptr_size;
7005 CORE_ADDR lm_addr = 0, lm_prev = 0;
7006 int allocated = 1024;
7007 char *p;
7008 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7009 int header_done = 0;
7010
7011 if (writebuf != NULL)
7012 return -2;
7013 if (readbuf == NULL)
7014 return -1;
7015
7016 pid = lwpid_of (current_thread);
7017 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7018 is_elf64 = elf_64_file_p (filename, &machine);
7019 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7020 ptr_size = is_elf64 ? 8 : 4;
7021
7022 while (annex[0] != '\0')
7023 {
7024 const char *sep;
7025 CORE_ADDR *addrp;
7026 int len;
7027
7028 sep = strchr (annex, '=');
7029 if (sep == NULL)
7030 break;
7031
7032 len = sep - annex;
7033 if (len == 5 && startswith (annex, "start"))
7034 addrp = &lm_addr;
7035 else if (len == 4 && startswith (annex, "prev"))
7036 addrp = &lm_prev;
7037 else
7038 {
7039 annex = strchr (sep, ';');
7040 if (annex == NULL)
7041 break;
7042 annex++;
7043 continue;
7044 }
7045
7046 annex = decode_address_to_semicolon (addrp, sep + 1);
7047 }
7048
7049 if (lm_addr == 0)
7050 {
7051 int r_version = 0;
7052
7053 if (priv->r_debug == 0)
7054 priv->r_debug = get_r_debug (pid, is_elf64);
7055
7056 /* We failed to find DT_DEBUG. This situation will not change
7057 for this inferior - do not retry it. Report it to GDB as
7058 E01; see solib-svr4.c on the GDB side for the reasons. */
7059 if (priv->r_debug == (CORE_ADDR) -1)
7060 return -1;
7061
7062 if (priv->r_debug != 0)
7063 {
7064 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7065 (unsigned char *) &r_version,
7066 sizeof (r_version)) != 0
7067 || r_version != 1)
7068 {
7069 warning ("unexpected r_debug version %d", r_version);
7070 }
7071 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7072 &lm_addr, ptr_size) != 0)
7073 {
7074 warning ("unable to read r_map from 0x%lx",
7075 (long) priv->r_debug + lmo->r_map_offset);
7076 }
7077 }
7078 }
7079
7080 document = (char *) xmalloc (allocated);
7081 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7082 p = document + strlen (document);
7083
7084 while (lm_addr
7085 && read_one_ptr (lm_addr + lmo->l_name_offset,
7086 &l_name, ptr_size) == 0
7087 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7088 &l_addr, ptr_size) == 0
7089 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7090 &l_ld, ptr_size) == 0
7091 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7092 &l_prev, ptr_size) == 0
7093 && read_one_ptr (lm_addr + lmo->l_next_offset,
7094 &l_next, ptr_size) == 0)
7095 {
7096 unsigned char libname[PATH_MAX];
7097
7098 if (lm_prev != l_prev)
7099 {
7100 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7101 (long) lm_prev, (long) l_prev);
7102 break;
7103 }
7104
7105 /* Ignore the first entry even if it has a valid name, as the first entry
7106 corresponds to the main executable. The first entry should not be
7107 skipped if the dynamic loader was loaded late by a static executable
7108 (see the solib-svr4.c parameter ignore_first). But in such a case the
7109 main executable does not have PT_DYNAMIC present, and this function
7110 has already exited above due to a failed get_r_debug. */
7111 if (lm_prev == 0)
7112 {
7113 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7114 p = p + strlen (p);
7115 }
7116 else
7117 {
7118 /* Not checking for error because reading may stop before
7119 we've got PATH_MAX worth of characters. */
7120 libname[0] = '\0';
7121 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7122 libname[sizeof (libname) - 1] = '\0';
7123 if (libname[0] != '\0')
7124 {
7125 /* 6x the size for xml_escape_text below. */
7126 size_t len = 6 * strlen ((char *) libname);
7127 char *name;
7128
7129 if (!header_done)
7130 {
7131 /* Terminate `<library-list-svr4'. */
7132 *p++ = '>';
7133 header_done = 1;
7134 }
7135
7136 while (allocated < p - document + len + 200)
7137 {
7138 /* Expand to guarantee sufficient storage. */
7139 uintptr_t document_len = p - document;
7140
7141 document = (char *) xrealloc (document, 2 * allocated);
7142 allocated *= 2;
7143 p = document + document_len;
7144 }
7145
7146 name = xml_escape_text ((char *) libname);
7147 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7148 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7149 name, (unsigned long) lm_addr,
7150 (unsigned long) l_addr, (unsigned long) l_ld);
7151 free (name);
7152 }
7153 }
7154
7155 lm_prev = lm_addr;
7156 lm_addr = l_next;
7157 }
7158
7159 if (!header_done)
7160 {
7161 /* Empty list; terminate `<library-list-svr4'. */
7162 strcpy (p, "/>");
7163 }
7164 else
7165 strcpy (p, "</library-list-svr4>");
7166
7167 document_len = strlen (document);
7168 if (offset < document_len)
7169 document_len -= offset;
7170 else
7171 document_len = 0;
7172 if (len > document_len)
7173 len = document_len;
7174
7175 memcpy (readbuf, document + offset, len);
7176 xfree (document);
7177
7178 return len;
7179 }
7180
7181 #ifdef HAVE_LINUX_BTRACE
7182
7183 /* See to_disable_btrace target method. */
7184
7185 static int
7186 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7187 {
7188 enum btrace_error err;
7189
7190 err = linux_disable_btrace (tinfo);
7191 return (err == BTRACE_ERR_NONE ? 0 : -1);
7192 }
7193
7194 /* Encode an Intel Processor Trace configuration. */
7195
7196 static void
7197 linux_low_encode_pt_config (struct buffer *buffer,
7198 const struct btrace_data_pt_config *config)
7199 {
7200 buffer_grow_str (buffer, "<pt-config>\n");
7201
7202 switch (config->cpu.vendor)
7203 {
7204 case CV_INTEL:
7205 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7206 "model=\"%u\" stepping=\"%u\"/>\n",
7207 config->cpu.family, config->cpu.model,
7208 config->cpu.stepping);
7209 break;
7210
7211 default:
7212 break;
7213 }
7214
7215 buffer_grow_str (buffer, "</pt-config>\n");
7216 }
7217
7218 /* Encode a raw buffer. */
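/* For example, the two bytes 0xab and 0x01 are emitted as the four
   characters "ab01".  */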
7219
7220 static void
7221 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7222 unsigned int size)
7223 {
7224 if (size == 0)
7225 return;
7226
7227 /* We use hex encoding - see common/rsp-low.h. */
7228 buffer_grow_str (buffer, "<raw>\n");
7229
7230 while (size-- > 0)
7231 {
7232 char elem[2];
7233
7234 elem[0] = tohex ((*data >> 4) & 0xf);
7235 elem[1] = tohex (*data++ & 0xf);
7236
7237 buffer_grow (buffer, elem, 2);
7238 }
7239
7240 buffer_grow_str (buffer, "</raw>\n");
7241 }
7242
7243 /* See to_read_btrace target method. */
7244
7245 static int
7246 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7247 enum btrace_read_type type)
7248 {
7249 struct btrace_data btrace;
7250 struct btrace_block *block;
7251 enum btrace_error err;
7252 int i;
7253
7254 btrace_data_init (&btrace);
7255
7256 err = linux_read_btrace (&btrace, tinfo, type);
7257 if (err != BTRACE_ERR_NONE)
7258 {
7259 if (err == BTRACE_ERR_OVERFLOW)
7260 buffer_grow_str0 (buffer, "E.Overflow.");
7261 else
7262 buffer_grow_str0 (buffer, "E.Generic Error.");
7263
7264 goto err;
7265 }
7266
7267 switch (btrace.format)
7268 {
7269 case BTRACE_FORMAT_NONE:
7270 buffer_grow_str0 (buffer, "E.No Trace.");
7271 goto err;
7272
7273 case BTRACE_FORMAT_BTS:
7274 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7275 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7276
7277 for (i = 0;
7278 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7279 i++)
7280 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7281 paddress (block->begin), paddress (block->end));
7282
7283 buffer_grow_str0 (buffer, "</btrace>\n");
7284 break;
7285
7286 case BTRACE_FORMAT_PT:
7287 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7288 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7289 buffer_grow_str (buffer, "<pt>\n");
7290
7291 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7292
7293 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7294 btrace.variant.pt.size);
7295
7296 buffer_grow_str (buffer, "</pt>\n");
7297 buffer_grow_str0 (buffer, "</btrace>\n");
7298 break;
7299
7300 default:
7301 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7302 goto err;
7303 }
7304
7305 btrace_data_fini (&btrace);
7306 return 0;
7307
7308 err:
7309 btrace_data_fini (&btrace);
7310 return -1;
7311 }
7312
7313 /* See to_btrace_conf target method. */
7314
7315 static int
7316 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7317 struct buffer *buffer)
7318 {
7319 const struct btrace_config *conf;
7320
7321 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7322 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7323
7324 conf = linux_btrace_conf (tinfo);
7325 if (conf != NULL)
7326 {
7327 switch (conf->format)
7328 {
7329 case BTRACE_FORMAT_NONE:
7330 break;
7331
7332 case BTRACE_FORMAT_BTS:
7333 buffer_xml_printf (buffer, "<bts");
7334 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7335 buffer_xml_printf (buffer, " />\n");
7336 break;
7337
7338 case BTRACE_FORMAT_PT:
7339 buffer_xml_printf (buffer, "<pt");
7340 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7341 buffer_xml_printf (buffer, "/>\n");
7342 break;
7343 }
7344 }
7345
7346 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7347 return 0;
7348 }
7349 #endif /* HAVE_LINUX_BTRACE */
7350
7351 /* See nat/linux-nat.h. */
7352
7353 ptid_t
7354 current_lwp_ptid (void)
7355 {
7356 return ptid_of (current_thread);
7357 }
7358
7359 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7360
7361 static int
7362 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7363 {
7364 if (the_low_target.breakpoint_kind_from_pc != NULL)
7365 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7366 else
7367 return default_breakpoint_kind_from_pc (pcptr);
7368 }
7369
7370 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7371
7372 static const gdb_byte *
7373 linux_sw_breakpoint_from_kind (int kind, int *size)
7374 {
7375 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7376
7377 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7378 }
7379
7380 /* Implementation of the target_ops method
7381 "breakpoint_kind_from_current_state". */
7382
7383 static int
7384 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7385 {
7386 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7387 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7388 else
7389 return linux_breakpoint_kind_from_pc (pcptr);
7390 }
7391
7392 /* Default implementation of linux_target_ops method "set_pc" for
7393 a 32-bit pc register that is literally named "pc". */
7394
7395 void
7396 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7397 {
7398 uint32_t newpc = pc;
7399
7400 supply_register_by_name (regcache, "pc", &newpc);
7401 }
7402
7403 /* Default implementation of linux_target_ops method "get_pc" for
7404 a 32-bit pc register that is literally named "pc". */
7405
7406 CORE_ADDR
7407 linux_get_pc_32bit (struct regcache *regcache)
7408 {
7409 uint32_t pc;
7410
7411 collect_register_by_name (regcache, "pc", &pc);
7412 if (debug_threads)
7413 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7414 return pc;
7415 }
7416
7417 /* Default implementation of linux_target_ops method "set_pc" for
7418 a 64-bit pc register that is literally named "pc". */
7419
7420 void
7421 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7422 {
7423 uint64_t newpc = pc;
7424
7425 supply_register_by_name (regcache, "pc", &newpc);
7426 }
7427
7428 /* Default implementation of linux_target_ops method "get_pc" for
7429 a 64-bit pc register that is literally named "pc". */
7430
7431 CORE_ADDR
7432 linux_get_pc_64bit (struct regcache *regcache)
7433 {
7434 uint64_t pc;
7435
7436 collect_register_by_name (regcache, "pc", &pc);
7437 if (debug_threads)
7438 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7439 return pc;
7440 }
7441
7442
7443 static struct target_ops linux_target_ops = {
7444 linux_create_inferior,
7445 linux_post_create_inferior,
7446 linux_attach,
7447 linux_kill,
7448 linux_detach,
7449 linux_mourn,
7450 linux_join,
7451 linux_thread_alive,
7452 linux_resume,
7453 linux_wait,
7454 linux_fetch_registers,
7455 linux_store_registers,
7456 linux_prepare_to_access_memory,
7457 linux_done_accessing_memory,
7458 linux_read_memory,
7459 linux_write_memory,
7460 linux_look_up_symbols,
7461 linux_request_interrupt,
7462 linux_read_auxv,
7463 linux_supports_z_point_type,
7464 linux_insert_point,
7465 linux_remove_point,
7466 linux_stopped_by_sw_breakpoint,
7467 linux_supports_stopped_by_sw_breakpoint,
7468 linux_stopped_by_hw_breakpoint,
7469 linux_supports_stopped_by_hw_breakpoint,
7470 linux_supports_hardware_single_step,
7471 linux_stopped_by_watchpoint,
7472 linux_stopped_data_address,
7473 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7474 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7475 && defined(PT_TEXT_END_ADDR)
7476 linux_read_offsets,
7477 #else
7478 NULL,
7479 #endif
7480 #ifdef USE_THREAD_DB
7481 thread_db_get_tls_address,
7482 #else
7483 NULL,
7484 #endif
7485 linux_qxfer_spu,
7486 hostio_last_error_from_errno,
7487 linux_qxfer_osdata,
7488 linux_xfer_siginfo,
7489 linux_supports_non_stop,
7490 linux_async,
7491 linux_start_non_stop,
7492 linux_supports_multi_process,
7493 linux_supports_fork_events,
7494 linux_supports_vfork_events,
7495 linux_supports_exec_events,
7496 linux_handle_new_gdb_connection,
7497 #ifdef USE_THREAD_DB
7498 thread_db_handle_monitor_command,
7499 #else
7500 NULL,
7501 #endif
7502 linux_common_core_of_thread,
7503 linux_read_loadmap,
7504 linux_process_qsupported,
7505 linux_supports_tracepoints,
7506 linux_read_pc,
7507 linux_write_pc,
7508 linux_thread_stopped,
7509 NULL,
7510 linux_pause_all,
7511 linux_unpause_all,
7512 linux_stabilize_threads,
7513 linux_install_fast_tracepoint_jump_pad,
7514 linux_emit_ops,
7515 linux_supports_disable_randomization,
7516 linux_get_min_fast_tracepoint_insn_len,
7517 linux_qxfer_libraries_svr4,
7518 linux_supports_agent,
7519 #ifdef HAVE_LINUX_BTRACE
7520 linux_supports_btrace,
7521 linux_enable_btrace,
7522 linux_low_disable_btrace,
7523 linux_low_read_btrace,
7524 linux_low_btrace_conf,
7525 #else
7526 NULL,
7527 NULL,
7528 NULL,
7529 NULL,
7530 NULL,
7531 #endif
7532 linux_supports_range_stepping,
7533 linux_proc_pid_to_exec_file,
7534 linux_mntns_open_cloexec,
7535 linux_mntns_unlink,
7536 linux_mntns_readlink,
7537 linux_breakpoint_kind_from_pc,
7538 linux_sw_breakpoint_from_kind,
7539 linux_proc_tid_get_name,
7540 linux_breakpoint_kind_from_current_state,
7541 linux_supports_software_single_step,
7542 linux_supports_catch_syscall,
7543 linux_get_ipa_tdesc_idx,
7544 };
7545
7546 #ifdef HAVE_LINUX_REGSETS
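/* Count the regsets in INFO; the array is terminated by an entry
   whose size is negative.  */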
7547 void
7548 initialize_regsets_info (struct regsets_info *info)
7549 {
7550 for (info->num_regsets = 0;
7551 info->regsets[info->num_regsets].size >= 0;
7552 info->num_regsets++)
7553 ;
7554 }
7555 #endif
7556
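/* One-time initialization: install the GNU/Linux target vector and
   the SIGCHLD handler, then let the architecture-specific code and
   the ptrace feature checks run.  */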
7557 void
7558 initialize_low (void)
7559 {
7560 struct sigaction sigchld_action;
7561
7562 memset (&sigchld_action, 0, sizeof (sigchld_action));
7563 set_target_ops (&linux_target_ops);
7564
7565 linux_ptrace_init_warnings ();
7566
7567 sigchld_action.sa_handler = sigchld_handler;
7568 sigemptyset (&sigchld_action.sa_mask);
7569 sigchld_action.sa_flags = SA_RESTART;
7570 sigaction (SIGCHLD, &sigchld_action, NULL);
7571
7572 initialize_low_arch ();
7573
7574 linux_check_ptrace_features ();
7575 }