Fix PR gdb/18653: gdb disturbs inferior's inherited signal dispositions
gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25 #include "signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN already defines these since at least 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
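
/* For illustration (a sketch, not code from this file): the kernel
   exposes the auxiliary vector via /proc/PID/auxv as a flat array of
   these entries terminated by an AT_NULL record, so a buffer read
   from that file can be walked like:

     Elf64_auxv_t *auxv = (Elf64_auxv_t *) buf;

     for (; auxv->a_type != AT_NULL; auxv++)
       if (auxv->a_type == AT_ENTRY)
         entry = auxv->a_un.a_val;

   The 32-bit variant is walked the same way using Elf32_auxv_t.  */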
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
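
/* A minimal usage sketch for the pair above (hypothetical call
   sites): when waitpid reports a stop for a PID nothing has claimed
   yet, the status is stashed; when the matching fork/clone event
   arrives, it is claimed:

     // On a stop from an LWP we don't know about yet:
     add_to_pid_list (&stopped_pids, pid, wstat);

     // Later, in the fork/clone event handler:
     int status;

     if (pull_pid_from_list (&stopped_pids, new_pid, &status))
       ...  // STATUS holds the child's initial stop status.

   handle_extended_wait below consumes the list exactly this way.  */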
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static void unsuspend_all_lwps (struct lwp_info *except);
256 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259 static struct lwp_info *add_lwp (ptid_t ptid);
260 static void linux_mourn (struct process_info *process);
261 static int linux_stopped_by_watchpoint (void);
262 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263 static int lwp_is_marked_dead (struct lwp_info *lwp);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268 static void complete_ongoing_step_over (void);
269 static int linux_low_ptrace_options (int attached);
270 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
271 static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);
272
273 /* When the event-loop is doing a step-over, this points at the thread
274 being stepped. */
275 ptid_t step_over_bkpt;
276
277 /* True if the low target can hardware single-step. */
278
279 static int
280 can_hardware_single_step (void)
281 {
282 if (the_low_target.supports_hardware_single_step != NULL)
283 return the_low_target.supports_hardware_single_step ();
284 else
285 return 0;
286 }
287
288 /* True if the low target can software single-step. Such targets
289 implement the GET_NEXT_PCS callback. */
290
291 static int
292 can_software_single_step (void)
293 {
294 return (the_low_target.get_next_pcs != NULL);
295 }
296
297 /* True if the low target supports memory breakpoints. If so, we'll
298 have a GET_PC implementation. */
299
300 static int
301 supports_breakpoints (void)
302 {
303 return (the_low_target.get_pc != NULL);
304 }
305
306 /* Returns true if this target can support fast tracepoints. This
307 does not mean that the in-process agent has been loaded in the
308 inferior. */
309
310 static int
311 supports_fast_tracepoints (void)
312 {
313 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
314 }
315
316 /* True if LWP is stopped in its stepping range. */
317
318 static int
319 lwp_in_step_range (struct lwp_info *lwp)
320 {
321 CORE_ADDR pc = lwp->stop_pc;
322
323 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
324 }
325
326 struct pending_signals
327 {
328 int signal;
329 siginfo_t info;
330 struct pending_signals *prev;
331 };
332
333 /* The read/write ends of the pipe registered as waitable file in the
334 event loop. */
335 static int linux_event_pipe[2] = { -1, -1 };
336
337 /* True if we're currently in async mode. */
338 #define target_is_async_p() (linux_event_pipe[0] != -1)
339
340 static void send_sigstop (struct lwp_info *lwp);
341 static void wait_for_sigstop (void);
342
343 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is ELF but not 64-bit, and -1 if it is not an ELF file at all. */
344
345 static int
346 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
347 {
348 if (header->e_ident[EI_MAG0] == ELFMAG0
349 && header->e_ident[EI_MAG1] == ELFMAG1
350 && header->e_ident[EI_MAG2] == ELFMAG2
351 && header->e_ident[EI_MAG3] == ELFMAG3)
352 {
353 *machine = header->e_machine;
354 return header->e_ident[EI_CLASS] == ELFCLASS64;
355
356 }
357 *machine = EM_NONE;
358 return -1;
359 }
360
361 /* Return 1 if FILE is a 64-bit ELF file,
362 zero if FILE is an ELF file but not a 64-bit one (or its header can't be read),
363 and -1 if the file is not accessible, doesn't exist, or isn't ELF. */
364
365 static int
366 elf_64_file_p (const char *file, unsigned int *machine)
367 {
368 Elf64_Ehdr header;
369 int fd;
370
371 fd = open (file, O_RDONLY);
372 if (fd < 0)
373 return -1;
374
375 if (read (fd, &header, sizeof (header)) != sizeof (header))
376 {
377 close (fd);
378 return 0;
379 }
380 close (fd);
381
382 return elf_64_header_p (&header, machine);
383 }
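
/* An illustrative caller (hypothetical, not from this file) would
   distinguish the three results like so:

     unsigned int machine;

     switch (elf_64_file_p ("/proc/1234/exe", &machine))
       {
       case 1:   // 64-bit ELF; MACHINE holds e_machine.
         ...
       case 0:   // ELF but not 64-bit, or short header read.
         ...
       case -1:  // Unreadable, nonexistent, or not ELF at all.
         ...
       }

   linux_pid_exe_is_elf_64_file below is the real consumer.  */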
384
385 /* Accepts an integer PID; returns true if the executable that PID
386 is running is a 64-bit ELF file. */
387
388 int
389 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
390 {
391 char file[PATH_MAX];
392
393 sprintf (file, "/proc/%d/exe", pid);
394 return elf_64_file_p (file, machine);
395 }
396
397 static void
398 delete_lwp (struct lwp_info *lwp)
399 {
400 struct thread_info *thr = get_lwp_thread (lwp);
401
402 if (debug_threads)
403 debug_printf ("deleting %ld\n", lwpid_of (thr));
404
405 remove_thread (thr);
406 free (lwp->arch_private);
407 free (lwp);
408 }
409
410 /* Add a process to the common process list, and set its private
411 data. */
412
413 static struct process_info *
414 linux_add_process (int pid, int attached)
415 {
416 struct process_info *proc;
417
418 proc = add_process (pid, attached);
419 proc->priv = XCNEW (struct process_info_private);
420
421 if (the_low_target.new_process != NULL)
422 proc->priv->arch_private = the_low_target.new_process ();
423
424 return proc;
425 }
426
427 static CORE_ADDR get_pc (struct lwp_info *lwp);
428
429 /* Call the target arch_setup function on the current thread. */
430
431 static void
432 linux_arch_setup (void)
433 {
434 the_low_target.arch_setup ();
435 }
436
437 /* Call the target arch_setup function on THREAD. */
438
439 static void
440 linux_arch_setup_thread (struct thread_info *thread)
441 {
442 struct thread_info *saved_thread;
443
444 saved_thread = current_thread;
445 current_thread = thread;
446
447 linux_arch_setup ();
448
449 current_thread = saved_thread;
450 }
451
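/* For reference, a sketch of how the extended ptrace events handled
   below are encoded in a wait status (the standard ptrace(2)
   encoding; the real decoding lives in nat/linux-ptrace.h):

     status >> 8 == (SIGTRAP | (PTRACE_EVENT_FORK << 8))    // fork
     status >> 8 == (SIGTRAP | (PTRACE_EVENT_CLONE << 8))   // clone
     status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8))    // exec

   so linux_ptrace_get_extended_event (wstat) effectively extracts
   wstat >> 16.  */
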
452 /* Handle a GNU/Linux extended wait response. If we see a clone,
453 fork, or vfork event, we need to add the new LWP to our list
454 (and return 0 so as not to report the trap to higher layers).
455 If we see an exec event, we will modify ORIG_EVENT_LWP to point
456 to a new LWP representing the new program. */
457
458 static int
459 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
460 {
461 struct lwp_info *event_lwp = *orig_event_lwp;
462 int event = linux_ptrace_get_extended_event (wstat);
463 struct thread_info *event_thr = get_lwp_thread (event_lwp);
464 struct lwp_info *new_lwp;
465
466 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
467
468 /* All extended events we currently use are mid-syscall. Only
469 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
470 you have to be using PTRACE_SEIZE to get that. */
471 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
472
473 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
474 || (event == PTRACE_EVENT_CLONE))
475 {
476 ptid_t ptid;
477 unsigned long new_pid;
478 int ret, status;
479
480 /* Get the pid of the new lwp. */
481 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
482 &new_pid);
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
485 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489
490 ret = my_waitpid (new_pid, &status, __WALL);
491
492 if (ret == -1)
493 perror_with_name ("waiting for new child");
494 else if (ret != new_pid)
495 warning ("wait returned unexpected PID %d", ret);
496 else if (!WIFSTOPPED (status))
497 warning ("wait returned unexpected status 0x%x", status);
498 }
499
500 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
501 {
502 struct process_info *parent_proc;
503 struct process_info *child_proc;
504 struct lwp_info *child_lwp;
505 struct thread_info *child_thr;
506 struct target_desc *tdesc;
507
508 ptid = ptid_build (new_pid, new_pid, 0);
509
510 if (debug_threads)
511 {
512 debug_printf ("HEW: Got fork event from LWP %ld, "
513 "new child is %d\n",
514 ptid_get_lwp (ptid_of (event_thr)),
515 ptid_get_pid (ptid));
516 }
517
518 /* Add the new process to the tables and clone the breakpoint
519 lists of the parent. We need to do this even if the new process
520 will be detached, since we will need the process object and the
521 breakpoints to remove any breakpoints from memory when we
522 detach, and the client side will access registers. */
523 child_proc = linux_add_process (new_pid, 0);
524 gdb_assert (child_proc != NULL);
525 child_lwp = add_lwp (ptid);
526 gdb_assert (child_lwp != NULL);
527 child_lwp->stopped = 1;
528 child_lwp->must_set_ptrace_flags = 1;
529 child_lwp->status_pending_p = 0;
530 child_thr = get_lwp_thread (child_lwp);
531 child_thr->last_resume_kind = resume_stop;
532 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
533
534 /* If we're suspending all threads, leave this one suspended
535 too. If the fork/clone parent is stepping over a breakpoint,
536 all other threads have been suspended already. Leave the
537 child suspended too. */
538 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
539 || event_lwp->bp_reinsert != 0)
540 {
541 if (debug_threads)
542 debug_printf ("HEW: leaving child suspended\n");
543 child_lwp->suspended = 1;
544 }
545
546 parent_proc = get_thread_process (event_thr);
547 child_proc->attached = parent_proc->attached;
548
549 if (event_lwp->bp_reinsert != 0
550 && can_software_single_step ()
551 && event == PTRACE_EVENT_VFORK)
552 {
553 /* If we leave reinsert breakpoints there, child will
554 hit it, so uninsert reinsert breakpoints from parent
555 (and child). Once vfork child is done, reinsert
556 them back to parent. */
557 uninsert_reinsert_breakpoints (event_thr);
558 }
559
560 clone_all_breakpoints (child_thr, event_thr);
561
562 tdesc = XNEW (struct target_desc);
563 copy_target_description (tdesc, parent_proc->tdesc);
564 child_proc->tdesc = tdesc;
565
566 /* Clone arch-specific process data. */
567 if (the_low_target.new_fork != NULL)
568 the_low_target.new_fork (parent_proc, child_proc);
569
570 /* Save fork info in the parent thread. */
571 if (event == PTRACE_EVENT_FORK)
572 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
573 else if (event == PTRACE_EVENT_VFORK)
574 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
575
576 event_lwp->waitstatus.value.related_pid = ptid;
577
578 /* The status_pending field contains bits denoting the
579 extended event, so when the pending event is handled,
580 the handler will look at lwp->waitstatus. */
581 event_lwp->status_pending_p = 1;
582 event_lwp->status_pending = wstat;
583
584 /* If the parent thread is doing step-over with reinsert
585 breakpoints, the list of reinsert breakpoints are cloned
586 from the parent's. Remove them from the child process.
587 In case of vfork, we'll reinsert them back once vforked
588 child is done. */
589 if (event_lwp->bp_reinsert != 0
590 && can_software_single_step ())
591 {
592 /* The child process is forked and stopped, so it is safe
593 to access its memory without stopping all other threads
594 from other processes. */
595 delete_reinsert_breakpoints (child_thr);
596
597 gdb_assert (has_reinsert_breakpoints (event_thr));
598 gdb_assert (!has_reinsert_breakpoints (child_thr));
599 }
600
601 /* Report the event. */
602 return 0;
603 }
604
605 if (debug_threads)
606 debug_printf ("HEW: Got clone event "
607 "from LWP %ld, new child is LWP %ld\n",
608 lwpid_of (event_thr), new_pid);
609
610 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
611 new_lwp = add_lwp (ptid);
612
613 /* Either we're going to immediately resume the new thread
614 or leave it stopped. linux_resume_one_lwp is a nop if it
615 thinks the thread is currently running, so set this first
616 before calling linux_resume_one_lwp. */
617 new_lwp->stopped = 1;
618
619 /* If we're suspending all threads, leave this one suspended
620 too. If the fork/clone parent is stepping over a breakpoint,
621 all other threads have been suspended already. Leave the
622 child suspended too. */
623 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
624 || event_lwp->bp_reinsert != 0)
625 new_lwp->suspended = 1;
626
627 /* Normally we will get the pending SIGSTOP. But in some cases
628 we might get another signal delivered to the group first.
629 If we do get another signal, be sure not to lose it. */
630 if (WSTOPSIG (status) != SIGSTOP)
631 {
632 new_lwp->stop_expected = 1;
633 new_lwp->status_pending_p = 1;
634 new_lwp->status_pending = status;
635 }
636 else if (report_thread_events)
637 {
638 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
639 new_lwp->status_pending_p = 1;
640 new_lwp->status_pending = status;
641 }
642
643 /* Don't report the event. */
644 return 1;
645 }
646 else if (event == PTRACE_EVENT_VFORK_DONE)
647 {
648 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
649
650 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
651 {
652 reinsert_reinsert_breakpoints (event_thr);
653
654 gdb_assert (has_reinsert_breakpoints (event_thr));
655 }
656
657 /* Report the event. */
658 return 0;
659 }
660 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
661 {
662 struct process_info *proc;
663 VEC (int) *syscalls_to_catch;
664 ptid_t event_ptid;
665 pid_t event_pid;
666
667 if (debug_threads)
668 {
669 debug_printf ("HEW: Got exec event from LWP %ld\n",
670 lwpid_of (event_thr));
671 }
672
673 /* Get the event ptid. */
674 event_ptid = ptid_of (event_thr);
675 event_pid = ptid_get_pid (event_ptid);
676
677 /* Save the syscall list from the execing process. */
678 proc = get_thread_process (event_thr);
679 syscalls_to_catch = proc->syscalls_to_catch;
680 proc->syscalls_to_catch = NULL;
681
682 /* Delete the execing process and all its threads. */
683 linux_mourn (proc);
684 current_thread = NULL;
685
686 /* Create a new process/lwp/thread. */
687 proc = linux_add_process (event_pid, 0);
688 event_lwp = add_lwp (event_ptid);
689 event_thr = get_lwp_thread (event_lwp);
690 gdb_assert (current_thread == event_thr);
691 linux_arch_setup_thread (event_thr);
692
693 /* Set the event status. */
694 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
695 event_lwp->waitstatus.value.execd_pathname
696 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
697
698 /* Mark the exec status as pending. */
699 event_lwp->stopped = 1;
700 event_lwp->status_pending_p = 1;
701 event_lwp->status_pending = wstat;
702 event_thr->last_resume_kind = resume_continue;
703 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
704
705 /* Update syscall state in the new lwp, effectively mid-syscall too. */
706 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
707
708 /* Restore the list to catch. Don't rely on the client, which is free
709 to avoid sending a new list when the architecture doesn't change.
710 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
711 proc->syscalls_to_catch = syscalls_to_catch;
712
713 /* Report the event. */
714 *orig_event_lwp = event_lwp;
715 return 0;
716 }
717
718 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
719 }
720
721 /* Return the PC as read from the regcache of LWP, without any
722 adjustment. */
723
724 static CORE_ADDR
725 get_pc (struct lwp_info *lwp)
726 {
727 struct thread_info *saved_thread;
728 struct regcache *regcache;
729 CORE_ADDR pc;
730
731 if (the_low_target.get_pc == NULL)
732 return 0;
733
734 saved_thread = current_thread;
735 current_thread = get_lwp_thread (lwp);
736
737 regcache = get_thread_regcache (current_thread, 1);
738 pc = (*the_low_target.get_pc) (regcache);
739
740 if (debug_threads)
741 debug_printf ("pc is 0x%lx\n", (long) pc);
742
743 current_thread = saved_thread;
744 return pc;
745 }
746
747 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
748 Fill *SYSNO with the syscall nr trapped. */
749
750 static void
751 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
752 {
753 struct thread_info *saved_thread;
754 struct regcache *regcache;
755
756 if (the_low_target.get_syscall_trapinfo == NULL)
757 {
758 /* If we cannot get the syscall trapinfo, report an unknown
759 system call number. */
760 *sysno = UNKNOWN_SYSCALL;
761 return;
762 }
763
764 saved_thread = current_thread;
765 current_thread = get_lwp_thread (lwp);
766
767 regcache = get_thread_regcache (current_thread, 1);
768 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
769
770 if (debug_threads)
771 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
772
773 current_thread = saved_thread;
774 }
775
776 static int check_stopped_by_watchpoint (struct lwp_info *child);
777
778 /* Called when the LWP stopped for a signal/trap. If it stopped for a
779 trap check what caused it (breakpoint, watchpoint, trace, etc.),
780 and save the result in the LWP's stop_reason field. If it stopped
781 for a breakpoint, decrement the PC if necessary on the lwp's
782 architecture. Returns true if we now have the LWP's stop PC. */
783
784 static int
785 save_stop_reason (struct lwp_info *lwp)
786 {
787 CORE_ADDR pc;
788 CORE_ADDR sw_breakpoint_pc;
789 struct thread_info *saved_thread;
790 #if USE_SIGTRAP_SIGINFO
791 siginfo_t siginfo;
792 #endif
793
794 if (the_low_target.get_pc == NULL)
795 return 0;
796
797 pc = get_pc (lwp);
798 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
799
800 /* breakpoint_at reads from the current thread. */
801 saved_thread = current_thread;
802 current_thread = get_lwp_thread (lwp);
803
804 #if USE_SIGTRAP_SIGINFO
805 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
806 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
807 {
808 if (siginfo.si_signo == SIGTRAP)
809 {
810 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
811 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
812 {
813 /* The si_code is ambiguous on this arch -- check debug
814 registers. */
815 if (!check_stopped_by_watchpoint (lwp))
816 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
817 }
818 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
819 {
820 /* If we determine the LWP stopped for a SW breakpoint,
821 trust it. Particularly don't check watchpoint
822 registers, because at least on s390, we'd find
823 stopped-by-watchpoint as long as there's a watchpoint
824 set. */
825 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
826 }
827 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
828 {
829 /* This can indicate either a hardware breakpoint or
830 hardware watchpoint. Check debug registers. */
831 if (!check_stopped_by_watchpoint (lwp))
832 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
833 }
834 else if (siginfo.si_code == TRAP_TRACE)
835 {
836 /* We may have single stepped an instruction that
837 triggered a watchpoint. In that case, on some
838 architectures (such as x86), instead of TRAP_HWBKPT,
839 si_code indicates TRAP_TRACE, and we need to check
840 the debug registers separately. */
841 if (!check_stopped_by_watchpoint (lwp))
842 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
843 }
844 }
845 }
846 #else
847 /* We may have just stepped a breakpoint instruction. E.g., in
848 non-stop mode, GDB first tells the thread A to step a range, and
849 then the user inserts a breakpoint inside the range. In that
850 case we need to report the breakpoint PC. */
851 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
852 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
853 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
854
855 if (hardware_breakpoint_inserted_here (pc))
856 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
857
858 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
859 check_stopped_by_watchpoint (lwp);
860 #endif
861
862 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
863 {
864 if (debug_threads)
865 {
866 struct thread_info *thr = get_lwp_thread (lwp);
867
868 debug_printf ("CSBB: %s stopped by software breakpoint\n",
869 target_pid_to_str (ptid_of (thr)));
870 }
871
872 /* Back up the PC if necessary. */
873 if (pc != sw_breakpoint_pc)
874 {
875 struct regcache *regcache
876 = get_thread_regcache (current_thread, 1);
877 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
878 }
879
880 /* Update this so we record the correct stop PC below. */
881 pc = sw_breakpoint_pc;
882 }
883 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
884 {
885 if (debug_threads)
886 {
887 struct thread_info *thr = get_lwp_thread (lwp);
888
889 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
890 target_pid_to_str (ptid_of (thr)));
891 }
892 }
893 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
894 {
895 if (debug_threads)
896 {
897 struct thread_info *thr = get_lwp_thread (lwp);
898
899 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
900 target_pid_to_str (ptid_of (thr)));
901 }
902 }
903 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
904 {
905 if (debug_threads)
906 {
907 struct thread_info *thr = get_lwp_thread (lwp);
908
909 debug_printf ("CSBB: %s stopped by trace\n",
910 target_pid_to_str (ptid_of (thr)));
911 }
912 }
913
914 lwp->stop_pc = pc;
915 current_thread = saved_thread;
916 return 1;
917 }
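
/* For reference (values from the Linux siginfo ABI, not defined in
   this file), the si_code tests above usually resolve to:

     TRAP_BRKPT  == 1   // software breakpoint
     TRAP_TRACE  == 2   // hardware single-step
     TRAP_HWBKPT == 4   // hardware breakpoint or watchpoint

   The GDB_ARCH_IS_TRAP_* macros wrap these because some kernels and
   architectures report different codes for the same event.  */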
918
919 static struct lwp_info *
920 add_lwp (ptid_t ptid)
921 {
922 struct lwp_info *lwp;
923
924 lwp = XCNEW (struct lwp_info);
925
926 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
927
928 if (the_low_target.new_thread != NULL)
929 the_low_target.new_thread (lwp);
930
931 lwp->thread = add_thread (ptid, lwp);
932
933 return lwp;
934 }
935
936 /* Start an inferior process and return its pid.
937 ALLARGS is a vector of program-name and args. */
938
939 static int
940 linux_create_inferior (char *program, char **allargs)
941 {
942 struct lwp_info *new_lwp;
943 int pid;
944 ptid_t ptid;
945 struct cleanup *restore_personality
946 = maybe_disable_address_space_randomization (disable_randomization);
947
948 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
949 pid = vfork ();
950 #else
951 pid = fork ();
952 #endif
953 if (pid < 0)
954 perror_with_name ("fork");
955
956 if (pid == 0)
957 {
958 close_most_fds ();
959 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
960
961 setpgid (0, 0);
962
963 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
964 stdout to stderr so that inferior i/o doesn't corrupt the connection.
965 Also, redirect stdin to /dev/null. */
966 if (remote_connection_is_stdio ())
967 {
968 close (0);
969 open ("/dev/null", O_RDONLY);
970 dup2 (2, 1);
971 if (write (2, "stdin/stdout redirected\n",
972 sizeof ("stdin/stdout redirected\n") - 1) < 0)
973 {
974 /* Errors ignored. */;
975 }
976 }
977
978 restore_original_signals_state ();
979
980 execv (program, allargs);
981 if (errno == ENOENT)
982 execvp (program, allargs);
983
984 fprintf (stderr, "Cannot exec %s: %s.\n", program,
985 strerror (errno));
986 fflush (stderr);
987 _exit (0177);
988 }
989
990 do_cleanups (restore_personality);
991
992 linux_add_process (pid, 0);
993
994 ptid = ptid_build (pid, pid, 0);
995 new_lwp = add_lwp (ptid);
996 new_lwp->must_set_ptrace_flags = 1;
997
998 return pid;
999 }
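
/* To illustrate the handshake above (standard ptrace behavior, not
   code from this file): because the child called PTRACE_TRACEME, its
   successful execv is reported to the parent as a SIGTRAP stop,
   collected with a plain waitpid:

     int status;

     waitpid (pid, &status, 0);
     // WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP

   Only after that first stop does linux_post_create_inferior below
   set the PTRACE_O_* options on the child.  */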
1000
1001 /* Implement the post_create_inferior target_ops method. */
1002
1003 static void
1004 linux_post_create_inferior (void)
1005 {
1006 struct lwp_info *lwp = get_thread_lwp (current_thread);
1007
1008 linux_arch_setup ();
1009
1010 if (lwp->must_set_ptrace_flags)
1011 {
1012 struct process_info *proc = current_process ();
1013 int options = linux_low_ptrace_options (proc->attached);
1014
1015 linux_enable_event_reporting (lwpid_of (current_thread), options);
1016 lwp->must_set_ptrace_flags = 0;
1017 }
1018 }
1019
1020 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1021 error. */
1022
1023 int
1024 linux_attach_lwp (ptid_t ptid)
1025 {
1026 struct lwp_info *new_lwp;
1027 int lwpid = ptid_get_lwp (ptid);
1028
1029 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1030 != 0)
1031 return errno;
1032
1033 new_lwp = add_lwp (ptid);
1034
1035 /* We need to wait for SIGSTOP before being able to make the next
1036 ptrace call on this LWP. */
1037 new_lwp->must_set_ptrace_flags = 1;
1038
1039 if (linux_proc_pid_is_stopped (lwpid))
1040 {
1041 if (debug_threads)
1042 debug_printf ("Attached to a stopped process\n");
1043
1044 /* The process is definitely stopped. It is in a job control
1045 stop, unless the kernel predates the TASK_STOPPED /
1046 TASK_TRACED distinction, in which case it might be in a
1047 ptrace stop. Make sure it is in a ptrace stop; from there we
1048 can kill it, signal it, et cetera.
1049
1050 First make sure there is a pending SIGSTOP. Since we are
1051 already attached, the process can not transition from stopped
1052 to running without a PTRACE_CONT; so we know this signal will
1053 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1054 probably already in the queue (unless this kernel is old
1055 enough to use TASK_STOPPED for ptrace stops); but since
1056 SIGSTOP is not an RT signal, it can only be queued once. */
1057 kill_lwp (lwpid, SIGSTOP);
1058
1059 /* Finally, resume the stopped process. This will deliver the
1060 SIGSTOP (or a higher priority signal, just like normal
1061 PTRACE_ATTACH), which we'll catch later on. */
1062 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1063 }
1064
1065 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1066 brings it to a halt.
1067
1068 There are several cases to consider here:
1069
1070 1) gdbserver has already attached to the process and is being notified
1071 of a new thread that is being created.
1072 In this case we should ignore that SIGSTOP and resume the
1073 process. This is handled below by setting stop_expected = 1,
1074 and the fact that add_thread sets last_resume_kind ==
1075 resume_continue.
1076
1077 2) This is the first thread (the process thread), and we're attaching
1078 to it via attach_inferior.
1079 In this case we want the process thread to stop.
1080 This is handled by having linux_attach set last_resume_kind ==
1081 resume_stop after we return.
1082
1083 If the pid we are attaching to is also the tgid, we attach to and
1084 stop all the existing threads. Otherwise, we attach to pid and
1085 ignore any other threads in the same group as this pid.
1086
1087 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1088 existing threads.
1089 In this case we want the thread to stop.
1090 FIXME: This case is currently not properly handled.
1091 We should wait for the SIGSTOP but don't. Things work apparently
1092 because enough time passes between when we ptrace (ATTACH) and when
1093 gdb makes the next ptrace call on the thread.
1094
1095 On the other hand, if we are currently trying to stop all threads, we
1096 should treat the new thread as if we had sent it a SIGSTOP. This works
1097 because we are guaranteed that the add_lwp call above added us to the
1098 end of the list, and so the new thread has not yet reached
1099 wait_for_sigstop (but will). */
1100 new_lwp->stop_expected = 1;
1101
1102 return 0;
1103 }
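
/* For reference, kill_lwp (used above) targets one specific thread
   rather than the whole thread group; where available it is
   essentially a wrapper around the thread-directed kill syscall,
   along the lines of:

     syscall (SYS_tkill, lwpid, signo);

   which is what lets the SIGSTOP above land on LWPID itself instead
   of being delivered to an arbitrary thread of the group.  */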
1104
1105 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1106 already attached. Returns true if a new LWP is found, false
1107 otherwise. */
1108
1109 static int
1110 attach_proc_task_lwp_callback (ptid_t ptid)
1111 {
1112 /* Is this a new thread? */
1113 if (find_thread_ptid (ptid) == NULL)
1114 {
1115 int lwpid = ptid_get_lwp (ptid);
1116 int err;
1117
1118 if (debug_threads)
1119 debug_printf ("Found new lwp %d\n", lwpid);
1120
1121 err = linux_attach_lwp (ptid);
1122
1123 /* Be quiet if we simply raced with the thread exiting. EPERM
1124 is returned if the thread's task still exists, and is marked
1125 as exited or zombie, as well as other conditions, so in that
1126 case, confirm the status in /proc/PID/status. */
1127 if (err == ESRCH
1128 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1129 {
1130 if (debug_threads)
1131 {
1132 debug_printf ("Cannot attach to lwp %d: "
1133 "thread is gone (%d: %s)\n",
1134 lwpid, err, strerror (err));
1135 }
1136 }
1137 else if (err != 0)
1138 {
1139 warning (_("Cannot attach to lwp %d: %s"),
1140 lwpid,
1141 linux_ptrace_attach_fail_reason_string (ptid, err));
1142 }
1143
1144 return 1;
1145 }
1146 return 0;
1147 }
1148
1149 static void async_file_mark (void);
1150
1151 /* Attach to PID. If PID is the tgid, attach to it and all
1152 of its threads. */
1153
1154 static int
1155 linux_attach (unsigned long pid)
1156 {
1157 struct process_info *proc;
1158 struct thread_info *initial_thread;
1159 ptid_t ptid = ptid_build (pid, pid, 0);
1160 int err;
1161
1162 /* Attach to PID. We will check for other threads
1163 soon. */
1164 err = linux_attach_lwp (ptid);
1165 if (err != 0)
1166 error ("Cannot attach to process %ld: %s",
1167 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1168
1169 proc = linux_add_process (pid, 1);
1170
1171 /* Don't ignore the initial SIGSTOP if we just attached to this
1172 process. It will be collected by wait shortly. */
1173 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1174 initial_thread->last_resume_kind = resume_stop;
1175
1176 /* We must attach to every LWP. If /proc is mounted, use that to
1177 find them now. On the one hand, the inferior may be using raw
1178 clone instead of using pthreads. On the other hand, even if it
1179 is using pthreads, GDB may not be connected yet (thread_db needs
1180 to do symbol lookups, through qSymbol). Also, thread_db walks
1181 structures in the inferior's address space to find the list of
1182 threads/LWPs, and those structures may well be corrupted. Note
1183 that once thread_db is loaded, we'll still use it to list threads
1184 and associate pthread info with each LWP. */
1185 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1186
1187 /* GDB will shortly read the xml target description for this
1188 process, to figure out the process' architecture. But the target
1189 description is only filled in when the first process/thread in
1190 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1191 that now, otherwise, if GDB is fast enough, it could read the
1192 target description _before_ that initial stop. */
1193 if (non_stop)
1194 {
1195 struct lwp_info *lwp;
1196 int wstat, lwpid;
1197 ptid_t pid_ptid = pid_to_ptid (pid);
1198
1199 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1200 &wstat, __WALL);
1201 gdb_assert (lwpid > 0);
1202
1203 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1204
1205 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1206 {
1207 lwp->status_pending_p = 1;
1208 lwp->status_pending = wstat;
1209 }
1210
1211 initial_thread->last_resume_kind = resume_continue;
1212
1213 async_file_mark ();
1214
1215 gdb_assert (proc->tdesc != NULL);
1216 }
1217
1218 return 0;
1219 }
1220
1221 struct counter
1222 {
1223 int pid;
1224 int count;
1225 };
1226
1227 static int
1228 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1229 {
1230 struct counter *counter = (struct counter *) args;
1231
1232 if (ptid_get_pid (entry->id) == counter->pid)
1233 {
1234 if (++counter->count > 1)
1235 return 1;
1236 }
1237
1238 return 0;
1239 }
1240
1241 static int
1242 last_thread_of_process_p (int pid)
1243 {
1244 struct counter counter = { pid , 0 };
1245
1246 return (find_inferior (&all_threads,
1247 second_thread_of_pid_p, &counter) == NULL);
1248 }
1249
1250 /* Kill LWP. */
1251
1252 static void
1253 linux_kill_one_lwp (struct lwp_info *lwp)
1254 {
1255 struct thread_info *thr = get_lwp_thread (lwp);
1256 int pid = lwpid_of (thr);
1257
1258 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1259 there is no signal context, and ptrace(PTRACE_KILL) (or
1260 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1261 ptrace (PTRACE_CONT, pid, 0, 0) and just resumes the tracee. A better
1262 alternative is to kill with SIGKILL. We only need one SIGKILL
1263 per process, not one for each thread. But since we still support
1264 debugging programs using raw clone without CLONE_THREAD,
1265 we send one for each thread. For years, we used PTRACE_KILL
1266 only, so we're being a bit paranoid about some old kernels where
1267 PTRACE_KILL might work better (dubious if there are any such, but
1268 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1269 second, and so we're fine everywhere. */
1270
1271 errno = 0;
1272 kill_lwp (pid, SIGKILL);
1273 if (debug_threads)
1274 {
1275 int save_errno = errno;
1276
1277 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1278 target_pid_to_str (ptid_of (thr)),
1279 save_errno ? strerror (save_errno) : "OK");
1280 }
1281
1282 errno = 0;
1283 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1284 if (debug_threads)
1285 {
1286 int save_errno = errno;
1287
1288 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1289 target_pid_to_str (ptid_of (thr)),
1290 save_errno ? strerror (save_errno) : "OK");
1291 }
1292 }
1293
1294 /* Kill LWP and wait for it to die. */
1295
1296 static void
1297 kill_wait_lwp (struct lwp_info *lwp)
1298 {
1299 struct thread_info *thr = get_lwp_thread (lwp);
1300 int pid = ptid_get_pid (ptid_of (thr));
1301 int lwpid = ptid_get_lwp (ptid_of (thr));
1302 int wstat;
1303 int res;
1304
1305 if (debug_threads)
1306 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1307
1308 do
1309 {
1310 linux_kill_one_lwp (lwp);
1311
1312 /* Make sure it died. Notes:
1313
1314 - The loop is most likely unnecessary.
1315
1316 - We don't use linux_wait_for_event as that could delete lwps
1317 while we're iterating over them. We're not interested in
1318 any pending status at this point, only in making sure all
1319 wait status on the kernel side are collected until the
1320 process is reaped.
1321
1322 - We don't use __WALL here as the __WALL emulation relies on
1323 SIGCHLD, and killing a stopped process doesn't generate
1324 one, nor an exit status.
1325 */
1326 res = my_waitpid (lwpid, &wstat, 0);
1327 if (res == -1 && errno == ECHILD)
1328 res = my_waitpid (lwpid, &wstat, __WCLONE);
1329 } while (res > 0 && WIFSTOPPED (wstat));
1330
1331 /* Even if it was stopped, the child may have already disappeared.
1332 E.g., if it was killed by SIGKILL. */
1333 if (res < 0 && errno != ECHILD)
1334 perror_with_name ("kill_wait_lwp");
1335 }
1336
1337 /* Callback for `find_inferior'. Kills an lwp of a given process,
1338 except the leader. */
1339
1340 static int
1341 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1342 {
1343 struct thread_info *thread = (struct thread_info *) entry;
1344 struct lwp_info *lwp = get_thread_lwp (thread);
1345 int pid = * (int *) args;
1346
1347 if (ptid_get_pid (entry->id) != pid)
1348 return 0;
1349
1350 /* We avoid killing the first thread here, because of a Linux kernel (at
1351 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1352 the children get a chance to be reaped, it will remain a zombie
1353 forever. */
1354
1355 if (lwpid_of (thread) == pid)
1356 {
1357 if (debug_threads)
1358 debug_printf ("lkop: is last of process %s\n",
1359 target_pid_to_str (entry->id));
1360 return 0;
1361 }
1362
1363 kill_wait_lwp (lwp);
1364 return 0;
1365 }
1366
1367 static int
1368 linux_kill (int pid)
1369 {
1370 struct process_info *process;
1371 struct lwp_info *lwp;
1372
1373 process = find_process_pid (pid);
1374 if (process == NULL)
1375 return -1;
1376
1377 /* If we're killing a running inferior, make sure it is stopped
1378 first, as PTRACE_KILL will not work otherwise. */
1379 stop_all_lwps (0, NULL);
1380
1381 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1382
1383 /* See the comment in linux_kill_one_lwp. We did not kill the first
1384 thread in the list, so do so now. */
1385 lwp = find_lwp_pid (pid_to_ptid (pid));
1386
1387 if (lwp == NULL)
1388 {
1389 if (debug_threads)
1390 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1391 pid);
1392 }
1393 else
1394 kill_wait_lwp (lwp);
1395
1396 the_target->mourn (process);
1397
1398 /* Since we presently can only stop all lwps of all processes, we
1399 need to unstop lwps of other processes. */
1400 unstop_all_lwps (0, NULL);
1401 return 0;
1402 }
1403
1404 /* Get pending signal of THREAD, for detaching purposes. This is the
1405 signal the thread last stopped for, which we need to deliver to the
1406 thread when detaching; otherwise, it'd be suppressed/lost. */
1407
1408 static int
1409 get_detach_signal (struct thread_info *thread)
1410 {
1411 enum gdb_signal signo = GDB_SIGNAL_0;
1412 int status;
1413 struct lwp_info *lp = get_thread_lwp (thread);
1414
1415 if (lp->status_pending_p)
1416 status = lp->status_pending;
1417 else
1418 {
1419 /* If the thread had been suspended by gdbserver, and it stopped
1420 cleanly, then it'll have stopped with SIGSTOP. But we don't
1421 want to deliver that SIGSTOP. */
1422 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1423 || thread->last_status.value.sig == GDB_SIGNAL_0)
1424 return 0;
1425
1426 /* Otherwise, we may need to deliver the signal we
1427 intercepted. */
1428 status = lp->last_status;
1429 }
1430
1431 if (!WIFSTOPPED (status))
1432 {
1433 if (debug_threads)
1434 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1435 target_pid_to_str (ptid_of (thread)));
1436 return 0;
1437 }
1438
1439 /* Extended wait statuses aren't real SIGTRAPs. */
1440 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1441 {
1442 if (debug_threads)
1443 debug_printf ("GPS: lwp %s had stopped with extended "
1444 "status: no pending signal\n",
1445 target_pid_to_str (ptid_of (thread)));
1446 return 0;
1447 }
1448
1449 signo = gdb_signal_from_host (WSTOPSIG (status));
1450
1451 if (program_signals_p && !program_signals[signo])
1452 {
1453 if (debug_threads)
1454 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1455 target_pid_to_str (ptid_of (thread)),
1456 gdb_signal_to_string (signo));
1457 return 0;
1458 }
1459 else if (!program_signals_p
1460 /* If we have no way to know which signals GDB does not
1461 want to have passed to the program, assume
1462 SIGTRAP/SIGINT, which is GDB's default. */
1463 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1464 {
1465 if (debug_threads)
1466 debug_printf ("GPS: lwp %s had signal %s, "
1467 "but we don't know if we should pass it. "
1468 "Default to not.\n",
1469 target_pid_to_str (ptid_of (thread)),
1470 gdb_signal_to_string (signo));
1471 return 0;
1472 }
1473 else
1474 {
1475 if (debug_threads)
1476 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1477 target_pid_to_str (ptid_of (thread)),
1478 gdb_signal_to_string (signo));
1479
1480 return WSTOPSIG (status);
1481 }
1482 }
1483
1484 /* Detach from LWP. */
1485
1486 static void
1487 linux_detach_one_lwp (struct lwp_info *lwp)
1488 {
1489 struct thread_info *thread = get_lwp_thread (lwp);
1490 int sig;
1491 int lwpid;
1492
1493 /* If there is a pending SIGSTOP, get rid of it. */
1494 if (lwp->stop_expected)
1495 {
1496 if (debug_threads)
1497 debug_printf ("Sending SIGCONT to %s\n",
1498 target_pid_to_str (ptid_of (thread)));
1499
1500 kill_lwp (lwpid_of (thread), SIGCONT);
1501 lwp->stop_expected = 0;
1502 }
1503
1504 /* Pass on any pending signal for this thread. */
1505 sig = get_detach_signal (thread);
1506
1507 /* Preparing to resume may try to write registers, and fail if the
1508 lwp is zombie. If that happens, ignore the error. We'll handle
1509 it below, when detach fails with ESRCH. */
1510 TRY
1511 {
1512 /* Flush any pending changes to the process's registers. */
1513 regcache_invalidate_thread (thread);
1514
1515 /* Finally, let it resume. */
1516 if (the_low_target.prepare_to_resume != NULL)
1517 the_low_target.prepare_to_resume (lwp);
1518 }
1519 CATCH (ex, RETURN_MASK_ERROR)
1520 {
1521 if (!check_ptrace_stopped_lwp_gone (lwp))
1522 throw_exception (ex);
1523 }
1524 END_CATCH
1525
1526 lwpid = lwpid_of (thread);
1527 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1528 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1529 {
1530 int save_errno = errno;
1531
1532 /* We know the thread exists, so ESRCH must mean the lwp is
1533 zombie. This can happen if one of the already-detached
1534 threads exits the whole thread group. In that case we're
1535 still attached, and must reap the lwp. */
1536 if (save_errno == ESRCH)
1537 {
1538 int ret, status;
1539
1540 ret = my_waitpid (lwpid, &status, __WALL);
1541 if (ret == -1)
1542 {
1543 warning (_("Couldn't reap LWP %d while detaching: %s"),
1544 lwpid, strerror (errno));
1545 }
1546 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1547 {
1548 warning (_("Reaping LWP %d while detaching "
1549 "returned unexpected status 0x%x"),
1550 lwpid, status);
1551 }
1552 }
1553 else
1554 {
1555 error (_("Can't detach %s: %s"),
1556 target_pid_to_str (ptid_of (thread)),
1557 strerror (save_errno));
1558 }
1559 }
1560 else if (debug_threads)
1561 {
1562 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1563 target_pid_to_str (ptid_of (thread)),
1564 strsignal (sig));
1565 }
1566
1567 delete_lwp (lwp);
1568 }
1569
1570 /* Callback for find_inferior. Detaches from non-leader threads of a
1571 given process. */
1572
1573 static int
1574 linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
1575 {
1576 struct thread_info *thread = (struct thread_info *) entry;
1577 struct lwp_info *lwp = get_thread_lwp (thread);
1578 int pid = *(int *) args;
1579 int lwpid = lwpid_of (thread);
1580
1581 /* Skip other processes. */
1582 if (ptid_get_pid (entry->id) != pid)
1583 return 0;
1584
1585 /* We don't actually detach from the thread group leader just yet.
1586 If the thread group exits, we must reap the zombie clone lwps
1587 before we're able to reap the leader. */
1588 if (ptid_get_pid (entry->id) == lwpid)
1589 return 0;
1590
1591 linux_detach_one_lwp (lwp);
1592 return 0;
1593 }
1594
1595 static int
1596 linux_detach (int pid)
1597 {
1598 struct process_info *process;
1599 struct lwp_info *main_lwp;
1600
1601 process = find_process_pid (pid);
1602 if (process == NULL)
1603 return -1;
1604
1605 /* If there's a step over already in progress, let it finish first;
1606 otherwise nesting a stabilize_threads operation on top gets real
1607 messy. */
1608 complete_ongoing_step_over ();
1609
1610 /* Stop all threads before detaching. First, ptrace requires that
1611 the thread is stopped to successfully detach. Second, thread_db
1612 may need to uninstall thread event breakpoints from memory, which
1613 only works with a stopped process anyway. */
1614 stop_all_lwps (0, NULL);
1615
1616 #ifdef USE_THREAD_DB
1617 thread_db_detach (process);
1618 #endif
1619
1620 /* Stabilize threads (move out of jump pads). */
1621 stabilize_threads ();
1622
1623 /* Detach from the clone lwps first. If the thread group exits just
1624 while we're detaching, we must reap the clone lwps before we're
1625 able to reap the leader. */
1626 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1627
1628 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1629 linux_detach_one_lwp (main_lwp);
1630
1631 the_target->mourn (process);
1632
1633 /* Since we presently can only stop all lwps of all processes, we
1634 need to unstop lwps of other processes. */
1635 unstop_all_lwps (0, NULL);
1636 return 0;
1637 }
1638
1639 /* Remove all LWPs that belong to process PROC from the lwp list. */
1640
1641 static int
1642 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1643 {
1644 struct thread_info *thread = (struct thread_info *) entry;
1645 struct lwp_info *lwp = get_thread_lwp (thread);
1646 struct process_info *process = (struct process_info *) proc;
1647
1648 if (pid_of (thread) == pid_of (process))
1649 delete_lwp (lwp);
1650
1651 return 0;
1652 }
1653
1654 static void
1655 linux_mourn (struct process_info *process)
1656 {
1657 struct process_info_private *priv;
1658
1659 #ifdef USE_THREAD_DB
1660 thread_db_mourn (process);
1661 #endif
1662
1663 find_inferior (&all_threads, delete_lwp_callback, process);
1664
1665 /* Free all private data. */
1666 priv = process->priv;
1667 free (priv->arch_private);
1668 free (priv);
1669 process->priv = NULL;
1670
1671 remove_process (process);
1672 }
1673
1674 static void
1675 linux_join (int pid)
1676 {
1677 int status, ret;
1678
1679 do {
1680 ret = my_waitpid (pid, &status, 0);
1681 if (ret > 0 && (WIFEXITED (status) || WIFSIGNALED (status)))
1682 break;
1683 } while (ret != -1 || errno != ECHILD);
1684 }
1685
1686 /* Return nonzero if the given thread is still alive. */
1687 static int
1688 linux_thread_alive (ptid_t ptid)
1689 {
1690 struct lwp_info *lwp = find_lwp_pid (ptid);
1691
1692 /* We assume we always know if a thread exits. If a whole process
1693 exited but we still haven't been able to report it to GDB, we'll
1694 hold on to the last lwp of the dead process. */
1695 if (lwp != NULL)
1696 return !lwp_is_marked_dead (lwp);
1697 else
1698 return 0;
1699 }
1700
1701 /* Return 1 if this lwp still has an interesting status pending. If
1702 not (e.g., it had stopped for a breakpoint that is gone), return
1703 false. */
1704
1705 static int
1706 thread_still_has_status_pending_p (struct thread_info *thread)
1707 {
1708 struct lwp_info *lp = get_thread_lwp (thread);
1709
1710 if (!lp->status_pending_p)
1711 return 0;
1712
1713 if (thread->last_resume_kind != resume_stop
1714 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1715 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1716 {
1717 struct thread_info *saved_thread;
1718 CORE_ADDR pc;
1719 int discard = 0;
1720
1721 gdb_assert (lp->last_status != 0);
1722
1723 pc = get_pc (lp);
1724
1725 saved_thread = current_thread;
1726 current_thread = thread;
1727
1728 if (pc != lp->stop_pc)
1729 {
1730 if (debug_threads)
1731 debug_printf ("PC of %ld changed\n",
1732 lwpid_of (thread));
1733 discard = 1;
1734 }
1735
1736 #if !USE_SIGTRAP_SIGINFO
1737 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1738 && !(*the_low_target.breakpoint_at) (pc))
1739 {
1740 if (debug_threads)
1741 debug_printf ("previous SW breakpoint of %ld gone\n",
1742 lwpid_of (thread));
1743 discard = 1;
1744 }
1745 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1746 && !hardware_breakpoint_inserted_here (pc))
1747 {
1748 if (debug_threads)
1749 debug_printf ("previous HW breakpoint of %ld gone\n",
1750 lwpid_of (thread));
1751 discard = 1;
1752 }
1753 #endif
1754
1755 current_thread = saved_thread;
1756
1757 if (discard)
1758 {
1759 if (debug_threads)
1760 debug_printf ("discarding pending breakpoint status\n");
1761 lp->status_pending_p = 0;
1762 return 0;
1763 }
1764 }
1765
1766 return 1;
1767 }
1768
1769 /* Returns true if LWP is resumed from the client's perspective. */
1770
1771 static int
1772 lwp_resumed (struct lwp_info *lwp)
1773 {
1774 struct thread_info *thread = get_lwp_thread (lwp);
1775
1776 if (thread->last_resume_kind != resume_stop)
1777 return 1;
1778
1779 /* Did gdb send us a `vCont;t', but we haven't reported the
1780 corresponding stop to gdb yet? If so, the thread is still
1781 resumed/running from gdb's perspective. */
1782 if (thread->last_resume_kind == resume_stop
1783 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1784 return 1;
1785
1786 return 0;
1787 }
1788
1789 /* Return 1 if this lwp has an interesting status pending. */
1790 static int
1791 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1792 {
1793 struct thread_info *thread = (struct thread_info *) entry;
1794 struct lwp_info *lp = get_thread_lwp (thread);
1795 ptid_t ptid = * (ptid_t *) arg;
1796
1797 /* Check if we're only interested in events from a specific process
1798 or a specific LWP. */
1799 if (!ptid_match (ptid_of (thread), ptid))
1800 return 0;
1801
1802 if (!lwp_resumed (lp))
1803 return 0;
1804
1805 if (lp->status_pending_p
1806 && !thread_still_has_status_pending_p (thread))
1807 {
1808 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1809 return 0;
1810 }
1811
1812 return lp->status_pending_p;
1813 }
1814
1815 static int
1816 same_lwp (struct inferior_list_entry *entry, void *data)
1817 {
1818 ptid_t ptid = *(ptid_t *) data;
1819 int lwp;
1820
1821 if (ptid_get_lwp (ptid) != 0)
1822 lwp = ptid_get_lwp (ptid);
1823 else
1824 lwp = ptid_get_pid (ptid);
1825
1826 if (ptid_get_lwp (entry->id) == lwp)
1827 return 1;
1828
1829 return 0;
1830 }
1831
1832 struct lwp_info *
1833 find_lwp_pid (ptid_t ptid)
1834 {
1835 struct inferior_list_entry *thread
1836 = find_inferior (&all_threads, same_lwp, &ptid);
1837
1838 if (thread == NULL)
1839 return NULL;
1840
1841 return get_thread_lwp ((struct thread_info *) thread);
1842 }
1843
1844 /* Return the number of known LWPs in the tgid given by PID. */
1845
1846 static int
1847 num_lwps (int pid)
1848 {
1849 struct inferior_list_entry *inf, *tmp;
1850 int count = 0;
1851
1852 ALL_INFERIORS (&all_threads, inf, tmp)
1853 {
1854 if (ptid_get_pid (inf->id) == pid)
1855 count++;
1856 }
1857
1858 return count;
1859 }
1860
1861 /* The arguments passed to iterate_over_lwps. */
1862
1863 struct iterate_over_lwps_args
1864 {
1865 /* The FILTER argument passed to iterate_over_lwps. */
1866 ptid_t filter;
1867
1868 /* The CALLBACK argument passed to iterate_over_lwps. */
1869 iterate_over_lwps_ftype *callback;
1870
1871 /* The DATA argument passed to iterate_over_lwps. */
1872 void *data;
1873 };
1874
1875 /* Callback for find_inferior used by iterate_over_lwps to filter
1876 calls to the callback supplied to that function. Returning a
1877 nonzero value causes find_inferior to stop iterating and return
1878 the current inferior_list_entry. Returning zero indicates that
1879 find_inferior should continue iterating. */
1880
1881 static int
1882 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1883 {
1884 struct iterate_over_lwps_args *args
1885 = (struct iterate_over_lwps_args *) args_p;
1886
1887 if (ptid_match (entry->id, args->filter))
1888 {
1889 struct thread_info *thr = (struct thread_info *) entry;
1890 struct lwp_info *lwp = get_thread_lwp (thr);
1891
1892 return (*args->callback) (lwp, args->data);
1893 }
1894
1895 return 0;
1896 }
1897
1898 /* See nat/linux-nat.h. */
1899
1900 struct lwp_info *
1901 iterate_over_lwps (ptid_t filter,
1902 iterate_over_lwps_ftype callback,
1903 void *data)
1904 {
1905 struct iterate_over_lwps_args args = {filter, callback, data};
1906 struct inferior_list_entry *entry;
1907
1908 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1909 if (entry == NULL)
1910 return NULL;
1911
1912 return get_thread_lwp ((struct thread_info *) entry);
1913 }
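/* A minimal usage sketch (illustrative only; not used in this
   file): to find the first stopped LWP of process PID, one could
   write a callback

     static int
     is_stopped (struct lwp_info *lwp, void *data)
     {
       return lwp->stopped;
     }

   and call

     struct lwp_info *lwp
       = iterate_over_lwps (pid_to_ptid (pid), is_stopped, NULL);

   A nonzero return from the callback stops the iteration and makes
   that LWP the result; returning zero keeps iterating. */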
1914
1915 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1916 their exits until all other threads in the group have exited. */
1917
1918 static void
1919 check_zombie_leaders (void)
1920 {
1921 struct process_info *proc, *tmp;
1922
1923 ALL_PROCESSES (proc, tmp)
1924 {
1925 pid_t leader_pid = pid_of (proc);
1926 struct lwp_info *leader_lp;
1927
1928 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1929
1930 if (debug_threads)
1931 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1932 "num_lwps=%d, zombie=%d\n",
1933 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1934 linux_proc_pid_is_zombie (leader_pid));
1935
1936 if (leader_lp != NULL && !leader_lp->stopped
1937 /* Check if there are other threads in the group, as we may
1938 have raced with the inferior simply exiting. */
1939 && !last_thread_of_process_p (leader_pid)
1940 && linux_proc_pid_is_zombie (leader_pid))
1941 {
1942 /* A leader zombie can mean one of two things:
1943
1944 - It exited, and there's an exit status pending, or only
1945 the leader exited (not the whole program). In the latter
1946 case, we can't waitpid the leader's exit status until all
1947 other threads are gone.
1948
1949 - There are 3 or more threads in the group, and a thread
1950 other than the leader exec'd. On an exec, the Linux
1951 kernel destroys all other threads (except the execing
1952 one) in the thread group, and resets the execing thread's
1953 tid to the tgid. No exit notification is sent for the
1954 execing thread -- from the ptracer's perspective, it
1955 appears as though the execing thread just vanishes.
1956 Until we reap all other threads except the leader and the
1957 execing thread, the leader will be zombie, and the
1958 execing thread will be in `D (disk sleep)'. As soon as
1959 all other threads are reaped, the execing thread changes
1960 its tid to the tgid, and the previous (zombie) leader
1961 vanishes, giving way to the "new" leader. We could try
1962 distinguishing the exit and exec cases by waiting once
1963 more, and seeing if something comes out, but it doesn't
1964 sound useful. The previous leader _does_ go away, and
1965 we'll re-add the new one once we see the exec event
1966 (which is just the same as what would happen if the
1967 previous leader did exit voluntarily before some other
1968 thread execs). */
1969
1970 if (debug_threads)
1971 fprintf (stderr,
1972 "CZL: Thread group leader %d zombie "
1973 "(it exited, or another thread execd).\n",
1974 leader_pid);
1975
1976 delete_lwp (leader_lp);
1977 }
1978 }
1979 }
1980
1981 /* Callback for `find_inferior'. Returns the first LWP that is not
1982 stopped. ARG is a PTID filter. */
1983
1984 static int
1985 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1986 {
1987 struct thread_info *thr = (struct thread_info *) entry;
1988 struct lwp_info *lwp;
1989 ptid_t filter = *(ptid_t *) arg;
1990
1991 if (!ptid_match (ptid_of (thr), filter))
1992 return 0;
1993
1994 lwp = get_thread_lwp (thr);
1995 if (!lwp->stopped)
1996 return 1;
1997
1998 return 0;
1999 }
2000
2001 /* Increment LWP's suspend count. */
2002
2003 static void
2004 lwp_suspended_inc (struct lwp_info *lwp)
2005 {
2006 lwp->suspended++;
2007
2008 if (debug_threads && lwp->suspended > 4)
2009 {
2010 struct thread_info *thread = get_lwp_thread (lwp);
2011
2012 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2013 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2014 }
2015 }
2016
2017 /* Decrement LWP's suspend count. */
2018
2019 static void
2020 lwp_suspended_decr (struct lwp_info *lwp)
2021 {
2022 lwp->suspended--;
2023
2024 if (lwp->suspended < 0)
2025 {
2026 struct thread_info *thread = get_lwp_thread (lwp);
2027
2028 internal_error (__FILE__, __LINE__,
2029 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2030 lwp->suspended);
2031 }
2032 }
2033
2034 /* This function should only be called if the LWP got a SIGTRAP.
2035
2036 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2037 event was handled, 0 otherwise. */
2038
2039 static int
2040 handle_tracepoints (struct lwp_info *lwp)
2041 {
2042 struct thread_info *tinfo = get_lwp_thread (lwp);
2043 int tpoint_related_event = 0;
2044
2045 gdb_assert (lwp->suspended == 0);
2046
2047 /* If this tracepoint hit causes a tracing stop, we'll immediately
2048 uninsert tracepoints. To do this, we temporarily pause all
2049 threads, unpatch away, and then unpause threads. We need to make
2050 sure the unpausing doesn't resume LWP too. */
2051 lwp_suspended_inc (lwp);
2052
2053 /* And we need to be sure that any all-threads-stopping doesn't try
2054 to move threads out of the jump pads, as it could deadlock the
2055 inferior (LWP could be in the jump pad, maybe even holding the
2056 lock). */
2057
2058 /* Do any necessary step collect actions. */
2059 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2060
2061 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2062
2063 /* See if we just hit a tracepoint and do its main collect
2064 actions. */
2065 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2066
2067 lwp_suspended_decr (lwp);
2068
2069 gdb_assert (lwp->suspended == 0);
2070 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2071
2072 if (tpoint_related_event)
2073 {
2074 if (debug_threads)
2075 debug_printf ("got a tracepoint event\n");
2076 return 1;
2077 }
2078
2079 return 0;
2080 }
2081
2082 /* Convenience wrapper. Returns true if LWP is presently collecting a
2083 fast tracepoint. */
2084
2085 static int
2086 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2087 struct fast_tpoint_collect_status *status)
2088 {
2089 CORE_ADDR thread_area;
2090 struct thread_info *thread = get_lwp_thread (lwp);
2091
2092 if (the_low_target.get_thread_area == NULL)
2093 return 0;
2094
2095 /* Get the thread area address. This is used to recognize which
2096 thread is which when tracing with the in-process agent library.
2097 We don't read anything from the address, and treat it as opaque;
2098 it's the address itself that we assume is unique per-thread. */
2099 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2100 return 0;
2101
2102 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2103 }
2104
2105 /* We resume in the caller because we want to be able to pass
2106 lwp->status_pending as WSTAT, and we need to clear
2107 status_pending_p before resuming; otherwise linux_resume_one_lwp
2108 refuses to resume. */
2109
2110 static int
2111 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2112 {
2113 struct thread_info *saved_thread;
2114
2115 saved_thread = current_thread;
2116 current_thread = get_lwp_thread (lwp);
2117
2118 if ((wstat == NULL
2119 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2120 && supports_fast_tracepoints ()
2121 && agent_loaded_p ())
2122 {
2123 struct fast_tpoint_collect_status status;
2124 int r;
2125
2126 if (debug_threads)
2127 debug_printf ("Checking whether LWP %ld needs to move out of the "
2128 "jump pad.\n",
2129 lwpid_of (current_thread));
2130
2131 r = linux_fast_tracepoint_collecting (lwp, &status);
2132
2133 if (wstat == NULL
2134 || (WSTOPSIG (*wstat) != SIGILL
2135 && WSTOPSIG (*wstat) != SIGFPE
2136 && WSTOPSIG (*wstat) != SIGSEGV
2137 && WSTOPSIG (*wstat) != SIGBUS))
2138 {
2139 lwp->collecting_fast_tracepoint = r;
2140
2141 if (r != 0)
2142 {
2143 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2144 {
2145 /* Haven't executed the original instruction yet.
2146 Set breakpoint there, and wait till it's hit,
2147 then single-step until exiting the jump pad. */
2148 lwp->exit_jump_pad_bkpt
2149 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2150 }
2151
2152 if (debug_threads)
2153 debug_printf ("Checking whether LWP %ld needs to move out of "
2154 "the jump pad...it does\n",
2155 lwpid_of (current_thread));
2156 current_thread = saved_thread;
2157
2158 return 1;
2159 }
2160 }
2161 else
2162 {
2163 /* If we get a synchronous signal while collecting, *and*
2164 while executing the (relocated) original instruction,
2165 reset the PC to point at the tpoint address, before
2166 reporting to GDB. Otherwise, it's an IPA lib bug: just
2167 report the signal to GDB, and pray for the best. */
2168
2169 lwp->collecting_fast_tracepoint = 0;
2170
2171 if (r != 0
2172 && (status.adjusted_insn_addr <= lwp->stop_pc
2173 && lwp->stop_pc < status.adjusted_insn_addr_end))
2174 {
2175 siginfo_t info;
2176 struct regcache *regcache;
2177
2178 /* The si_addr on a few signals references the address
2179 of the faulting instruction. Adjust that as
2180 well. */
2181 if ((WSTOPSIG (*wstat) == SIGILL
2182 || WSTOPSIG (*wstat) == SIGFPE
2183 || WSTOPSIG (*wstat) == SIGBUS
2184 || WSTOPSIG (*wstat) == SIGSEGV)
2185 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2186 (PTRACE_TYPE_ARG3) 0, &info) == 0
2187 /* Final check just to make sure we don't clobber
2188 the siginfo of non-kernel-sent signals. */
2189 && (uintptr_t) info.si_addr == lwp->stop_pc)
2190 {
2191 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2192 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2193 (PTRACE_TYPE_ARG3) 0, &info);
2194 }
2195
2196 regcache = get_thread_regcache (current_thread, 1);
2197 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2198 lwp->stop_pc = status.tpoint_addr;
2199
2200 /* Cancel any fast tracepoint lock this thread was
2201 holding. */
2202 force_unlock_trace_buffer ();
2203 }
2204
2205 if (lwp->exit_jump_pad_bkpt != NULL)
2206 {
2207 if (debug_threads)
2208 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2209 "stopping all threads momentarily.\n");
2210
2211 stop_all_lwps (1, lwp);
2212
2213 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2214 lwp->exit_jump_pad_bkpt = NULL;
2215
2216 unstop_all_lwps (1, lwp);
2217
2218 gdb_assert (lwp->suspended >= 0);
2219 }
2220 }
2221 }
2222
2223 if (debug_threads)
2224 debug_printf ("Checking whether LWP %ld needs to move out of the "
2225 "jump pad...no\n",
2226 lwpid_of (current_thread));
2227
2228 current_thread = saved_thread;
2229 return 0;
2230 }
2231
2232 /* Enqueue one signal in the "signals to report later when out of the
2233 jump pad" list. */
2234
2235 static void
2236 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2237 {
2238 struct pending_signals *p_sig;
2239 struct thread_info *thread = get_lwp_thread (lwp);
2240
2241 if (debug_threads)
2242 debug_printf ("Deferring signal %d for LWP %ld.\n",
2243 WSTOPSIG (*wstat), lwpid_of (thread));
2244
2245 if (debug_threads)
2246 {
2247 struct pending_signals *sig;
2248
2249 for (sig = lwp->pending_signals_to_report;
2250 sig != NULL;
2251 sig = sig->prev)
2252 debug_printf (" Already queued %d\n",
2253 sig->signal);
2254
2255 debug_printf (" (no more currently queued signals)\n");
2256 }
2257
2258 /* Don't enqueue non-RT signals if they are already in the deferred
2259 queue. (SIGSTOP being the easiest signal to see ending up here
2260 twice) */
2261 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2262 {
2263 struct pending_signals *sig;
2264
2265 for (sig = lwp->pending_signals_to_report;
2266 sig != NULL;
2267 sig = sig->prev)
2268 {
2269 if (sig->signal == WSTOPSIG (*wstat))
2270 {
2271 if (debug_threads)
2272 debug_printf ("Not requeuing already queued non-RT signal %d"
2273 " for LWP %ld\n",
2274 sig->signal,
2275 lwpid_of (thread));
2276 return;
2277 }
2278 }
2279 }
2280
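/* Push the new entry at the head of the list; PREV points at the
   previously queued (older) entry. dequeue_one_deferred_signal
   walks to the tail, so signals are reported oldest first. */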
2281 p_sig = XCNEW (struct pending_signals);
2282 p_sig->prev = lwp->pending_signals_to_report;
2283 p_sig->signal = WSTOPSIG (*wstat);
2284
2285 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2286 &p_sig->info);
2287
2288 lwp->pending_signals_to_report = p_sig;
2289 }
2290
2291 /* Dequeue one signal from the "signals to report later when out of
2292 the jump pad" list. */
2293
2294 static int
2295 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2296 {
2297 struct thread_info *thread = get_lwp_thread (lwp);
2298
2299 if (lwp->pending_signals_to_report != NULL)
2300 {
2301 struct pending_signals **p_sig;
2302
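/* Dequeue from the tail: entries are pushed at the head, so the
   tail holds the oldest deferred signal. */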
2303 p_sig = &lwp->pending_signals_to_report;
2304 while ((*p_sig)->prev != NULL)
2305 p_sig = &(*p_sig)->prev;
2306
2307 *wstat = W_STOPCODE ((*p_sig)->signal);
2308 if ((*p_sig)->info.si_signo != 0)
2309 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2310 &(*p_sig)->info);
2311 free (*p_sig);
2312 *p_sig = NULL;
2313
2314 if (debug_threads)
2315 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2316 WSTOPSIG (*wstat), lwpid_of (thread));
2317
2318 if (debug_threads)
2319 {
2320 struct pending_signals *sig;
2321
2322 for (sig = lwp->pending_signals_to_report;
2323 sig != NULL;
2324 sig = sig->prev)
2325 debug_printf (" Still queued %d\n",
2326 sig->signal);
2327
2328 debug_printf (" (no more queued signals)\n");
2329 }
2330
2331 return 1;
2332 }
2333
2334 return 0;
2335 }
2336
2337 /* Fetch the possibly triggered data watchpoint info and store it in
2338 CHILD.
2339
2340 On some archs, like x86, that use debug registers to set
2341 watchpoints, the way to know which watched address trapped is to
2342 check the register that is used to select which address to watch.
2343 The problem is that between setting the watchpoint and reading
2344 back which data address trapped, the user may change the set of
2345 watchpoints, and, as a consequence, GDB changes the debug
2346 registers in the inferior. To avoid reading back a stale
2347 stopped-data-address when that happens, we cache in CHILD the fact
2348 that a watchpoint trapped, and the corresponding data address, as
2349 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2350 registers meanwhile, we have the cached data we can rely on. */
2351
2352 static int
2353 check_stopped_by_watchpoint (struct lwp_info *child)
2354 {
2355 if (the_low_target.stopped_by_watchpoint != NULL)
2356 {
2357 struct thread_info *saved_thread;
2358
2359 saved_thread = current_thread;
2360 current_thread = get_lwp_thread (child);
2361
2362 if (the_low_target.stopped_by_watchpoint ())
2363 {
2364 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2365
2366 if (the_low_target.stopped_data_address != NULL)
2367 child->stopped_data_address
2368 = the_low_target.stopped_data_address ();
2369 else
2370 child->stopped_data_address = 0;
2371 }
2372
2373 current_thread = saved_thread;
2374 }
2375
2376 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2377 }
2378
2379 /* Return the ptrace options that we want to try to enable. */
2380
2381 static int
2382 linux_low_ptrace_options (int attached)
2383 {
2384 int options = 0;
2385
2386 if (!attached)
2387 options |= PTRACE_O_EXITKILL;
2388
2389 if (report_fork_events)
2390 options |= PTRACE_O_TRACEFORK;
2391
2392 if (report_vfork_events)
2393 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2394
2395 if (report_exec_events)
2396 options |= PTRACE_O_TRACEEXEC;
2397
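/* With PTRACE_O_TRACESYSGOOD, the kernel reports syscall stops with
   SIGTRAP | 0x80 (SYSCALL_SIGTRAP here), so they can be told apart
   from ordinary SIGTRAPs. */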
2398 options |= PTRACE_O_TRACESYSGOOD;
2399
2400 return options;
2401 }
2402
2403 /* Do low-level handling of the event, and check if we should go on
2404 and pass it to caller code. Return the affected lwp if we should,
2405 or NULL otherwise. */
2406
2407 static struct lwp_info *
2408 linux_low_filter_event (int lwpid, int wstat)
2409 {
2410 struct lwp_info *child;
2411 struct thread_info *thread;
2412 int have_stop_pc = 0;
2413
2414 child = find_lwp_pid (pid_to_ptid (lwpid));
2415
2416 /* Check for stop events reported by a process we didn't already
2417 know about - anything not already in our LWP list.
2418
2419 If we're expecting to receive stopped processes after
2420 fork, vfork, and clone events, then we'll just add the
2421 new one to our list and go back to waiting for the event
2422 to be reported - the stopped process might be returned
2423 from waitpid before or after the event is.
2424
2425 But note the case of a non-leader thread exec'ing after the
2426 leader has exited and gone from our lists (because
2427 check_zombie_leaders deleted it). The non-leader thread
2428 changes its tid to the tgid. */
2429
2430 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2431 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2432 {
2433 ptid_t child_ptid;
2434
2435 /* A multi-thread exec after we had seen the leader exiting. */
2436 if (debug_threads)
2437 {
2438 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2439 "after exec.\n", lwpid);
2440 }
2441
2442 child_ptid = ptid_build (lwpid, lwpid, 0);
2443 child = add_lwp (child_ptid);
2444 child->stopped = 1;
2445 current_thread = child->thread;
2446 }
2447
2448 /* If we didn't find a process, one of two things presumably happened:
2449 - A process we started and then detached from has exited. Ignore it.
2450 - A process we are controlling has forked and the new child's stop
2451 was reported to us by the kernel. Save its PID. */
2452 if (child == NULL && WIFSTOPPED (wstat))
2453 {
2454 add_to_pid_list (&stopped_pids, lwpid, wstat);
2455 return NULL;
2456 }
2457 else if (child == NULL)
2458 return NULL;
2459
2460 thread = get_lwp_thread (child);
2461
2462 child->stopped = 1;
2463
2464 child->last_status = wstat;
2465
2466 /* Check if the thread has exited. */
2467 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2468 {
2469 if (debug_threads)
2470 debug_printf ("LLFE: %d exited.\n", lwpid);
2471
2472 if (finish_step_over (child))
2473 {
2474 /* Unsuspend all other LWPs, and set them back running again. */
2475 unsuspend_all_lwps (child);
2476 }
2477
2478 /* If there is at least one more LWP, then the exit signal was
2479 not the end of the debugged application and should be
2480 ignored, unless GDB wants to hear about thread exits. */
2481 if (report_thread_events
2482 || last_thread_of_process_p (pid_of (thread)))
2483 {
2484 /* Events are serialized to the GDB core, and we can't
2485 report this one right now. Leave the status pending for
2486 the next time we're able to report it. */
2487 mark_lwp_dead (child, wstat);
2488 return child;
2489 }
2490 else
2491 {
2492 delete_lwp (child);
2493 return NULL;
2494 }
2495 }
2496
2497 gdb_assert (WIFSTOPPED (wstat));
2498
2499 if (WIFSTOPPED (wstat))
2500 {
2501 struct process_info *proc;
2502
2503 /* Architecture-specific setup after inferior is running. */
2504 proc = find_process_pid (pid_of (thread));
2505 if (proc->tdesc == NULL)
2506 {
2507 if (proc->attached)
2508 {
2509 /* This needs to happen after we have attached to the
2510 inferior and it is stopped for the first time, but
2511 before we access any inferior registers. */
2512 linux_arch_setup_thread (thread);
2513 }
2514 else
2515 {
2516 /* The process is started, but GDBserver will do
2517 architecture-specific setup after the program stops at
2518 the first instruction. */
2519 child->status_pending_p = 1;
2520 child->status_pending = wstat;
2521 return child;
2522 }
2523 }
2524 }
2525
2526 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2527 {
2528 struct process_info *proc = find_process_pid (pid_of (thread));
2529 int options = linux_low_ptrace_options (proc->attached);
2530
2531 linux_enable_event_reporting (lwpid, options);
2532 child->must_set_ptrace_flags = 0;
2533 }
2534
2535 /* Always update syscall_state, even if it will be filtered later. */
2536 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2537 {
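/* Syscall stops arrive in entry/return pairs, so just flip the
   state recorded at the previous syscall stop. */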
2538 child->syscall_state
2539 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2540 ? TARGET_WAITKIND_SYSCALL_RETURN
2541 : TARGET_WAITKIND_SYSCALL_ENTRY);
2542 }
2543 else
2544 {
2545 /* Almost all other ptrace-stops are known to be outside of system
2546 calls, with further exceptions in handle_extended_wait. */
2547 child->syscall_state = TARGET_WAITKIND_IGNORE;
2548 }
2549
2550 /* Be careful to not overwrite stop_pc until save_stop_reason is
2551 called. */
2552 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2553 && linux_is_extended_waitstatus (wstat))
2554 {
2555 child->stop_pc = get_pc (child);
2556 if (handle_extended_wait (&child, wstat))
2557 {
2558 /* The event has been handled, so just return without
2559 reporting it. */
2560 return NULL;
2561 }
2562 }
2563
2564 if (linux_wstatus_maybe_breakpoint (wstat))
2565 {
2566 if (save_stop_reason (child))
2567 have_stop_pc = 1;
2568 }
2569
2570 if (!have_stop_pc)
2571 child->stop_pc = get_pc (child);
2572
2573 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2574 && child->stop_expected)
2575 {
2576 if (debug_threads)
2577 debug_printf ("Expected stop.\n");
2578 child->stop_expected = 0;
2579
2580 if (thread->last_resume_kind == resume_stop)
2581 {
2582 /* We want to report the stop to the core. Treat the
2583 SIGSTOP as a normal event. */
2584 if (debug_threads)
2585 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2586 target_pid_to_str (ptid_of (thread)));
2587 }
2588 else if (stopping_threads != NOT_STOPPING_THREADS)
2589 {
2590 /* Stopping threads. We don't want this SIGSTOP to end up
2591 pending. */
2592 if (debug_threads)
2593 debug_printf ("LLW: SIGSTOP caught for %s "
2594 "while stopping threads.\n",
2595 target_pid_to_str (ptid_of (thread)));
2596 return NULL;
2597 }
2598 else
2599 {
2600 /* This is a delayed SIGSTOP. Filter out the event. */
2601 if (debug_threads)
2602 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2603 child->stepping ? "step" : "continue",
2604 target_pid_to_str (ptid_of (thread)));
2605
2606 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2607 return NULL;
2608 }
2609 }
2610
2611 child->status_pending_p = 1;
2612 child->status_pending = wstat;
2613 return child;
2614 }
2615
2616 /* Return true if THREAD is doing hardware single step. */
2617
2618 static int
2619 maybe_hw_step (struct thread_info *thread)
2620 {
2621 if (can_hardware_single_step ())
2622 return 1;
2623 else
2624 {
2625 /* GDBserver must insert a reinsert breakpoint for software
2626 single step. */
2627 gdb_assert (has_reinsert_breakpoints (thread));
2628 return 0;
2629 }
2630 }
2631
2632 /* Resume LWPs that are currently stopped without any pending status
2633 to report, but are resumed from the core's perspective. */
2634
2635 static void
2636 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2637 {
2638 struct thread_info *thread = (struct thread_info *) entry;
2639 struct lwp_info *lp = get_thread_lwp (thread);
2640
2641 if (lp->stopped
2642 && !lp->suspended
2643 && !lp->status_pending_p
2644 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2645 {
2646 int step = 0;
2647
2648 if (thread->last_resume_kind == resume_step)
2649 step = maybe_hw_step (thread);
2650
2651 if (debug_threads)
2652 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2653 target_pid_to_str (ptid_of (thread)),
2654 paddress (lp->stop_pc),
2655 step);
2656
2657 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2658 }
2659 }
2660
2661 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2662 match FILTER_PTID (leaving others pending). The PTIDs can be:
2663 minus_one_ptid, to specify any child; a pid PTID, specifying all
2664 lwps of a thread group; or a PTID representing a single lwp. Store
2665 the stop status through the status pointer WSTAT. OPTIONS is
2666 passed to the waitpid call. Return 0 if no event was found and
2667 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2668 were found. Return the PID of the stopped child otherwise. */
2669
2670 static int
2671 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2672 int *wstatp, int options)
2673 {
2674 struct thread_info *event_thread;
2675 struct lwp_info *event_child, *requested_child;
2676 sigset_t block_mask, prev_mask;
2677
2678 retry:
2679 /* N.B. event_thread points to the thread_info struct that contains
2680 event_child. Keep them in sync. */
2681 event_thread = NULL;
2682 event_child = NULL;
2683 requested_child = NULL;
2684
2685 /* Check for a lwp with a pending status. */
2686
2687 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2688 {
2689 event_thread = (struct thread_info *)
2690 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2691 if (event_thread != NULL)
2692 event_child = get_thread_lwp (event_thread);
2693 if (debug_threads && event_thread)
2694 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2695 }
2696 else if (!ptid_equal (filter_ptid, null_ptid))
2697 {
2698 requested_child = find_lwp_pid (filter_ptid);
2699
2700 if (stopping_threads == NOT_STOPPING_THREADS
2701 && requested_child->status_pending_p
2702 && requested_child->collecting_fast_tracepoint)
2703 {
2704 enqueue_one_deferred_signal (requested_child,
2705 &requested_child->status_pending);
2706 requested_child->status_pending_p = 0;
2707 requested_child->status_pending = 0;
2708 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2709 }
2710
2711 if (requested_child->suspended
2712 && requested_child->status_pending_p)
2713 {
2714 internal_error (__FILE__, __LINE__,
2715 "requesting an event out of a"
2716 " suspended child?");
2717 }
2718
2719 if (requested_child->status_pending_p)
2720 {
2721 event_child = requested_child;
2722 event_thread = get_lwp_thread (event_child);
2723 }
2724 }
2725
2726 if (event_child != NULL)
2727 {
2728 if (debug_threads)
2729 debug_printf ("Got an event from pending child %ld (%04x)\n",
2730 lwpid_of (event_thread), event_child->status_pending);
2731 *wstatp = event_child->status_pending;
2732 event_child->status_pending_p = 0;
2733 event_child->status_pending = 0;
2734 current_thread = event_thread;
2735 return lwpid_of (event_thread);
2736 }
2737
2738 /* But if we don't find a pending event, we'll have to wait.
2739
2740 We only enter this loop if no process has a pending wait status.
2741 Thus any action taken in response to a wait status inside this
2742 loop is responding as soon as we detect the status, not after any
2743 pending events. */
2744
2745 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2746 all signals while here. */
2747 sigfillset (&block_mask);
2748 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2749
2750 /* Always pull all events out of the kernel. We'll randomly select
2751 an event LWP out of all that have events, to prevent
2752 starvation. */
2753 while (event_child == NULL)
2754 {
2755 pid_t ret = 0;
2756
2757 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2758 quirks:
2759
2760 - If the thread group leader exits while other threads in the
2761 thread group still exist, waitpid(TGID, ...) hangs. That
2762 waitpid won't return an exit status until the other threads
2763 in the group are reaped.
2764
2765 - When a non-leader thread execs, that thread just vanishes
2766 without reporting an exit (so we'd hang if we waited for it
2767 explicitly in that case). The exec event is reported to
2768 the TGID pid. */
2769 errno = 0;
2770 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2771
2772 if (debug_threads)
2773 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2774 ret, errno ? strerror (errno) : "ERRNO-OK");
2775
2776 if (ret > 0)
2777 {
2778 if (debug_threads)
2779 {
2780 debug_printf ("LLW: waitpid %ld received %s\n",
2781 (long) ret, status_to_str (*wstatp));
2782 }
2783
2784 /* Filter all events. IOW, leave all events pending. We'll
2785 randomly select an event LWP out of all that have events
2786 below. */
2787 linux_low_filter_event (ret, *wstatp);
2788 /* Retry until nothing comes out of waitpid. A single
2789 SIGCHLD can indicate more than one child stopped. */
2790 continue;
2791 }
2792
2793 /* Now that we've pulled all events out of the kernel, resume
2794 LWPs that don't have an interesting event to report. */
2795 if (stopping_threads == NOT_STOPPING_THREADS)
2796 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2797
2798 /* ... and find an LWP with a status to report to the core, if
2799 any. */
2800 event_thread = (struct thread_info *)
2801 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2802 if (event_thread != NULL)
2803 {
2804 event_child = get_thread_lwp (event_thread);
2805 *wstatp = event_child->status_pending;
2806 event_child->status_pending_p = 0;
2807 event_child->status_pending = 0;
2808 break;
2809 }
2810
2811 /* Check for zombie thread group leaders. Those can't be reaped
2812 until all other threads in the thread group are. */
2813 check_zombie_leaders ();
2814
2815 /* If there are no resumed children left in the set of LWPs we
2816 want to wait for, bail. We can't just block in
2817 waitpid/sigsuspend, because lwps might have been left stopped
2818 in trace-stop state, and we'd be stuck forever waiting for
2819 their status to change (which would only happen if we resumed
2820 them). Even if WNOHANG is set, this return code is preferred
2821 over 0 (below), as it is more detailed. */
2822 if ((find_inferior (&all_threads,
2823 not_stopped_callback,
2824 &wait_ptid) == NULL))
2825 {
2826 if (debug_threads)
2827 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2828 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2829 return -1;
2830 }
2831
2832 /* No interesting event to report to the caller. */
2833 if ((options & WNOHANG))
2834 {
2835 if (debug_threads)
2836 debug_printf ("WNOHANG set, no event found\n");
2837
2838 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2839 return 0;
2840 }
2841
2842 /* Block until we get an event reported with SIGCHLD. */
2843 if (debug_threads)
2844 debug_printf ("sigsuspend'ing\n");
2845
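/* sigsuspend atomically swaps in PREV_MASK and waits for a signal,
   closing the race where a SIGCHLD delivered between the last
   waitpid above and the suspension would otherwise be missed. */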
2846 sigsuspend (&prev_mask);
2847 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2848 goto retry;
2849 }
2850
2851 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2852
2853 current_thread = event_thread;
2854
2855 return lwpid_of (event_thread);
2856 }
2857
2858 /* Wait for an event from child(ren) PTID. PTIDs can be:
2859 minus_one_ptid, to specify any child; a pid PTID, specifying all
2860 lwps of a thread group; or a PTID representing a single lwp. Store
2861 the stop status through the status pointer WSTAT. OPTIONS is
2862 passed to the waitpid call. Return 0 if no event was found and
2863 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2864 were found. Return the PID of the stopped child otherwise. */
2865
2866 static int
2867 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2868 {
2869 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2870 }
2871
2872 /* Count the LWPs that have had events. */
2873
2874 static int
2875 count_events_callback (struct inferior_list_entry *entry, void *data)
2876 {
2877 struct thread_info *thread = (struct thread_info *) entry;
2878 struct lwp_info *lp = get_thread_lwp (thread);
2879 int *count = (int *) data;
2880
2881 gdb_assert (count != NULL);
2882
2883 /* Count only resumed LWPs that have an event pending. */
2884 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2885 && lp->status_pending_p)
2886 (*count)++;
2887
2888 return 0;
2889 }
2890
2891 /* Select the LWP (if any) that is currently being single-stepped. */
2892
2893 static int
2894 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2895 {
2896 struct thread_info *thread = (struct thread_info *) entry;
2897 struct lwp_info *lp = get_thread_lwp (thread);
2898
2899 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2900 && thread->last_resume_kind == resume_step
2901 && lp->status_pending_p)
2902 return 1;
2903 else
2904 return 0;
2905 }
2906
2907 /* Select the Nth LWP that has had an event. */
2908
2909 static int
2910 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2911 {
2912 struct thread_info *thread = (struct thread_info *) entry;
2913 struct lwp_info *lp = get_thread_lwp (thread);
2914 int *selector = (int *) data;
2915
2916 gdb_assert (selector != NULL);
2917
2918 /* Select only resumed LWPs that have an event pending. */
2919 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2920 && lp->status_pending_p)
2921 if ((*selector)-- == 0)
2922 return 1;
2923
2924 return 0;
2925 }
2926
2927 /* Select one LWP out of those that have events pending. */
2928
2929 static void
2930 select_event_lwp (struct lwp_info **orig_lp)
2931 {
2932 int num_events = 0;
2933 int random_selector;
2934 struct thread_info *event_thread = NULL;
2935
2936 /* In all-stop, give preference to the LWP that is being
2937 single-stepped. There will be at most one, and it's the LWP that
2938 the core is most interested in. If we didn't do this, then we'd
2939 have to handle pending step SIGTRAPs somehow in case the core
2940 later continues the previously-stepped thread, otherwise we'd
2941 report the pending SIGTRAP, and the core, not having stepped the
2942 thread, wouldn't understand what the trap was for, and therefore
2943 would report it to the user as a random signal. */
2944 if (!non_stop)
2945 {
2946 event_thread
2947 = (struct thread_info *) find_inferior (&all_threads,
2948 select_singlestep_lwp_callback,
2949 NULL);
2950 if (event_thread != NULL)
2951 {
2952 if (debug_threads)
2953 debug_printf ("SEL: Select single-step %s\n",
2954 target_pid_to_str (ptid_of (event_thread)));
2955 }
2956 }
2957 if (event_thread == NULL)
2958 {
2959 /* No single-stepping LWP. Select one at random, out of those
2960 which have had events. */
2961
2962 /* First see how many events we have. */
2963 find_inferior (&all_threads, count_events_callback, &num_events);
2964 gdb_assert (num_events > 0);
2965
2966 /* Now randomly pick a LWP out of those that have had
2967 events. */
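/* Scale rand's [0, RAND_MAX] range down to [0, num_events) by
   multiplying first; this is the usual trick to avoid the bias a
   plain modulo would introduce. */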
2968 random_selector = (int)
2969 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2970
2971 if (debug_threads && num_events > 1)
2972 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2973 num_events, random_selector);
2974
2975 event_thread
2976 = (struct thread_info *) find_inferior (&all_threads,
2977 select_event_lwp_callback,
2978 &random_selector);
2979 }
2980
2981 if (event_thread != NULL)
2982 {
2983 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2984
2985 /* Switch the event LWP. */
2986 *orig_lp = event_lp;
2987 }
2988 }
2989
2990 /* Decrement the suspend count of an LWP. */
2991
2992 static int
2993 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2994 {
2995 struct thread_info *thread = (struct thread_info *) entry;
2996 struct lwp_info *lwp = get_thread_lwp (thread);
2997
2998 /* Ignore EXCEPT. */
2999 if (lwp == except)
3000 return 0;
3001
3002 lwp_suspended_decr (lwp);
3003 return 0;
3004 }
3005
3006 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3007 non-NULL. */
3008
3009 static void
3010 unsuspend_all_lwps (struct lwp_info *except)
3011 {
3012 find_inferior (&all_threads, unsuspend_one_lwp, except);
3013 }
3014
3015 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3016 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3017 void *data);
3018 static int lwp_running (struct inferior_list_entry *entry, void *data);
3019 static ptid_t linux_wait_1 (ptid_t ptid,
3020 struct target_waitstatus *ourstatus,
3021 int target_options);
3022
3023 /* Stabilize threads (move out of jump pads).
3024
3025 If a thread is midway collecting a fast tracepoint, we need to
3026 finish the collection and move it out of the jump pad before
3027 reporting the signal.
3028
3029 This avoids recursion while collecting (when a signal arrives
3030 midway, and the signal handler itself collects), which would trash
3031 the trace buffer. In case the user set a breakpoint in a signal
3032 handler, this avoids the backtrace showing the jump pad, etc.
3033 Most importantly, there are certain things we can't do safely if
3034 threads are stopped in a jump pad (or in its callees). For
3035 example:
3036
3037 - starting a new trace run. A thread still collecting the
3038 previous run could trash the trace buffer when resumed. The trace
3039 buffer control structures would have been reset but the thread had
3040 no way to tell. The thread could even be midway through memcpy'ing
3041 to the buffer, which would mean that when resumed, it would clobber
3042 the trace buffer that had been set up for a new run.
3043
3044 - we can't rewrite/reuse the jump pads for new tracepoints
3045 safely. Say you do tstart while a thread is stopped midway through
3046 collecting. When the thread is later resumed, it finishes the
3047 collection, and returns to the jump pad, to execute the original
3048 instruction that was under the tracepoint jump at the time the
3049 older run had been started. If the jump pad had been rewritten
3050 since for something else in the new run, the thread would now
3051 execute the wrong / random instructions. */
3052
3053 static void
3054 linux_stabilize_threads (void)
3055 {
3056 struct thread_info *saved_thread;
3057 struct thread_info *thread_stuck;
3058
3059 thread_stuck
3060 = (struct thread_info *) find_inferior (&all_threads,
3061 stuck_in_jump_pad_callback,
3062 NULL);
3063 if (thread_stuck != NULL)
3064 {
3065 if (debug_threads)
3066 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3067 lwpid_of (thread_stuck));
3068 return;
3069 }
3070
3071 saved_thread = current_thread;
3072
3073 stabilizing_threads = 1;
3074
3075 /* Kick 'em all. */
3076 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3077
3078 /* Loop until all are stopped out of the jump pads. */
3079 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3080 {
3081 struct target_waitstatus ourstatus;
3082 struct lwp_info *lwp;
3083 int wstat;
3084
3085 /* Note that we go through the full wait event loop. While
3086 moving threads out of jump pad, we need to be able to step
3087 over internal breakpoints and such. */
3088 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3089
3090 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3091 {
3092 lwp = get_thread_lwp (current_thread);
3093
3094 /* Lock it. */
3095 lwp_suspended_inc (lwp);
3096
3097 if (ourstatus.value.sig != GDB_SIGNAL_0
3098 || current_thread->last_resume_kind == resume_stop)
3099 {
3100 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3101 enqueue_one_deferred_signal (lwp, &wstat);
3102 }
3103 }
3104 }
3105
3106 unsuspend_all_lwps (NULL);
3107
3108 stabilizing_threads = 0;
3109
3110 current_thread = saved_thread;
3111
3112 if (debug_threads)
3113 {
3114 thread_stuck
3115 = (struct thread_info *) find_inferior (&all_threads,
3116 stuck_in_jump_pad_callback,
3117 NULL);
3118 if (thread_stuck != NULL)
3119 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3120 lwpid_of (thread_stuck));
3121 }
3122 }
3123
3124 /* Convenience function that is called when the kernel reports an
3125 event that is not passed out to GDB. */
3126
3127 static ptid_t
3128 ignore_event (struct target_waitstatus *ourstatus)
3129 {
3130 /* If we got an event, there may still be others, as a single
3131 SIGCHLD can indicate more than one child stopped. This forces
3132 another target_wait call. */
3133 async_file_mark ();
3134
3135 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3136 return null_ptid;
3137 }
3138
3139 /* Convenience function that is called when the kernel reports an exit
3140 event. This decides whether to report the event to GDB as a
3141 process exit event, a thread exit event, or to suppress the
3142 event. */
3143
3144 static ptid_t
3145 filter_exit_event (struct lwp_info *event_child,
3146 struct target_waitstatus *ourstatus)
3147 {
3148 struct thread_info *thread = get_lwp_thread (event_child);
3149 ptid_t ptid = ptid_of (thread);
3150
3151 if (!last_thread_of_process_p (pid_of (thread)))
3152 {
3153 if (report_thread_events)
3154 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3155 else
3156 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3157
3158 delete_lwp (event_child);
3159 }
3160 return ptid;
3161 }
3162
3163 /* Returns 1 if GDB is interested in any event_child syscalls. */
3164
3165 static int
3166 gdb_catching_syscalls_p (struct lwp_info *event_child)
3167 {
3168 struct thread_info *thread = get_lwp_thread (event_child);
3169 struct process_info *proc = get_thread_process (thread);
3170
3171 return !VEC_empty (int, proc->syscalls_to_catch);
3172 }
3173
3174 /* Returns 1 if GDB is interested in the event_child syscall.
3175 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3176
3177 static int
3178 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3179 {
3180 int i, iter;
3181 int sysno;
3182 struct thread_info *thread = get_lwp_thread (event_child);
3183 struct process_info *proc = get_thread_process (thread);
3184
3185 if (VEC_empty (int, proc->syscalls_to_catch))
3186 return 0;
3187
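/* An ANY_SYSCALL entry means GDB asked to catch all syscalls
   ("catch syscall" with no argument). */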
3188 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3189 return 1;
3190
3191 get_syscall_trapinfo (event_child, &sysno);
3192 for (i = 0;
3193 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3194 i++)
3195 if (iter == sysno)
3196 return 1;
3197
3198 return 0;
3199 }
3200
3201 /* Wait for process, returns status. */
3202
3203 static ptid_t
3204 linux_wait_1 (ptid_t ptid,
3205 struct target_waitstatus *ourstatus, int target_options)
3206 {
3207 int w;
3208 struct lwp_info *event_child;
3209 int options;
3210 int pid;
3211 int step_over_finished;
3212 int bp_explains_trap;
3213 int maybe_internal_trap;
3214 int report_to_gdb;
3215 int trace_event;
3216 int in_step_range;
3217 int any_resumed;
3218
3219 if (debug_threads)
3220 {
3221 debug_enter ();
3222 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3223 }
3224
3225 /* Translate generic target options into linux options. */
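/* __WALL waits for all children, whether they were created as
   "clone" children or ordinary fork children. */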
3226 options = __WALL;
3227 if (target_options & TARGET_WNOHANG)
3228 options |= WNOHANG;
3229
3230 bp_explains_trap = 0;
3231 trace_event = 0;
3232 in_step_range = 0;
3233 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3234
3235 /* Find a resumed LWP, if any. */
3236 if (find_inferior (&all_threads,
3237 status_pending_p_callback,
3238 &minus_one_ptid) != NULL)
3239 any_resumed = 1;
3240 else if ((find_inferior (&all_threads,
3241 not_stopped_callback,
3242 &minus_one_ptid) != NULL))
3243 any_resumed = 1;
3244 else
3245 any_resumed = 0;
3246
3247 if (ptid_equal (step_over_bkpt, null_ptid))
3248 pid = linux_wait_for_event (ptid, &w, options);
3249 else
3250 {
3251 if (debug_threads)
3252 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3253 target_pid_to_str (step_over_bkpt));
3254 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3255 }
3256
3257 if (pid == 0 || (pid == -1 && !any_resumed))
3258 {
3259 gdb_assert (target_options & TARGET_WNOHANG);
3260
3261 if (debug_threads)
3262 {
3263 debug_printf ("linux_wait_1 ret = null_ptid, "
3264 "TARGET_WAITKIND_IGNORE\n");
3265 debug_exit ();
3266 }
3267
3268 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3269 return null_ptid;
3270 }
3271 else if (pid == -1)
3272 {
3273 if (debug_threads)
3274 {
3275 debug_printf ("linux_wait_1 ret = null_ptid, "
3276 "TARGET_WAITKIND_NO_RESUMED\n");
3277 debug_exit ();
3278 }
3279
3280 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3281 return null_ptid;
3282 }
3283
3284 event_child = get_thread_lwp (current_thread);
3285
3286 /* linux_wait_for_event only returns an exit status for the last
3287 child of a process. Report it. */
3288 if (WIFEXITED (w) || WIFSIGNALED (w))
3289 {
3290 if (WIFEXITED (w))
3291 {
3292 ourstatus->kind = TARGET_WAITKIND_EXITED;
3293 ourstatus->value.integer = WEXITSTATUS (w);
3294
3295 if (debug_threads)
3296 {
3297 debug_printf ("linux_wait_1 ret = %s, exited with "
3298 "retcode %d\n",
3299 target_pid_to_str (ptid_of (current_thread)),
3300 WEXITSTATUS (w));
3301 debug_exit ();
3302 }
3303 }
3304 else
3305 {
3306 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3307 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3308
3309 if (debug_threads)
3310 {
3311 debug_printf ("linux_wait_1 ret = %s, terminated with "
3312 "signal %d\n",
3313 target_pid_to_str (ptid_of (current_thread)),
3314 WTERMSIG (w));
3315 debug_exit ();
3316 }
3317 }
3318
3319 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3320 return filter_exit_event (event_child, ourstatus);
3321
3322 return ptid_of (current_thread);
3323 }
3324
3325 /* If a step-over executed a breakpoint instruction, then in the case
3326 of a hardware single step it means a gdb/gdbserver breakpoint had
3327 been planted on top of a permanent breakpoint; in the case of a
3328 software single step it may just mean that gdbserver hit the
3329 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3330 to point at the breakpoint address.
3331 So in the case of hardware single step, advance the PC manually
3332 past the breakpoint, and in the case of software single step,
3333 advance only if it's not the reinsert breakpoint we are hitting.
3334 This prevents a program from trapping on a permanent breakpoint
3335 forever. */
3336 if (!ptid_equal (step_over_bkpt, null_ptid)
3337 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3338 && (event_child->stepping
3339 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3340 {
3341 int increment_pc = 0;
3342 int breakpoint_kind = 0;
3343 CORE_ADDR stop_pc = event_child->stop_pc;
3344
3345 breakpoint_kind =
3346 the_target->breakpoint_kind_from_current_state (&stop_pc);
3347 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3348
3349 if (debug_threads)
3350 {
3351 debug_printf ("step-over for %s executed software breakpoint\n",
3352 target_pid_to_str (ptid_of (current_thread)));
3353 }
3354
3355 if (increment_pc != 0)
3356 {
3357 struct regcache *regcache
3358 = get_thread_regcache (current_thread, 1);
3359
3360 event_child->stop_pc += increment_pc;
3361 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3362
3363 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3364 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3365 }
3366 }
3367
3368 /* If this event was not handled before, and is not a SIGTRAP, we
3369 report it. SIGILL and SIGSEGV are also treated as traps in case
3370 a breakpoint is inserted at the current PC. If this target does
3371 not support internal breakpoints at all, we also report the
3372 SIGTRAP without further processing; it's of no concern to us. */
3373 maybe_internal_trap
3374 = (supports_breakpoints ()
3375 && (WSTOPSIG (w) == SIGTRAP
3376 || ((WSTOPSIG (w) == SIGILL
3377 || WSTOPSIG (w) == SIGSEGV)
3378 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3379
3380 if (maybe_internal_trap)
3381 {
3382 /* Handle anything that requires bookkeeping before deciding to
3383 report the event or continue waiting. */
3384
3385 /* First check if we can explain the SIGTRAP with an internal
3386 breakpoint, or if we should possibly report the event to GDB.
3387 Do this before anything that may remove or insert a
3388 breakpoint. */
3389 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3390
3391 /* We have a SIGTRAP, possibly a step-over dance has just
3392 finished. If so, tweak the state machine accordingly,
3393 reinsert breakpoints and delete any reinsert (software
3394 single-step) breakpoints. */
3395 step_over_finished = finish_step_over (event_child);
3396
3397 /* Now invoke the callbacks of any internal breakpoints there. */
3398 check_breakpoints (event_child->stop_pc);
3399
3400 /* Handle tracepoint data collecting. This may overflow the
3401 trace buffer, and cause a tracing stop, removing
3402 breakpoints. */
3403 trace_event = handle_tracepoints (event_child);
3404
3405 if (bp_explains_trap)
3406 {
3407 if (debug_threads)
3408 debug_printf ("Hit a gdbserver breakpoint.\n");
3409 }
3410 }
3411 else
3412 {
3413 /* We have some other signal, possibly a step-over dance was in
3414 progress, and it should be cancelled too. */
3415 step_over_finished = finish_step_over (event_child);
3416 }
3417
3418 /* We have all the data we need. Either report the event to GDB, or
3419 resume threads and keep waiting for more. */
3420
3421 /* If we're collecting a fast tracepoint, finish the collection and
3422 move out of the jump pad before delivering a signal. See
3423 linux_stabilize_threads. */
3424
3425 if (WIFSTOPPED (w)
3426 && WSTOPSIG (w) != SIGTRAP
3427 && supports_fast_tracepoints ()
3428 && agent_loaded_p ())
3429 {
3430 if (debug_threads)
3431 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3432 "to defer or adjust it.\n",
3433 WSTOPSIG (w), lwpid_of (current_thread));
3434
3435 /* Allow debugging the jump pad itself. */
3436 if (current_thread->last_resume_kind != resume_step
3437 && maybe_move_out_of_jump_pad (event_child, &w))
3438 {
3439 enqueue_one_deferred_signal (event_child, &w);
3440
3441 if (debug_threads)
3442 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3443 WSTOPSIG (w), lwpid_of (current_thread));
3444
3445 linux_resume_one_lwp (event_child, 0, 0, NULL);
3446
3447 return ignore_event (ourstatus);
3448 }
3449 }
3450
3451 if (event_child->collecting_fast_tracepoint)
3452 {
3453 if (debug_threads)
3454 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3455 "Check if we're already there.\n",
3456 lwpid_of (current_thread),
3457 event_child->collecting_fast_tracepoint);
3458
3459 trace_event = 1;
3460
3461 event_child->collecting_fast_tracepoint
3462 = linux_fast_tracepoint_collecting (event_child, NULL);
3463
3464 if (event_child->collecting_fast_tracepoint != 1)
3465 {
3466 /* No longer need this breakpoint. */
3467 if (event_child->exit_jump_pad_bkpt != NULL)
3468 {
3469 if (debug_threads)
3470 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3471 "stopping all threads momentarily.\n");
3472
3473 /* Other running threads could hit this breakpoint.
3474 We don't handle moribund locations like GDB does,
3475 instead we always pause all threads when removing
3476 breakpoints, so that any step-over or
3477 decr_pc_after_break adjustment is always taken
3478 care of while the breakpoint is still
3479 inserted. */
3480 stop_all_lwps (1, event_child);
3481
3482 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3483 event_child->exit_jump_pad_bkpt = NULL;
3484
3485 unstop_all_lwps (1, event_child);
3486
3487 gdb_assert (event_child->suspended >= 0);
3488 }
3489 }
3490
3491 if (event_child->collecting_fast_tracepoint == 0)
3492 {
3493 if (debug_threads)
3494 debug_printf ("fast tracepoint finished "
3495 "collecting successfully.\n");
3496
3497 /* We may have a deferred signal to report. */
3498 if (dequeue_one_deferred_signal (event_child, &w))
3499 {
3500 if (debug_threads)
3501 debug_printf ("dequeued one signal.\n");
3502 }
3503 else
3504 {
3505 if (debug_threads)
3506 debug_printf ("no deferred signals.\n");
3507
3508 if (stabilizing_threads)
3509 {
3510 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3511 ourstatus->value.sig = GDB_SIGNAL_0;
3512
3513 if (debug_threads)
3514 {
3515 debug_printf ("linux_wait_1 ret = %s, stopped "
3516 "while stabilizing threads\n",
3517 target_pid_to_str (ptid_of (current_thread)));
3518 debug_exit ();
3519 }
3520
3521 return ptid_of (current_thread);
3522 }
3523 }
3524 }
3525 }
3526
3527 /* Check whether GDB would be interested in this event. */
3528
3529 /* Check if GDB is interested in this syscall. */
3530 if (WIFSTOPPED (w)
3531 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3532 && !gdb_catch_this_syscall_p (event_child))
3533 {
3534 if (debug_threads)
3535 {
3536 debug_printf ("Ignored syscall for LWP %ld.\n",
3537 lwpid_of (current_thread));
3538 }
3539
3540 linux_resume_one_lwp (event_child, event_child->stepping,
3541 0, NULL);
3542 return ignore_event (ourstatus);
3543 }
3544
3545 /* If GDB is not interested in this signal, don't stop other
3546 threads, and don't report it to GDB. Just resume the inferior
3547 right away. We do this for threading-related signals as well as
3548 any that GDB specifically requested we ignore. But never ignore
3549 SIGSTOP if we sent it ourselves, and do not ignore signals when
3550 stepping - they may require special handling to skip the signal
3551 handler. Also never ignore signals that could be caused by a
3552 breakpoint. */
3553 if (WIFSTOPPED (w)
3554 && current_thread->last_resume_kind != resume_step
3555 && (
3556 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3557 (current_process ()->priv->thread_db != NULL
3558 && (WSTOPSIG (w) == __SIGRTMIN
3559 || WSTOPSIG (w) == __SIGRTMIN + 1))
3560 ||
3561 #endif
3562 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3563 && !(WSTOPSIG (w) == SIGSTOP
3564 && current_thread->last_resume_kind == resume_stop)
3565 && !linux_wstatus_maybe_breakpoint (w))))
3566 {
3567 siginfo_t info, *info_p;
3568
3569 if (debug_threads)
3570 debug_printf ("Ignored signal %d for LWP %ld.\n",
3571 WSTOPSIG (w), lwpid_of (current_thread));
3572
3573 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3574 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3575 info_p = &info;
3576 else
3577 info_p = NULL;
3578
3579 if (step_over_finished)
3580 {
3581 /* We cancelled this thread's step-over above. We still
3582 need to unsuspend all other LWPs, and set them back
3583 running again while the signal handler runs. */
3584 unsuspend_all_lwps (event_child);
3585
3586 /* Enqueue the pending signal info so that proceed_all_lwps
3587 doesn't lose it. */
3588 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3589
3590 proceed_all_lwps ();
3591 }
3592 else
3593 {
3594 linux_resume_one_lwp (event_child, event_child->stepping,
3595 WSTOPSIG (w), info_p);
3596 }
3597 return ignore_event (ourstatus);
3598 }
3599
3600 /* Note that all addresses are always "out of the step range" when
3601 there's no range to begin with. */
3602 in_step_range = lwp_in_step_range (event_child);
3603
3604 /* If GDB wanted this thread to single step, and the thread is out
3605 of the step range, we always want to report the SIGTRAP, and let
3606 GDB handle it. Watchpoints should always be reported. So should
3607 signals we can't explain. A SIGTRAP we can't explain could be a
3608 GDB breakpoint --- we may or may not support Z0 breakpoints. If
3609 we do, we'll be able to handle GDB breakpoints on top of internal
3610 breakpoints, by handling the internal breakpoint and still
3611 reporting the event to GDB. If we don't, we're out of luck; GDB
3612 won't see the breakpoint hit. If we see a single-step event but
3613 the thread should be continuing, don't pass the trap to gdb.
3614 That indicates that we had previously finished a single-step but
3615 left the single-step pending -- see
3616 complete_ongoing_step_over. */
3617 report_to_gdb = (!maybe_internal_trap
3618 || (current_thread->last_resume_kind == resume_step
3619 && !in_step_range)
3620 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3621 || (!in_step_range
3622 && !bp_explains_trap
3623 && !trace_event
3624 && !step_over_finished
3625 && !(current_thread->last_resume_kind == resume_continue
3626 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3627 || (gdb_breakpoint_here (event_child->stop_pc)
3628 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3629 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3630 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3631
3632 run_breakpoint_commands (event_child->stop_pc);
3633
3634 /* We found no reason GDB would want us to stop. We either hit one
3635 of our own breakpoints, or finished an internal step GDB
3636 shouldn't know about. */
3637 if (!report_to_gdb)
3638 {
3639 if (debug_threads)
3640 {
3641 if (bp_explains_trap)
3642 debug_printf ("Hit a gdbserver breakpoint.\n");
3643 if (step_over_finished)
3644 debug_printf ("Step-over finished.\n");
3645 if (trace_event)
3646 debug_printf ("Tracepoint event.\n");
3647 if (lwp_in_step_range (event_child))
3648 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3649 paddress (event_child->stop_pc),
3650 paddress (event_child->step_range_start),
3651 paddress (event_child->step_range_end));
3652 }
3653
3654 /* We're not reporting this breakpoint to GDB, so apply the
3655 decr_pc_after_break adjustment to the inferior's regcache
3656 ourselves. */
3657
3658 if (the_low_target.set_pc != NULL)
3659 {
3660 struct regcache *regcache
3661 = get_thread_regcache (current_thread, 1);
3662 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3663 }
3664
3665 /* We may have finished stepping over a breakpoint. If so,
3666 we've stopped and suspended all LWPs momentarily except the
3667 stepping one. This is where we resume them all again. We're
3668 going to keep waiting, so use proceed, which handles stepping
3669 over the next breakpoint. */
3670 if (debug_threads)
3671 debug_printf ("proceeding all threads.\n");
3672
3673 if (step_over_finished)
3674 unsuspend_all_lwps (event_child);
3675
3676 proceed_all_lwps ();
3677 return ignore_event (ourstatus);
3678 }
3679
3680 if (debug_threads)
3681 {
3682 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3683 {
3684 char *str;
3685
3686 str = target_waitstatus_to_string (&event_child->waitstatus);
3687 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3688 lwpid_of (get_lwp_thread (event_child)), str);
3689 xfree (str);
3690 }
3691 if (current_thread->last_resume_kind == resume_step)
3692 {
3693 if (event_child->step_range_start == event_child->step_range_end)
3694 debug_printf ("GDB wanted to single-step, reporting event.\n");
3695 else if (!lwp_in_step_range (event_child))
3696 debug_printf ("Out of step range, reporting event.\n");
3697 }
3698 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3699 debug_printf ("Stopped by watchpoint.\n");
3700 else if (gdb_breakpoint_here (event_child->stop_pc))
3701 debug_printf ("Stopped by GDB breakpoint.\n");
3703 debug_printf ("Hit a non-gdbserver trap event.\n");
3704 }
3705
3706 /* Alright, we're going to report a stop. */
3707
3708 /* Remove reinsert breakpoints. */
3709 if (can_software_single_step ())
3710 {
3711 /* Decide whether to remove reinsert breakpoints. If we do, stop
3712 all lwps first, so that other threads won't hit the breakpoint
3713 in the stale memory. */
3714 int remove_reinsert_breakpoints_p = 0;
3715
3716 if (non_stop)
3717 {
3718 remove_reinsert_breakpoints_p
3719 = has_reinsert_breakpoints (current_thread);
3720 }
3721 else
3722 {
3723 /* In all-stop, a stop reply cancels all previous resume
3724 requests. Delete all reinsert breakpoints. */
3725 struct inferior_list_entry *inf, *tmp;
3726
3727 ALL_INFERIORS (&all_threads, inf, tmp)
3728 {
3729 struct thread_info *thread = (struct thread_info *) inf;
3730
3731 if (has_reinsert_breakpoints (thread))
3732 {
3733 remove_reinsert_breakpoints_p = 1;
3734 break;
3735 }
3736 }
3737 }
3738
3739 if (remove_reinsert_breakpoints_p)
3740 {
3741 /* If we remove reinsert breakpoints from memory, stop all lwps,
3742 so that other threads won't hit the breakpoint in the stale
3743 memory. */
3744 stop_all_lwps (0, event_child);
3745
3746 if (non_stop)
3747 {
3748 gdb_assert (has_reinsert_breakpoints (current_thread));
3749 delete_reinsert_breakpoints (current_thread);
3750 }
3751 else
3752 {
3753 struct inferior_list_entry *inf, *tmp;
3754
3755 ALL_INFERIORS (&all_threads, inf, tmp)
3756 {
3757 struct thread_info *thread = (struct thread_info *) inf;
3758
3759 if (has_reinsert_breakpoints (thread))
3760 delete_reinsert_breakpoints (thread);
3761 }
3762 }
3763
3764 unstop_all_lwps (0, event_child);
3765 }
3766 }
3767
3768 if (!stabilizing_threads)
3769 {
3770 /* In all-stop, stop all threads. */
3771 if (!non_stop)
3772 stop_all_lwps (0, NULL);
3773
3774 /* If we're not waiting for a specific LWP, choose an event LWP
3775 from among those that have had events. Giving equal priority
3776 to all LWPs that have had events helps prevent
3777 starvation. */
3778 if (ptid_equal (ptid, minus_one_ptid))
3779 {
3780 event_child->status_pending_p = 1;
3781 event_child->status_pending = w;
3782
3783 select_event_lwp (&event_child);
3784
3785 /* current_thread and event_child must stay in sync. */
3786 current_thread = get_lwp_thread (event_child);
3787
3788 event_child->status_pending_p = 0;
3789 w = event_child->status_pending;
3790 }
3791
3792 if (step_over_finished)
3793 {
3794 if (!non_stop)
3795 {
3796 /* If we were doing a step-over, all other threads but
3797 the stepping one had been paused in start_step_over,
3798 with their suspend counts incremented. We don't want
3799 to do a full unstop/unpause, because we're in
3800 all-stop mode (so we want threads stopped), but we
3801 still need to unsuspend the other threads, to
3802 decrement their `suspended' count back. */
3803 unsuspend_all_lwps (event_child);
3804 }
3805 else
3806 {
3807 /* If we just finished a step-over, then all threads had
3808 been momentarily paused. In all-stop, that's fine,
3809 we want threads stopped by now anyway. In non-stop,
3810 we need to re-resume threads that GDB wanted to be
3811 running. */
3812 unstop_all_lwps (1, event_child);
3813 }
3814 }
3815
3816 /* Stabilize threads (move out of jump pads). */
3817 if (!non_stop)
3818 stabilize_threads ();
3819 }
3820 else
3821 {
3822 /* If we just finished a step-over, then all threads had been
3823 momentarily paused. In all-stop, that's fine, we want
3824 threads stopped by now anyway. In non-stop, we need to
3825 re-resume threads that GDB wanted to be running. */
3826 if (step_over_finished)
3827 unstop_all_lwps (1, event_child);
3828 }
3829
3830 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3831 {
3832 /* If the reported event is an exit, fork, vfork or exec, let
3833 GDB know. */
3834 *ourstatus = event_child->waitstatus;
3835 /* Clear the event lwp's waitstatus since we handled it already. */
3836 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3837 }
3838 else
3839 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3840
3841 /* Now that we've selected our final event LWP, un-adjust its PC if
3842 it was a software breakpoint, and the client doesn't know we can
3843 adjust the breakpoint ourselves. */
3844 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3845 && !swbreak_feature)
3846 {
3847 int decr_pc = the_low_target.decr_pc_after_break;
3848
3849 if (decr_pc != 0)
3850 {
3851 struct regcache *regcache
3852 = get_thread_regcache (current_thread, 1);
3853 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3854 }
3855 }
3856
3857 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3858 {
3859 get_syscall_trapinfo (event_child,
3860 &ourstatus->value.syscall_number);
3861 ourstatus->kind = event_child->syscall_state;
3862 }
3863 else if (current_thread->last_resume_kind == resume_stop
3864 && WSTOPSIG (w) == SIGSTOP)
3865 {
3866 /* A thread that GDB requested to stop with vCont;t stopped
3867 cleanly, so report it as SIG0. The use of
3868 SIGSTOP is an implementation detail. */
3869 ourstatus->value.sig = GDB_SIGNAL_0;
3870 }
3871 else if (current_thread->last_resume_kind == resume_stop
3872 && WSTOPSIG (w) != SIGSTOP)
3873 {
3874 /* A thread that GDB requested to stop with vCont;t, but that
3875 stopped for some other reason. */
3876 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3877 }
3878 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3879 {
3880 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3881 }
3882
3883 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3884
3885 if (debug_threads)
3886 {
3887 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3888 target_pid_to_str (ptid_of (current_thread)),
3889 ourstatus->kind, ourstatus->value.sig);
3890 debug_exit ();
3891 }
3892
3893 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3894 return filter_exit_event (event_child, ourstatus);
3895
3896 return ptid_of (current_thread);
3897 }
3898
3899 /* Get rid of any pending event in the pipe. */
3900 static void
3901 async_file_flush (void)
3902 {
3903 int ret;
3904 char buf;
3905
3906 do
3907 ret = read (linux_event_pipe[0], &buf, 1);
3908 while (ret >= 0 || (ret == -1 && errno == EINTR));
3909 }
3910
3911 /* Put something in the pipe, so the event loop wakes up. */
3912 static void
3913 async_file_mark (void)
3914 {
3915 int ret;
3916
3917 async_file_flush ();
3918
3919 do
3920 ret = write (linux_event_pipe[1], "+", 1);
3921 while (ret == 0 || (ret == -1 && errno == EINTR));
3922
3923 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3924 be awakened anyway. */
3925 }
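
/* The two helpers above implement the classic self-pipe trick: the
   event loop watches the read end of linux_event_pipe, and writing a
   single byte wakes it up. A minimal stand-alone sketch of that
   setup follows -- illustrative only, with hypothetical names, and
   assuming both pipe ends are non-blocking so the flush/mark loops
   above always terminate. */
#if 0 /* Illustrative sketch only -- not built. */
#include <fcntl.h>
#include <unistd.h>

static int example_event_pipe[2];

static int
example_setup_event_pipe (void)
{
  if (pipe (example_event_pipe) != 0)
    return -1;
  /* Non-blocking on both ends: draining or marking never stalls.  */
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  /* The read end is then registered with the select/poll-based event
     loop; an async_file_mark-style write wakes it.  */
  return 0;
}
#endif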
3926
3927 static ptid_t
3928 linux_wait (ptid_t ptid,
3929 struct target_waitstatus *ourstatus, int target_options)
3930 {
3931 ptid_t event_ptid;
3932
3933 /* Flush the async file first. */
3934 if (target_is_async_p ())
3935 async_file_flush ();
3936
3937 do
3938 {
3939 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3940 }
3941 while ((target_options & TARGET_WNOHANG) == 0
3942 && ptid_equal (event_ptid, null_ptid)
3943 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3944
3945 /* If at least one stop was reported, there may be more. A single
3946 SIGCHLD can signal more than one child stop. */
3947 if (target_is_async_p ()
3948 && (target_options & TARGET_WNOHANG) != 0
3949 && !ptid_equal (event_ptid, null_ptid))
3950 async_file_mark ();
3951
3952 return event_ptid;
3953 }
3954
3955 /* Send a signal to an LWP. */
3956
3957 static int
3958 kill_lwp (unsigned long lwpid, int signo)
3959 {
3960 int ret;
3961
3962 errno = 0;
3963 ret = syscall (__NR_tkill, lwpid, signo);
3964 if (errno == ENOSYS)
3965 {
3966 /* If tkill fails, then we are not using nptl threads, a
3967 configuration we no longer support. */
3968 perror_with_name (("tkill"));
3969 }
3970 return ret;
3971 }
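
/* kill_lwp uses the tkill syscall so the signal is directed at one
   specific thread rather than at the whole thread group, as a plain
   kill would be. For reference, newer code often prefers tgkill,
   which also takes the thread-group id so the kernel can reject a
   recycled TID; a hypothetical sketch: */
#if 0 /* Illustrative sketch only -- not built. */
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
example_tgkill (pid_t tgid, pid_t tid, int signo)
{
  /* tgkill(2) fails with ESRCH if TID is no longer a member of
     thread group TGID, avoiding the TID-reuse race that tkill(2)
     is subject to.  */
  return syscall (__NR_tgkill, tgid, tid, signo);
}
#endif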
3972
3973 void
3974 linux_stop_lwp (struct lwp_info *lwp)
3975 {
3976 send_sigstop (lwp);
3977 }
3978
3979 static void
3980 send_sigstop (struct lwp_info *lwp)
3981 {
3982 int pid;
3983
3984 pid = lwpid_of (get_lwp_thread (lwp));
3985
3986 /* If we already have a pending stop signal for this process, don't
3987 send another. */
3988 if (lwp->stop_expected)
3989 {
3990 if (debug_threads)
3991 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3992
3993 return;
3994 }
3995
3996 if (debug_threads)
3997 debug_printf ("Sending sigstop to lwp %d\n", pid);
3998
3999 lwp->stop_expected = 1;
4000 kill_lwp (pid, SIGSTOP);
4001 }
4002
4003 static int
4004 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
4005 {
4006 struct thread_info *thread = (struct thread_info *) entry;
4007 struct lwp_info *lwp = get_thread_lwp (thread);
4008
4009 /* Ignore EXCEPT. */
4010 if (lwp == except)
4011 return 0;
4012
4013 if (lwp->stopped)
4014 return 0;
4015
4016 send_sigstop (lwp);
4017 return 0;
4018 }
4019
4020 /* Increment the suspend count of an LWP, and stop it, if not stopped
4021 yet. */
4022 static int
4023 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
4024 void *except)
4025 {
4026 struct thread_info *thread = (struct thread_info *) entry;
4027 struct lwp_info *lwp = get_thread_lwp (thread);
4028
4029 /* Ignore EXCEPT. */
4030 if (lwp == except)
4031 return 0;
4032
4033 lwp_suspended_inc (lwp);
4034
4035 return send_sigstop_callback (entry, except);
4036 }
4037
4038 static void
4039 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4040 {
4041 /* Store the exit status for later. */
4042 lwp->status_pending_p = 1;
4043 lwp->status_pending = wstat;
4044
4045 /* Store in waitstatus as well, as there's nothing else to process
4046 for this event. */
4047 if (WIFEXITED (wstat))
4048 {
4049 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4050 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4051 }
4052 else if (WIFSIGNALED (wstat))
4053 {
4054 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4055 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4056 }
4057
4058 /* Prevent trying to stop it. */
4059 lwp->stopped = 1;
4060
4061 /* No further stops are expected from a dead lwp. */
4062 lwp->stop_expected = 0;
4063 }
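
/* mark_lwp_dead relies on the standard wait-status decoding rules.
   For quick reference, a stand-alone sketch of those <sys/wait.h>
   macros (the message strings are illustrative): */
#if 0 /* Illustrative sketch only -- not built. */
#include <stdio.h>
#include <sys/wait.h>

static void
example_decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited normally, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif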
4064
4065 /* Return true if LWP has exited already, and has a pending exit event
4066 to report to GDB. */
4067
4068 static int
4069 lwp_is_marked_dead (struct lwp_info *lwp)
4070 {
4071 return (lwp->status_pending_p
4072 && (WIFEXITED (lwp->status_pending)
4073 || WIFSIGNALED (lwp->status_pending)));
4074 }
4075
4076 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4077
4078 static void
4079 wait_for_sigstop (void)
4080 {
4081 struct thread_info *saved_thread;
4082 ptid_t saved_tid;
4083 int wstat;
4084 int ret;
4085
4086 saved_thread = current_thread;
4087 if (saved_thread != NULL)
4088 saved_tid = saved_thread->entry.id;
4089 else
4090 saved_tid = null_ptid; /* avoid bogus unused warning */
4091
4092 if (debug_threads)
4093 debug_printf ("wait_for_sigstop: pulling events\n");
4094
4095 /* Passing NULL_PTID as filter indicates we want all events to be
4096 left pending. Eventually this returns when there are no
4097 unwaited-for children left. */
4098 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4099 &wstat, __WALL);
4100 gdb_assert (ret == -1);
4101
4102 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4103 current_thread = saved_thread;
4104 else
4105 {
4106 if (debug_threads)
4107 debug_printf ("Previously current thread died.\n");
4108
4109 /* We can't change the current inferior behind GDB's back,
4110 otherwise, a subsequent command may apply to the wrong
4111 process. */
4112 current_thread = NULL;
4113 }
4114 }
4115
4116 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4117 move it out, because we need to report the stop event to GDB. For
4118 example, if the user puts a breakpoint in the jump pad, it's
4119 because she wants to debug it. */
4120
4121 static int
4122 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4123 {
4124 struct thread_info *thread = (struct thread_info *) entry;
4125 struct lwp_info *lwp = get_thread_lwp (thread);
4126
4127 if (lwp->suspended != 0)
4128 {
4129 internal_error (__FILE__, __LINE__,
4130 "LWP %ld is suspended, suspended=%d\n",
4131 lwpid_of (thread), lwp->suspended);
4132 }
4133 gdb_assert (lwp->stopped);
4134
4135 /* Allow debugging the jump pad, gdb_collect, etc. */
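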
4136 return (supports_fast_tracepoints ()
4137 && agent_loaded_p ()
4138 && (gdb_breakpoint_here (lwp->stop_pc)
4139 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4140 || thread->last_resume_kind == resume_step)
4141 && linux_fast_tracepoint_collecting (lwp, NULL));
4142 }
4143
4144 static void
4145 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4146 {
4147 struct thread_info *thread = (struct thread_info *) entry;
4148 struct thread_info *saved_thread;
4149 struct lwp_info *lwp = get_thread_lwp (thread);
4150 int *wstat;
4151
4152 if (lwp->suspended != 0)
4153 {
4154 internal_error (__FILE__, __LINE__,
4155 "LWP %ld is suspended, suspended=%d\n",
4156 lwpid_of (thread), lwp->suspended);
4157 }
4158 gdb_assert (lwp->stopped);
4159
4160 /* For gdb_breakpoint_here. */
4161 saved_thread = current_thread;
4162 current_thread = thread;
4163
4164 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4165
4166 /* Allow debugging the jump pad, gdb_collect, etc. */
4167 if (!gdb_breakpoint_here (lwp->stop_pc)
4168 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4169 && thread->last_resume_kind != resume_step
4170 && maybe_move_out_of_jump_pad (lwp, wstat))
4171 {
4172 if (debug_threads)
4173 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4174 lwpid_of (thread));
4175
4176 if (wstat)
4177 {
4178 lwp->status_pending_p = 0;
4179 enqueue_one_deferred_signal (lwp, wstat);
4180
4181 if (debug_threads)
4182 debug_printf ("Signal %d for LWP %ld deferred "
4183 "(in jump pad)\n",
4184 WSTOPSIG (*wstat), lwpid_of (thread));
4185 }
4186
4187 linux_resume_one_lwp (lwp, 0, 0, NULL);
4188 }
4189 else
4190 lwp_suspended_inc (lwp);
4191
4192 current_thread = saved_thread;
4193 }
4194
4195 static int
4196 lwp_running (struct inferior_list_entry *entry, void *data)
4197 {
4198 struct thread_info *thread = (struct thread_info *) entry;
4199 struct lwp_info *lwp = get_thread_lwp (thread);
4200
4201 if (lwp_is_marked_dead (lwp))
4202 return 0;
4203 if (lwp->stopped)
4204 return 0;
4205 return 1;
4206 }
4207
4208 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4209 If SUSPEND, then also increase the suspend count of every LWP,
4210 except EXCEPT. */
4211
4212 static void
4213 stop_all_lwps (int suspend, struct lwp_info *except)
4214 {
4215 /* Should not be called recursively. */
4216 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4217
4218 if (debug_threads)
4219 {
4220 debug_enter ();
4221 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4222 suspend ? "stop-and-suspend" : "stop",
4223 except != NULL
4224 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4225 : "none");
4226 }
4227
4228 stopping_threads = (suspend
4229 ? STOPPING_AND_SUSPENDING_THREADS
4230 : STOPPING_THREADS);
4231
4232 if (suspend)
4233 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4234 else
4235 find_inferior (&all_threads, send_sigstop_callback, except);
4236 wait_for_sigstop ();
4237 stopping_threads = NOT_STOPPING_THREADS;
4238
4239 if (debug_threads)
4240 {
4241 debug_printf ("stop_all_lwps done, setting stopping_threads "
4242 "back to !stopping\n");
4243 debug_exit ();
4244 }
4245 }
4246
4247 /* Enqueue one signal in the chain of signals which need to be
4248 delivered to this process on next resume. */
4249
4250 static void
4251 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4252 {
4253 struct pending_signals *p_sig = XNEW (struct pending_signals);
4254
4255 p_sig->prev = lwp->pending_signals;
4256 p_sig->signal = signal;
4257 if (info == NULL)
4258 memset (&p_sig->info, 0, sizeof (siginfo_t));
4259 else
4260 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4261 lwp->pending_signals = p_sig;
4262 }
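
/* The chain is pushed at the head through the PREV links, so the
   oldest pending signal sits at the tail; consumers (see
   linux_resume_one_lwp_throw below) walk to the end before dequeuing,
   which yields FIFO delivery. A sketch of that dequeue, mirroring
   the loop used later in this file: */
#if 0 /* Illustrative sketch only -- not built. */
static int
example_dequeue_oldest (struct lwp_info *lwp)
{
  struct pending_signals **p_sig = &lwp->pending_signals;
  int signal;

  /* Walk to the tail of the PREV-linked chain: the oldest entry.  */
  while ((*p_sig)->prev != NULL)
    p_sig = &(*p_sig)->prev;

  signal = (*p_sig)->signal;
  free (*p_sig);
  *p_sig = NULL;
  return signal;
}
#endif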
4263
4264 /* Install breakpoints for software single stepping. */
4265
4266 static void
4267 install_software_single_step_breakpoints (struct lwp_info *lwp)
4268 {
4269 int i;
4270 CORE_ADDR pc;
4271 struct thread_info *thread = get_lwp_thread (lwp);
4272 struct regcache *regcache = get_thread_regcache (thread, 1);
4273 VEC (CORE_ADDR) *next_pcs = NULL;
4274 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4275
4276 make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4277
4278 current_thread = thread;
4279 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4280
4281 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4282 set_reinsert_breakpoint (pc, current_ptid);
4283
4284 do_cleanups (old_chain);
4285 }
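
/* In effect, software single-step predicts every address the next
   instruction could transfer control to (fall-through and branch
   targets, as computed by the low target's get_next_pcs), plants a
   reinsert breakpoint at each, and resumes; whichever trap fires,
   exactly one instruction has executed. */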
4286
4287 /* Single step via hardware or software single step.
4288 Return 1 if hardware single stepping, 0 if software single stepping
4289 or can't single step. */
4290
4291 static int
4292 single_step (struct lwp_info* lwp)
4293 {
4294 int step = 0;
4295
4296 if (can_hardware_single_step ())
4297 {
4298 step = 1;
4299 }
4300 else if (can_software_single_step ())
4301 {
4302 install_software_single_step_breakpoints (lwp);
4303 step = 0;
4304 }
4305 else
4306 {
4307 if (debug_threads)
4308 debug_printf ("stepping is not implemented on this target");
4309 }
4310
4311 return step;
4312 }
4313
4314 /* The signal can be delivered to the inferior if we are not trying to
4315 finish a fast tracepoint collect. Since a signal can be delivered
4316 during the step-over, the program may enter the signal handler and
4317 trap again after returning from it. We can live with the spurious
4318 double traps. */
4319
4320 static int
4321 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4322 {
4323 return !lwp->collecting_fast_tracepoint;
4324 }
4325
4326 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4327 SIGNAL is nonzero, give it that signal. */
4328
4329 static void
4330 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4331 int step, int signal, siginfo_t *info)
4332 {
4333 struct thread_info *thread = get_lwp_thread (lwp);
4334 struct thread_info *saved_thread;
4335 int fast_tp_collecting;
4336 int ptrace_request;
4337 struct process_info *proc = get_thread_process (thread);
4338
4339 /* Note that the target description may not be initialised
4340 (proc->tdesc == NULL) at this point because the program hasn't
4341 stopped at the first instruction yet. It means GDBserver skips
4342 the extra traps from the wrapper program (see option --wrapper).
4343 Code in this function that requires register access should be
4344 guarded by a check on proc->tdesc (or equivalent). */
4345
4346 if (lwp->stopped == 0)
4347 return;
4348
4349 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4350
4351 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4352
4353 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4354
4355 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4356 user used the "jump" command, or "set $pc = foo"). */
4357 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4358 {
4359 /* Collecting 'while-stepping' actions doesn't make sense
4360 anymore. */
4361 release_while_stepping_state_list (thread);
4362 }
4363
4364 /* If we have pending signals or status, and a new signal, enqueue the
4365 signal. Also enqueue the signal if it can't be delivered to the
4366 inferior right now. */
4367 if (signal != 0
4368 && (lwp->status_pending_p
4369 || lwp->pending_signals != NULL
4370 || !lwp_signal_can_be_delivered (lwp)))
4371 {
4372 enqueue_pending_signal (lwp, signal, info);
4373
4374 /* Postpone any pending signal. It was enqueued above. */
4375 signal = 0;
4376 }
4377
4378 if (lwp->status_pending_p)
4379 {
4380 if (debug_threads)
4381 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4382 " has pending status\n",
4383 lwpid_of (thread), step ? "step" : "continue",
4384 lwp->stop_expected ? "expected" : "not expected");
4385 return;
4386 }
4387
4388 saved_thread = current_thread;
4389 current_thread = thread;
4390
4391 /* This bit needs some thinking about. If we get a signal that
4392 we must report while a single-step reinsert is still pending,
4393 we often end up resuming the thread. It might be better to
4394 (ew) allow a stack of pending events; then we could be sure that
4395 the reinsert happened right away and not lose any signals.
4396
4397 Making this stack would also shrink the window in which breakpoints are
4398 uninserted (see comment in linux_wait_for_lwp) but not enough for
4399 complete correctness, so it won't solve that problem. It may be
4400 worthwhile just to solve this one, however. */
4401 if (lwp->bp_reinsert != 0)
4402 {
4403 if (debug_threads)
4404 debug_printf (" pending reinsert at 0x%s\n",
4405 paddress (lwp->bp_reinsert));
4406
4407 if (can_hardware_single_step ())
4408 {
4409 if (fast_tp_collecting == 0)
4410 {
4411 if (step == 0)
4412 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4413 if (lwp->suspended)
4414 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4415 lwp->suspended);
4416 }
4417 }
4418
4419 step = maybe_hw_step (thread);
4420 }
4421
4422 if (fast_tp_collecting == 1)
4423 {
4424 if (debug_threads)
4425 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4426 " (exit-jump-pad-bkpt)\n",
4427 lwpid_of (thread));
4428 }
4429 else if (fast_tp_collecting == 2)
4430 {
4431 if (debug_threads)
4432 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4433 " single-stepping\n",
4434 lwpid_of (thread));
4435
4436 if (can_hardware_single_step ())
4437 step = 1;
4438 else
4439 {
4440 internal_error (__FILE__, __LINE__,
4441 "moving out of jump pad single-stepping"
4442 " not implemented on this target");
4443 }
4444 }
4445
4446 /* If we have while-stepping actions in this thread, set it stepping.
4447 If we have a signal to deliver, it may or may not be set to
4448 SIG_IGN, we don't know. Assume so, and allow collecting
4449 while-stepping into a signal handler. A possible smart thing to
4450 do would be to set an internal breakpoint at the signal return
4451 address, continue, and carry on catching this while-stepping
4452 action only when that breakpoint is hit. A future
4453 enhancement. */
4454 if (thread->while_stepping != NULL)
4455 {
4456 if (debug_threads)
4457 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4458 lwpid_of (thread));
4459
4460 step = single_step (lwp);
4461 }
4462
4463 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4464 {
4465 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4466
4467 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4468
4469 if (debug_threads)
4470 {
4471 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4472 (long) lwp->stop_pc);
4473 }
4474 }
4475
4476 /* If we have pending signals, consume one if it can be delivered to
4477 the inferior. */
4478 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4479 {
4480 struct pending_signals **p_sig;
4481
4482 p_sig = &lwp->pending_signals;
4483 while ((*p_sig)->prev != NULL)
4484 p_sig = &(*p_sig)->prev;
4485
4486 signal = (*p_sig)->signal;
4487 if ((*p_sig)->info.si_signo != 0)
4488 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4489 &(*p_sig)->info);
4490
4491 free (*p_sig);
4492 *p_sig = NULL;
4493 }
4494
4495 if (debug_threads)
4496 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4497 lwpid_of (thread), step ? "step" : "continue", signal,
4498 lwp->stop_expected ? "expected" : "not expected");
4499
4500 if (the_low_target.prepare_to_resume != NULL)
4501 the_low_target.prepare_to_resume (lwp);
4502
4503 regcache_invalidate_thread (thread);
4504 errno = 0;
4505 lwp->stepping = step;
4506 if (step)
4507 ptrace_request = PTRACE_SINGLESTEP;
4508 else if (gdb_catching_syscalls_p (lwp))
4509 ptrace_request = PTRACE_SYSCALL;
4510 else
4511 ptrace_request = PTRACE_CONT;
4512 ptrace (ptrace_request,
4513 lwpid_of (thread),
4514 (PTRACE_TYPE_ARG3) 0,
4515 /* Coerce to a uintptr_t first to avoid potential gcc warning
4516 of coercing an 8 byte integer to a 4 byte pointer. */
4517 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4518
4519 current_thread = saved_thread;
4520 if (errno)
4521 perror_with_name ("resuming thread");
4522
4523 /* Successfully resumed. Clear state that no longer makes sense,
4524 and mark the LWP as running. Must not do this before resuming
4525 otherwise if that fails other code will be confused. E.g., we'd
4526 later try to stop the LWP and hang forever waiting for a stop
4527 status. Note that we must not throw after this is cleared,
4528 otherwise handle_zombie_lwp_error would get confused. */
4529 lwp->stopped = 0;
4530 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4531 }
4532
4533 /* Called when we try to resume a stopped LWP and that errors out. If
4534 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4535 or about to become), discard the error, clear any pending status
4536 the LWP may have, and return true (we'll collect the exit status
4537 soon enough). Otherwise, return false. */
4538
4539 static int
4540 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4541 {
4542 struct thread_info *thread = get_lwp_thread (lp);
4543
4544 /* If we get an error after resuming the LWP successfully, we'd
4545 confuse !T state for the LWP being gone. */
4546 gdb_assert (lp->stopped);
4547
4548 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4549 because even if ptrace failed with ESRCH, the tracee may be "not
4550 yet fully dead", but already refusing ptrace requests. In that
4551 case the tracee has 'R (Running)' state for a little bit
4552 (observed in Linux 3.18). See also the note on ESRCH in the
4553 ptrace(2) man page. Instead, check whether the LWP has any state
4554 other than ptrace-stopped. */
4555
4556 /* Don't assume anything if /proc/PID/status can't be read. */
4557 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4558 {
4559 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4560 lp->status_pending_p = 0;
4561 return 1;
4562 }
4563 return 0;
4564 }
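
/* A sketch of the sort of check linux_proc_pid_is_trace_stopped_nowarn
   performs -- conceptually, read the State: line of /proc/PID/status
   and test for 't (tracing stop)'. This shape is an assumption made
   for illustration; the real helper comes from the nat/linux-procfs
   code. */
#if 0 /* Illustrative sketch only -- not built. */
#include <stdio.h>
#include <string.h>

static int
example_pid_is_trace_stopped (int pid)
{
  char path[64], line[256];
  FILE *f;
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;		/* Can't tell; caller shouldn't assume.  */
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tt (tracing stop)".  */
	stopped = (strstr (line, "(tracing stop)") != NULL);
	break;
      }
  fclose (f);
  return stopped;
}
#endif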
4565
4566 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4567 disappears while we try to resume it. */
4568
4569 static void
4570 linux_resume_one_lwp (struct lwp_info *lwp,
4571 int step, int signal, siginfo_t *info)
4572 {
4573 TRY
4574 {
4575 linux_resume_one_lwp_throw (lwp, step, signal, info);
4576 }
4577 CATCH (ex, RETURN_MASK_ERROR)
4578 {
4579 if (!check_ptrace_stopped_lwp_gone (lwp))
4580 throw_exception (ex);
4581 }
4582 END_CATCH
4583 }
4584
4585 struct thread_resume_array
4586 {
4587 struct thread_resume *resume;
4588 size_t n;
4589 };
4590
4591 /* This function is called once per thread via find_inferior.
4592 ARG is a pointer to a thread_resume_array struct.
4593 We look up the thread specified by ENTRY in ARG, and mark the thread
4594 with a pointer to the appropriate resume request.
4595
4596 This algorithm is O(threads * resume elements), but resume elements
4597 is small (and will remain small at least until GDB supports thread
4598 suspension). */
4599
4600 static int
4601 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4602 {
4603 struct thread_info *thread = (struct thread_info *) entry;
4604 struct lwp_info *lwp = get_thread_lwp (thread);
4605 int ndx;
4606 struct thread_resume_array *r;
4607
4608 r = (struct thread_resume_array *) arg;
4609
4610 for (ndx = 0; ndx < r->n; ndx++)
4611 {
4612 ptid_t ptid = r->resume[ndx].thread;
4613 if (ptid_equal (ptid, minus_one_ptid)
4614 || ptid_equal (ptid, entry->id)
4615 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4616 of PID'. */
4617 || (ptid_get_pid (ptid) == pid_of (thread)
4618 && (ptid_is_pid (ptid)
4619 || ptid_get_lwp (ptid) == -1)))
4620 {
4621 if (r->resume[ndx].kind == resume_stop
4622 && thread->last_resume_kind == resume_stop)
4623 {
4624 if (debug_threads)
4625 debug_printf ("already %s LWP %ld at GDB's request\n",
4626 (thread->last_status.kind
4627 == TARGET_WAITKIND_STOPPED)
4628 ? "stopped"
4629 : "stopping",
4630 lwpid_of (thread));
4631
4632 continue;
4633 }
4634
4635 lwp->resume = &r->resume[ndx];
4636 thread->last_resume_kind = lwp->resume->kind;
4637
4638 lwp->step_range_start = lwp->resume->step_range_start;
4639 lwp->step_range_end = lwp->resume->step_range_end;
4640
4641 /* If we had a deferred signal to report, dequeue one now.
4642 This can happen if LWP gets more than one signal while
4643 trying to get out of a jump pad. */
4644 if (lwp->stopped
4645 && !lwp->status_pending_p
4646 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4647 {
4648 lwp->status_pending_p = 1;
4649
4650 if (debug_threads)
4651 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4652 "leaving status pending.\n",
4653 WSTOPSIG (lwp->status_pending),
4654 lwpid_of (thread));
4655 }
4656
4657 return 0;
4658 }
4659 }
4660
4661 /* No resume action for this thread. */
4662 lwp->resume = NULL;
4663
4664 return 0;
4665 }
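
/* Concretely, for a thread p1234.5678 the loop above accepts resume
   requests aimed at -1 (all threads), at exactly p1234.5678, and at
   the process-wide forms p1234 and p1234.-1; requests such as
   p1234.9999 or p4321.1 leave this thread's resume pointer alone
   (unless a later element matches). */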
4666
4667 /* find_inferior callback for linux_resume.
4668 Set *FLAG_P if this lwp has an interesting status pending. */
4669
4670 static int
4671 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4672 {
4673 struct thread_info *thread = (struct thread_info *) entry;
4674 struct lwp_info *lwp = get_thread_lwp (thread);
4675
4676 /* LWPs which will not be resumed are not interesting, because
4677 we might not wait for them next time through linux_wait. */
4678 if (lwp->resume == NULL)
4679 return 0;
4680
4681 if (thread_still_has_status_pending_p (thread))
4682 * (int *) flag_p = 1;
4683
4684 return 0;
4685 }
4686
4687 /* Return 1 if this lwp that GDB wants running is stopped at an
4688 internal breakpoint that we need to step over. It assumes that any
4689 required STOP_PC adjustment has already been propagated to the
4690 inferior's regcache. */
4691
4692 static int
4693 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4694 {
4695 struct thread_info *thread = (struct thread_info *) entry;
4696 struct lwp_info *lwp = get_thread_lwp (thread);
4697 struct thread_info *saved_thread;
4698 CORE_ADDR pc;
4699 struct process_info *proc = get_thread_process (thread);
4700
4701 /* GDBserver is skipping the extra traps from the wrapper program;
4702 no step-over is needed in that case. */
4703 if (proc->tdesc == NULL)
4704 return 0;
4705
4706 /* LWPs which will not be resumed are not interesting, because we
4707 might not wait for them next time through linux_wait. */
4708
4709 if (!lwp->stopped)
4710 {
4711 if (debug_threads)
4712 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4713 lwpid_of (thread));
4714 return 0;
4715 }
4716
4717 if (thread->last_resume_kind == resume_stop)
4718 {
4719 if (debug_threads)
4720 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4721 " stopped\n",
4722 lwpid_of (thread));
4723 return 0;
4724 }
4725
4726 gdb_assert (lwp->suspended >= 0);
4727
4728 if (lwp->suspended)
4729 {
4730 if (debug_threads)
4731 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4732 lwpid_of (thread));
4733 return 0;
4734 }
4735
4736 if (lwp->status_pending_p)
4737 {
4738 if (debug_threads)
4739 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4740 " status.\n",
4741 lwpid_of (thread));
4742 return 0;
4743 }
4744
4745 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4746 or we have. */
4747 pc = get_pc (lwp);
4748
4749 /* If the PC has changed since we stopped, then don't do anything,
4750 and let the breakpoint/tracepoint be hit. This happens if, for
4751 instance, GDB handled the decr_pc_after_break subtraction itself,
4752 GDB is OOL stepping this thread, or the user has issued a "jump"
4753 command, or poked the thread's registers herself. */
4754 if (pc != lwp->stop_pc)
4755 {
4756 if (debug_threads)
4757 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4758 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4759 lwpid_of (thread),
4760 paddress (lwp->stop_pc), paddress (pc));
4761 return 0;
4762 }
4763
4764 /* On software single-step targets, resume the inferior with a
4765 signal rather than stepping over. */
4766 if (can_software_single_step ()
4767 && lwp->pending_signals != NULL
4768 && lwp_signal_can_be_delivered (lwp))
4769 {
4770 if (debug_threads)
4771 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4772 " signals.\n",
4773 lwpid_of (thread));
4774
4775 return 0;
4776 }
4777
4778 saved_thread = current_thread;
4779 current_thread = thread;
4780
4781 /* We can only step over breakpoints we know about. */
4782 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4783 {
4784 /* Don't step over a breakpoint that GDB expects to hit
4785 though. If the condition is being evaluated on the target's side
4786 and it evaluates to false, step over this breakpoint as well. */
4787 if (gdb_breakpoint_here (pc)
4788 && gdb_condition_true_at_breakpoint (pc)
4789 && gdb_no_commands_at_breakpoint (pc))
4790 {
4791 if (debug_threads)
4792 debug_printf ("Need step over [LWP %ld]? yes, but found"
4793 " GDB breakpoint at 0x%s; skipping step over\n",
4794 lwpid_of (thread), paddress (pc));
4795
4796 current_thread = saved_thread;
4797 return 0;
4798 }
4799 else
4800 {
4801 if (debug_threads)
4802 debug_printf ("Need step over [LWP %ld]? yes, "
4803 "found breakpoint at 0x%s\n",
4804 lwpid_of (thread), paddress (pc));
4805
4806 /* We've found an lwp that needs stepping over --- return 1 so
4807 that find_inferior stops looking. */
4808 current_thread = saved_thread;
4809
4810 return 1;
4811 }
4812 }
4813
4814 current_thread = saved_thread;
4815
4816 if (debug_threads)
4817 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4818 " at 0x%s\n",
4819 lwpid_of (thread), paddress (pc));
4820
4821 return 0;
4822 }
4823
4824 /* Start a step-over operation on LWP. When LWP stops at a
4825 breakpoint, we need to move the breakpoint out of the way to make
4826 progress. If we let other threads run while we do that, they may
4827 pass by the breakpoint location and miss hitting it. To avoid
4828 that, a step-over momentarily stops all threads while LWP is
4829 single-stepped by either hardware or software while the breakpoint
4830 is temporarily uninserted from the inferior. When the single-step
4831 finishes, we reinsert the breakpoint, and let all threads that are
4832 supposed to be running, run again. */
4833
4834 static int
4835 start_step_over (struct lwp_info *lwp)
4836 {
4837 struct thread_info *thread = get_lwp_thread (lwp);
4838 struct thread_info *saved_thread;
4839 CORE_ADDR pc;
4840 int step;
4841
4842 if (debug_threads)
4843 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4844 lwpid_of (thread));
4845
4846 stop_all_lwps (1, lwp);
4847
4848 if (lwp->suspended != 0)
4849 {
4850 internal_error (__FILE__, __LINE__,
4851 "LWP %ld suspended=%d\n", lwpid_of (thread),
4852 lwp->suspended);
4853 }
4854
4855 if (debug_threads)
4856 debug_printf ("Done stopping all threads for step-over.\n");
4857
4858 /* Note, we should always reach here with an already adjusted PC,
4859 either by GDB (if we're resuming due to GDB's request), or by our
4860 caller, if we just finished handling an internal breakpoint GDB
4861 shouldn't care about. */
4862 pc = get_pc (lwp);
4863
4864 saved_thread = current_thread;
4865 current_thread = thread;
4866
4867 lwp->bp_reinsert = pc;
4868 uninsert_breakpoints_at (pc);
4869 uninsert_fast_tracepoint_jumps_at (pc);
4870
4871 step = single_step (lwp);
4872
4873 current_thread = saved_thread;
4874
4875 linux_resume_one_lwp (lwp, step, 0, NULL);
4876
4877 /* Require next event from this LWP. */
4878 step_over_bkpt = thread->entry.id;
4879 return 1;
4880 }
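
/* The step-over protocol above, condensed:
   1. stop_all_lwps (1, lwp): pause and suspend every other thread;
   2. uninsert the breakpoint and any fast tracepoint jump at PC;
   3. single-step LWP, by hardware if possible, otherwise by planting
      reinsert breakpoints on the possible next PCs;
   4. record the stepping thread in step_over_bkpt so that its next
      event is routed to finish_step_over, which reinserts the
      breakpoint, after which the other threads are unsuspended. */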
4881
4882 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4883 start_step_over, if still there, and delete any reinsert
4884 breakpoints we've set, on targets without hardware single-step. */
4885
4886 static int
4887 finish_step_over (struct lwp_info *lwp)
4888 {
4889 if (lwp->bp_reinsert != 0)
4890 {
4891 struct thread_info *saved_thread = current_thread;
4892
4893 if (debug_threads)
4894 debug_printf ("Finished step over.\n");
4895
4896 current_thread = get_lwp_thread (lwp);
4897
4898 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4899 may be no breakpoint to reinsert there by now. */
4900 reinsert_breakpoints_at (lwp->bp_reinsert);
4901 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4902
4903 lwp->bp_reinsert = 0;
4904
4905 /* Delete any software-single-step reinsert breakpoints. No
4906 longer needed. We don't have to worry about other threads
4907 hitting this trap, and later not being able to explain it,
4908 because we were stepping over a breakpoint, and we hold all
4909 threads but LWP stopped while doing that. */
4910 if (!can_hardware_single_step ())
4911 {
4912 gdb_assert (has_reinsert_breakpoints (current_thread));
4913 delete_reinsert_breakpoints (current_thread);
4914 }
4915
4916 step_over_bkpt = null_ptid;
4917 current_thread = saved_thread;
4918 return 1;
4919 }
4920 else
4921 return 0;
4922 }
4923
4924 /* If there's a step over in progress, wait until all threads stop
4925 (that is, until the stepping thread finishes its step), and
4926 unsuspend all lwps. The stepping thread ends with its status
4927 pending, which is processed later when we get back to processing
4928 events. */
4929
4930 static void
4931 complete_ongoing_step_over (void)
4932 {
4933 if (!ptid_equal (step_over_bkpt, null_ptid))
4934 {
4935 struct lwp_info *lwp;
4936 int wstat;
4937 int ret;
4938
4939 if (debug_threads)
4940 debug_printf ("detach: step over in progress, finish it first\n");
4941
4942 /* Passing NULL_PTID as filter indicates we want all events to
4943 be left pending. Eventually this returns when there are no
4944 unwaited-for children left. */
4945 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4946 &wstat, __WALL);
4947 gdb_assert (ret == -1);
4948
4949 lwp = find_lwp_pid (step_over_bkpt);
4950 if (lwp != NULL)
4951 finish_step_over (lwp);
4952 step_over_bkpt = null_ptid;
4953 unsuspend_all_lwps (lwp);
4954 }
4955 }
4956
4957 /* This function is called once per thread. We check the thread's resume
4958 request, which will tell us whether to resume, step, or leave the thread
4959 stopped; and what signal, if any, it should be sent.
4960
4961 For threads which we aren't explicitly told otherwise, we preserve
4962 the stepping flag; this is used for stepping over gdbserver-placed
4963 breakpoints.
4964
4965 If pending_flags was set in any thread, we queue any needed
4966 signals, since we won't actually resume. We already have a pending
4967 event to report, so we don't need to preserve any step requests;
4968 they should be re-issued if necessary. */
4969
4970 static int
4971 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4972 {
4973 struct thread_info *thread = (struct thread_info *) entry;
4974 struct lwp_info *lwp = get_thread_lwp (thread);
4975 int leave_all_stopped = * (int *) arg;
4976 int leave_pending;
4977
4978 if (lwp->resume == NULL)
4979 return 0;
4980
4981 if (lwp->resume->kind == resume_stop)
4982 {
4983 if (debug_threads)
4984 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4985
4986 if (!lwp->stopped)
4987 {
4988 if (debug_threads)
4989 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4990
4991 /* Stop the thread, and wait for the event asynchronously,
4992 through the event loop. */
4993 send_sigstop (lwp);
4994 }
4995 else
4996 {
4997 if (debug_threads)
4998 debug_printf ("already stopped LWP %ld\n",
4999 lwpid_of (thread));
5000
5001 /* The LWP may have been stopped in an internal event that
5002 was not meant to be notified back to GDB (e.g., gdbserver
5003 breakpoint), so we should be reporting a stop event in
5004 this case too. */
5005
5006 /* If the thread already has a pending SIGSTOP, this is a
5007 no-op. Otherwise, something later will presumably resume
5008 the thread and this will cause it to cancel any pending
5009 operation, due to last_resume_kind == resume_stop. If
5010 the thread already has a pending status to report, we
5011 will still report it the next time we wait - see
5012 status_pending_p_callback. */
5013
5014 /* If we already have a pending signal to report, then
5015 there's no need to queue a SIGSTOP, as this means we're
5016 midway through moving the LWP out of the jumppad, and we
5017 will report the pending signal as soon as that is
5018 finished. */
5019 if (lwp->pending_signals_to_report == NULL)
5020 send_sigstop (lwp);
5021 }
5022
5023 /* For stop requests, we're done. */
5024 lwp->resume = NULL;
5025 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5026 return 0;
5027 }
5028
5029 /* If this thread which is about to be resumed has a pending status,
5030 then don't resume it - we can just report the pending status.
5031 Likewise if it is suspended, because e.g., another thread is
5032 stepping past a breakpoint. Make sure to queue any signals that
5033 would otherwise be sent. In all-stop mode, we make this decision
5034 based on whether *any* thread has a pending status. If there's a
5035 thread that needs the step-over-breakpoint dance, then don't
5036 resume any other thread but that particular one. */
5037 leave_pending = (lwp->suspended
5038 || lwp->status_pending_p
5039 || leave_all_stopped);
5040
5041 /* If we have a new signal, enqueue the signal. */
5042 if (lwp->resume->sig != 0)
5043 {
5044 siginfo_t info, *info_p;
5045
5046 /* If this is the same signal we were previously stopped by,
5047 make sure to queue its siginfo. */
5048 if (WIFSTOPPED (lwp->last_status)
5049 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5050 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5051 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5052 info_p = &info;
5053 else
5054 info_p = NULL;
5055
5056 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5057 }
5058
5059 if (!leave_pending)
5060 {
5061 if (debug_threads)
5062 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5063
5064 proceed_one_lwp (entry, NULL);
5065 }
5066 else
5067 {
5068 if (debug_threads)
5069 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5070 }
5071
5072 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5073 lwp->resume = NULL;
5074 return 0;
5075 }
5076
5077 static void
5078 linux_resume (struct thread_resume *resume_info, size_t n)
5079 {
5080 struct thread_resume_array array = { resume_info, n };
5081 struct thread_info *need_step_over = NULL;
5082 int any_pending;
5083 int leave_all_stopped;
5084
5085 if (debug_threads)
5086 {
5087 debug_enter ();
5088 debug_printf ("linux_resume:\n");
5089 }
5090
5091 find_inferior (&all_threads, linux_set_resume_request, &array);
5092
5093 /* If there is a thread which would otherwise be resumed, which has
5094 a pending status, then don't resume any threads - we can just
5095 report the pending status. Make sure to queue any signals that
5096 would otherwise be sent. In non-stop mode, we'll apply this
5097 logic to each thread individually. We consume all pending events
5098 before considering whether to start a step-over (in all-stop). */
5099 any_pending = 0;
5100 if (!non_stop)
5101 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5102
5103 /* If there is a thread which would otherwise be resumed, which is
5104 stopped at a breakpoint that needs stepping over, then don't
5105 resume any threads - have it step over the breakpoint with all
5106 other threads stopped, then resume all threads again. Make sure
5107 to queue any signals that would otherwise be delivered or
5108 queued. */
5109 if (!any_pending && supports_breakpoints ())
5110 need_step_over
5111 = (struct thread_info *) find_inferior (&all_threads,
5112 need_step_over_p, NULL);
5113
5114 leave_all_stopped = (need_step_over != NULL || any_pending);
5115
5116 if (debug_threads)
5117 {
5118 if (need_step_over != NULL)
5119 debug_printf ("Not resuming all, need step over\n");
5120 else if (any_pending)
5121 debug_printf ("Not resuming, all-stop and found "
5122 "an LWP with pending status\n");
5123 else
5124 debug_printf ("Resuming, no pending status or step over needed\n");
5125 }
5126
5127 /* Even if we're leaving threads stopped, queue all signals we'd
5128 otherwise deliver. */
5129 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5130
5131 if (need_step_over)
5132 start_step_over (get_thread_lwp (need_step_over));
5133
5134 if (debug_threads)
5135 {
5136 debug_printf ("linux_resume done\n");
5137 debug_exit ();
5138 }
5139
5140 /* We may have events that were pending that can/should be sent to
5141 the client now. Trigger a linux_wait call. */
5142 if (target_is_async_p ())
5143 async_file_mark ();
5144 }
5145
5146 /* This function is called once per thread. We check the thread's
5147 last resume request, which will tell us whether to resume, step, or
5148 leave the thread stopped. Any signal the client requested to be
5149 delivered has already been enqueued at this point.
5150
5151 If any thread that GDB wants running is stopped at an internal
5152 breakpoint that needs stepping over, we start a step-over operation
5153 on that particular thread, and leave all others stopped. */
5154
5155 static int
5156 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5157 {
5158 struct thread_info *thread = (struct thread_info *) entry;
5159 struct lwp_info *lwp = get_thread_lwp (thread);
5160 int step;
5161
5162 if (lwp == except)
5163 return 0;
5164
5165 if (debug_threads)
5166 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5167
5168 if (!lwp->stopped)
5169 {
5170 if (debug_threads)
5171 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5172 return 0;
5173 }
5174
5175 if (thread->last_resume_kind == resume_stop
5176 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5177 {
5178 if (debug_threads)
5179 debug_printf (" client wants LWP %ld to remain stopped\n",
5180 lwpid_of (thread));
5181 return 0;
5182 }
5183
5184 if (lwp->status_pending_p)
5185 {
5186 if (debug_threads)
5187 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5188 lwpid_of (thread));
5189 return 0;
5190 }
5191
5192 gdb_assert (lwp->suspended >= 0);
5193
5194 if (lwp->suspended)
5195 {
5196 if (debug_threads)
5197 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5198 return 0;
5199 }
5200
5201 if (thread->last_resume_kind == resume_stop
5202 && lwp->pending_signals_to_report == NULL
5203 && lwp->collecting_fast_tracepoint == 0)
5204 {
5205 /* We haven't reported this LWP as stopped yet (otherwise, the
5206 last_status.kind check above would catch it, and we wouldn't
5207 reach here). This LWP may have been momentarily paused by a
5208 stop_all_lwps call while handling, for example, another LWP's
5209 step-over. In that case, the pending expected SIGSTOP signal
5210 that was queued at vCont;t handling time will have already
5211 been consumed by wait_for_sigstop, and so we need to requeue
5212 another one here. Note that if the LWP already has a SIGSTOP
5213 pending, this is a no-op. */
5214
5215 if (debug_threads)
5216 debug_printf ("Client wants LWP %ld to stop. "
5217 "Making sure it has a SIGSTOP pending\n",
5218 lwpid_of (thread));
5219
5220 send_sigstop (lwp);
5221 }
5222
5223 if (thread->last_resume_kind == resume_step)
5224 {
5225 if (debug_threads)
5226 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5227 lwpid_of (thread));
5228
5229 /* If resume_step is requested by GDB, install reinsert
5230 breakpoints when the thread is about to be actually resumed if
5231 the reinsert breakpoints weren't removed. */
5232 if (can_software_single_step () && !has_reinsert_breakpoints (thread))
5233 install_software_single_step_breakpoints (lwp);
5234
5235 step = maybe_hw_step (thread);
5236 }
5237 else if (lwp->bp_reinsert != 0)
5238 {
5239 if (debug_threads)
5240 debug_printf (" stepping LWP %ld, reinsert set\n",
5241 lwpid_of (thread));
5242
5243 step = maybe_hw_step (thread);
5244 }
5245 else
5246 step = 0;
5247
5248 linux_resume_one_lwp (lwp, step, 0, NULL);
5249 return 0;
5250 }
5251
5252 static int
5253 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5254 {
5255 struct thread_info *thread = (struct thread_info *) entry;
5256 struct lwp_info *lwp = get_thread_lwp (thread);
5257
5258 if (lwp == except)
5259 return 0;
5260
5261 lwp_suspended_decr (lwp);
5262
5263 return proceed_one_lwp (entry, except);
5264 }
5265
5266 /* When we finish a step-over, set threads running again. If there's
5267 another thread that may need a step-over, now's the time to start
5268 it. Eventually, we'll move all threads past their breakpoints. */
5269
5270 static void
5271 proceed_all_lwps (void)
5272 {
5273 struct thread_info *need_step_over;
5274
5275 /* If there is a thread which would otherwise be resumed, which is
5276 stopped at a breakpoint that needs stepping over, then don't
5277 resume any threads - have it step over the breakpoint with all
5278 other threads stopped, then resume all threads again. */
5279
5280 if (supports_breakpoints ())
5281 {
5282 need_step_over
5283 = (struct thread_info *) find_inferior (&all_threads,
5284 need_step_over_p, NULL);
5285
5286 if (need_step_over != NULL)
5287 {
5288 if (debug_threads)
5289 debug_printf ("proceed_all_lwps: found "
5290 "thread %ld needing a step-over\n",
5291 lwpid_of (need_step_over));
5292
5293 start_step_over (get_thread_lwp (need_step_over));
5294 return;
5295 }
5296 }
5297
5298 if (debug_threads)
5299 debug_printf ("Proceeding, no step-over needed\n");
5300
5301 find_inferior (&all_threads, proceed_one_lwp, NULL);
5302 }
5303
5304 /* Stopped LWPs that the client wanted to be running, that don't have
5305 pending statuses, are set to run again, except for EXCEPT, if not
5306 NULL. This undoes a stop_all_lwps call. */
5307
5308 static void
5309 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5310 {
5311 if (debug_threads)
5312 {
5313 debug_enter ();
5314 if (except)
5315 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5316 lwpid_of (get_lwp_thread (except)));
5317 else
5318 debug_printf ("unstopping all lwps\n");
5319 }
5320
5321 if (unsuspend)
5322 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5323 else
5324 find_inferior (&all_threads, proceed_one_lwp, except);
5325
5326 if (debug_threads)
5327 {
5328 debug_printf ("unstop_all_lwps done\n");
5329 debug_exit ();
5330 }
5331 }
5332
5333
5334 #ifdef HAVE_LINUX_REGSETS
5335
5336 #define use_linux_regsets 1
5337
5338 /* Returns true if REGSET has been disabled. */
5339
5340 static int
5341 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5342 {
5343 return (info->disabled_regsets != NULL
5344 && info->disabled_regsets[regset - info->regsets]);
5345 }
5346
5347 /* Disable REGSET. */
5348
5349 static void
5350 disable_regset (struct regsets_info *info, struct regset_info *regset)
5351 {
5352 int dr_offset;
5353
5354 dr_offset = regset - info->regsets;
5355 if (info->disabled_regsets == NULL)
5356 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5357 info->disabled_regsets[dr_offset] = 1;
5358 }
5359
5360 static int
5361 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5362 struct regcache *regcache)
5363 {
5364 struct regset_info *regset;
5365 int saw_general_regs = 0;
5366 int pid;
5367 struct iovec iov;
5368
5369 pid = lwpid_of (current_thread);
5370 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5371 {
5372 void *buf, *data;
5373 int nt_type, res;
5374
5375 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5376 continue;
5377
5378 buf = xmalloc (regset->size);
5379
5380 nt_type = regset->nt_type;
5381 if (nt_type)
5382 {
5383 iov.iov_base = buf;
5384 iov.iov_len = regset->size;
5385 data = (void *) &iov;
5386 }
5387 else
5388 data = buf;
5389
5390 #ifndef __sparc__
5391 res = ptrace (regset->get_request, pid,
5392 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5393 #else
5394 res = ptrace (regset->get_request, pid, data, nt_type);
5395 #endif
5396 if (res < 0)
5397 {
5398 if (errno == EIO)
5399 {
5400 /* If we get EIO on a regset, do not try it again for
5401 this process mode. */
5402 disable_regset (regsets_info, regset);
5403 }
5404 else if (errno == ENODATA)
5405 {
5406 /* ENODATA may be returned if the regset is currently
5407 not "active". This can happen in normal operation,
5408 so suppress the warning in this case. */
5409 }
5410 else if (errno == ESRCH)
5411 {
5412 /* At this point, ESRCH should mean the process is
5413 already gone, in which case we simply ignore attempts
5414 to read its registers. */
5415 }
5416 else
5417 {
5418 char s[256];
5419 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5420 pid);
5421 perror (s);
5422 }
5423 }
5424 else
5425 {
5426 if (regset->type == GENERAL_REGS)
5427 saw_general_regs = 1;
5428 regset->store_function (regcache, buf);
5429 }
5430 free (buf);
5431 }
5432 if (saw_general_regs)
5433 return 0;
5434 else
5435 return 1;
5436 }
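
/* When a regset has a non-zero nt_type, the transfer above goes
   through the PTRACE_GETREGSET/PTRACE_SETREGSET interface: the NT_*
   note type is passed in the address slot and the buffer is described
   by a struct iovec, whose iov_len the kernel updates to the number
   of bytes actually transferred. A minimal stand-alone sketch, using
   NT_PRSTATUS (the general registers; the buffer size is per-arch): */
#if 0 /* Illustrative sketch only -- not built. */
#include <elf.h>		/* For NT_PRSTATUS.  */
#include <sys/ptrace.h>
#include <sys/uio.h>

static long
example_fetch_gregs (pid_t pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
#endif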
5437
5438 static int
5439 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5440 struct regcache *regcache)
5441 {
5442 struct regset_info *regset;
5443 int saw_general_regs = 0;
5444 int pid;
5445 struct iovec iov;
5446
5447 pid = lwpid_of (current_thread);
5448 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5449 {
5450 void *buf, *data;
5451 int nt_type, res;
5452
5453 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5454 || regset->fill_function == NULL)
5455 continue;
5456
5457 buf = xmalloc (regset->size);
5458
5459 /* First fill the buffer with the current register set contents,
5460 in case there are any items in the kernel's regset that are
5461 not in gdbserver's regcache. */
5462
5463 nt_type = regset->nt_type;
5464 if (nt_type)
5465 {
5466 iov.iov_base = buf;
5467 iov.iov_len = regset->size;
5468 data = (void *) &iov;
5469 }
5470 else
5471 data = buf;
5472
5473 #ifndef __sparc__
5474 res = ptrace (regset->get_request, pid,
5475 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5476 #else
5477 res = ptrace (regset->get_request, pid, data, nt_type);
5478 #endif
5479
5480 if (res == 0)
5481 {
5482 /* Then overlay our cached registers on that. */
5483 regset->fill_function (regcache, buf);
5484
5485 /* Only now do we write the register set. */
5486 #ifndef __sparc__
5487 res = ptrace (regset->set_request, pid,
5488 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5489 #else
5490 res = ptrace (regset->set_request, pid, data, nt_type);
5491 #endif
5492 }
5493
5494 if (res < 0)
5495 {
5496 if (errno == EIO)
5497 {
5498 /* If we get EIO on a regset, do not try it again for
5499 this process mode. */
5500 disable_regset (regsets_info, regset);
5501 }
5502 else if (errno == ESRCH)
5503 {
5504 /* At this point, ESRCH should mean the process is
5505 already gone, in which case we simply ignore attempts
5506 to change its registers. See also the related
5507 comment in linux_resume_one_lwp. */
5508 free (buf);
5509 return 0;
5510 }
5511 else
5512 {
5513 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5514 }
5515 }
5516 else if (regset->type == GENERAL_REGS)
5517 saw_general_regs = 1;
5518 free (buf);
5519 }
5520 if (saw_general_regs)
5521 return 0;
5522 else
5523 return 1;
5524 }
5525
5526 #else /* !HAVE_LINUX_REGSETS */
5527
5528 #define use_linux_regsets 0
5529 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5530 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5531
5532 #endif
5533
5534 /* Return 1 if register REGNO is supported by one of the regset ptrace
5535 calls or 0 if it has to be transferred individually. */
5536
5537 static int
5538 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5539 {
5540 unsigned char mask = 1 << (regno % 8);
5541 size_t index = regno / 8;
5542
5543 return (use_linux_regsets
5544 && (regs_info->regset_bitmap == NULL
5545 || (regs_info->regset_bitmap[index] & mask) != 0));
5546 }
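/* Illustrative sketch (not part of the original source): with the
   mapping above, REGNO == 10 gives index 10 / 8 == 1 and mask
   1 << (10 % 8) == 0x04, i.e. bit 2 of regset_bitmap[1].  A typical
   caller, mirroring linux_fetch_registers below, looks like:

     const struct regs_info *ri = (*the_low_target.regs_info) ();
     if (linux_register_in_regsets (ri, regno))
       ... transfer REGNO via a regset ...
     else
       ... fall back to PTRACE_PEEKUSER / PTRACE_POKEUSER ...  */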
5547
5548 #ifdef HAVE_LINUX_USRREGS
5549
5550 static int
5551 register_addr (const struct usrregs_info *usrregs, int regnum)
5552 {
5553 int addr;
5554
5555 if (regnum < 0 || regnum >= usrregs->num_regs)
5556 error ("Invalid register number %d.", regnum);
5557
5558 addr = usrregs->regmap[regnum];
5559
5560 return addr;
5561 }
5562
5563 /* Fetch one register. */
5564 static void
5565 fetch_register (const struct usrregs_info *usrregs,
5566 struct regcache *regcache, int regno)
5567 {
5568 CORE_ADDR regaddr;
5569 int i, size;
5570 char *buf;
5571 int pid;
5572
5573 if (regno >= usrregs->num_regs)
5574 return;
5575 if ((*the_low_target.cannot_fetch_register) (regno))
5576 return;
5577
5578 regaddr = register_addr (usrregs, regno);
5579 if (regaddr == -1)
5580 return;
5581
5582 size = ((register_size (regcache->tdesc, regno)
5583 + sizeof (PTRACE_XFER_TYPE) - 1)
5584 & -sizeof (PTRACE_XFER_TYPE));
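  /* Worked example (assuming an 8-byte PTRACE_XFER_TYPE): a 10-byte
     register rounds up to SIZE == (10 + 7) & -8 == 16, i.e. two
     transfer words.  */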
5585 buf = (char *) alloca (size);
5586
5587 pid = lwpid_of (current_thread);
5588 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5589 {
5590 errno = 0;
5591 *(PTRACE_XFER_TYPE *) (buf + i) =
5592 ptrace (PTRACE_PEEKUSER, pid,
5593 /* Coerce to a uintptr_t first to avoid potential gcc warning
5594 of coercing an 8 byte integer to a 4 byte pointer. */
5595 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5596 regaddr += sizeof (PTRACE_XFER_TYPE);
5597 if (errno != 0)
5598 error ("reading register %d: %s", regno, strerror (errno));
5599 }
5600
5601 if (the_low_target.supply_ptrace_register)
5602 the_low_target.supply_ptrace_register (regcache, regno, buf);
5603 else
5604 supply_register (regcache, regno, buf);
5605 }
5606
5607 /* Store one register. */
5608 static void
5609 store_register (const struct usrregs_info *usrregs,
5610 struct regcache *regcache, int regno)
5611 {
5612 CORE_ADDR regaddr;
5613 int i, size;
5614 char *buf;
5615 int pid;
5616
5617 if (regno >= usrregs->num_regs)
5618 return;
5619 if ((*the_low_target.cannot_store_register) (regno))
5620 return;
5621
5622 regaddr = register_addr (usrregs, regno);
5623 if (regaddr == -1)
5624 return;
5625
5626 size = ((register_size (regcache->tdesc, regno)
5627 + sizeof (PTRACE_XFER_TYPE) - 1)
5628 & -sizeof (PTRACE_XFER_TYPE));
5629 buf = (char *) alloca (size);
5630 memset (buf, 0, size);
5631
5632 if (the_low_target.collect_ptrace_register)
5633 the_low_target.collect_ptrace_register (regcache, regno, buf);
5634 else
5635 collect_register (regcache, regno, buf);
5636
5637 pid = lwpid_of (current_thread);
5638 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5639 {
5640 errno = 0;
5641 ptrace (PTRACE_POKEUSER, pid,
5642 /* Coerce to a uintptr_t first to avoid potential gcc warning
5643 about coercing an 8 byte integer to a 4 byte pointer. */
5644 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5645 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5646 if (errno != 0)
5647 {
5648 /* At this point, ESRCH should mean the process is
5649 already gone, in which case we simply ignore attempts
5650 to change its registers. See also the related
5651 comment in linux_resume_one_lwp. */
5652 if (errno == ESRCH)
5653 return;
5654
5655 if ((*the_low_target.cannot_store_register) (regno) == 0)
5656 error ("writing register %d: %s", regno, strerror (errno));
5657 }
5658 regaddr += sizeof (PTRACE_XFER_TYPE);
5659 }
5660 }
5661
5662 /* Fetch all registers, or just one, from the child process.
5663 If REGNO is -1, do this for all registers, skipping any that are
5664 assumed to have been retrieved by regsets_fetch_inferior_registers,
5665 unless ALL is non-zero.
5666 Otherwise, REGNO specifies which register (so we can save time). */
5667 static void
5668 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5669 struct regcache *regcache, int regno, int all)
5670 {
5671 struct usrregs_info *usr = regs_info->usrregs;
5672
5673 if (regno == -1)
5674 {
5675 for (regno = 0; regno < usr->num_regs; regno++)
5676 if (all || !linux_register_in_regsets (regs_info, regno))
5677 fetch_register (usr, regcache, regno);
5678 }
5679 else
5680 fetch_register (usr, regcache, regno);
5681 }
5682
5683 /* Store our register values back into the inferior.
5684 If REGNO is -1, do this for all registers, skipping any that are
5685 assumed to have been saved by regsets_store_inferior_registers,
5686 unless ALL is non-zero.
5687 Otherwise, REGNO specifies which register (so we can save time). */
5688 static void
5689 usr_store_inferior_registers (const struct regs_info *regs_info,
5690 struct regcache *regcache, int regno, int all)
5691 {
5692 struct usrregs_info *usr = regs_info->usrregs;
5693
5694 if (regno == -1)
5695 {
5696 for (regno = 0; regno < usr->num_regs; regno++)
5697 if (all || !linux_register_in_regsets (regs_info, regno))
5698 store_register (usr, regcache, regno);
5699 }
5700 else
5701 store_register (usr, regcache, regno);
5702 }
5703
5704 #else /* !HAVE_LINUX_USRREGS */
5705
5706 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5707 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5708
5709 #endif
5710
5711
5712 static void
5713 linux_fetch_registers (struct regcache *regcache, int regno)
5714 {
5715 int use_regsets;
5716 int all = 0;
5717 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5718
5719 if (regno == -1)
5720 {
5721 if (the_low_target.fetch_register != NULL
5722 && regs_info->usrregs != NULL)
5723 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5724 (*the_low_target.fetch_register) (regcache, regno);
5725
5726 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5727 if (regs_info->usrregs != NULL)
5728 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5729 }
5730 else
5731 {
5732 if (the_low_target.fetch_register != NULL
5733 && (*the_low_target.fetch_register) (regcache, regno))
5734 return;
5735
5736 use_regsets = linux_register_in_regsets (regs_info, regno);
5737 if (use_regsets)
5738 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5739 regcache);
5740 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5741 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5742 }
5743 }
5744
5745 static void
5746 linux_store_registers (struct regcache *regcache, int regno)
5747 {
5748 int use_regsets;
5749 int all = 0;
5750 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5751
5752 if (regno == -1)
5753 {
5754 all = regsets_store_inferior_registers (regs_info->regsets_info,
5755 regcache);
5756 if (regs_info->usrregs != NULL)
5757 usr_store_inferior_registers (regs_info, regcache, regno, all);
5758 }
5759 else
5760 {
5761 use_regsets = linux_register_in_regsets (regs_info, regno);
5762 if (use_regsets)
5763 all = regsets_store_inferior_registers (regs_info->regsets_info,
5764 regcache);
5765 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5766 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5767 }
5768 }
5769
5770
5771 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5772 to debugger memory starting at MYADDR. */
5773
5774 static int
5775 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5776 {
5777 int pid = lwpid_of (current_thread);
5778 register PTRACE_XFER_TYPE *buffer;
5779 register CORE_ADDR addr;
5780 register int count;
5781 char filename[64];
5782 register int i;
5783 int ret;
5784 int fd;
5785
5786   /* Try using /proc.  Don't bother for reads smaller than three words.  */
5787 if (len >= 3 * sizeof (long))
5788 {
5789 int bytes;
5790
5791 /* We could keep this file open and cache it - possibly one per
5792 thread. That requires some juggling, but is even faster. */
5793 sprintf (filename, "/proc/%d/mem", pid);
5794 fd = open (filename, O_RDONLY | O_LARGEFILE);
5795 if (fd == -1)
5796 goto no_proc;
5797
5798 /* If pread64 is available, use it. It's faster if the kernel
5799 supports it (only one syscall), and it's 64-bit safe even on
5800 32-bit platforms (for instance, SPARC debugging a SPARC64
5801 application). */
5802 #ifdef HAVE_PREAD64
5803 bytes = pread64 (fd, myaddr, len, memaddr);
5804 #else
5805 bytes = -1;
5806 if (lseek (fd, memaddr, SEEK_SET) != -1)
5807 bytes = read (fd, myaddr, len);
5808 #endif
5809
5810 close (fd);
5811 if (bytes == len)
5812 return 0;
5813
5814 /* Some data was read, we'll try to get the rest with ptrace. */
5815 if (bytes > 0)
5816 {
5817 memaddr += bytes;
5818 myaddr += bytes;
5819 len -= bytes;
5820 }
5821 }
5822
5823 no_proc:
5824 /* Round starting address down to longword boundary. */
5825 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5826 /* Round ending address up; get number of longwords that makes. */
5827 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5828 / sizeof (PTRACE_XFER_TYPE));
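  /* Worked example (assuming 8-byte transfer words): MEMADDR == 0x1006
     and LEN == 4 give ADDR == 0x1000 and
     COUNT == (0x100a - 0x1000 + 7) / 8 == 2, so the loop below fetches
     the two words covering 0x1000..0x100f.  */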
5829 /* Allocate buffer of that many longwords. */
5830 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5831
5832   /* Read all the longwords.  */
5833 errno = 0;
5834 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5835 {
5836 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5837 about coercing an 8 byte integer to a 4 byte pointer. */
5838 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5839 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5840 (PTRACE_TYPE_ARG4) 0);
5841 if (errno)
5842 break;
5843 }
5844 ret = errno;
5845
5846 /* Copy appropriate bytes out of the buffer. */
5847 if (i > 0)
5848 {
5849 i *= sizeof (PTRACE_XFER_TYPE);
5850 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5851 memcpy (myaddr,
5852 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5853 i < len ? i : len);
5854 }
5855
5856 return ret;
5857 }
5858
5859 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5860 memory at MEMADDR. On failure (cannot write to the inferior)
5861 returns the value of errno. Always succeeds if LEN is zero. */
5862
5863 static int
5864 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5865 {
5866 register int i;
5867 /* Round starting address down to longword boundary. */
5868 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5869 /* Round ending address up; get number of longwords that makes. */
5870 register int count
5871 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5872 / sizeof (PTRACE_XFER_TYPE);
5873
5874 /* Allocate buffer of that many longwords. */
5875 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5876
5877 int pid = lwpid_of (current_thread);
5878
5879 if (len == 0)
5880 {
5881 /* Zero length write always succeeds. */
5882 return 0;
5883 }
5884
5885 if (debug_threads)
5886 {
5887 /* Dump up to four bytes. */
5888 char str[4 * 2 + 1];
5889 char *p = str;
5890 int dump = len < 4 ? len : 4;
5891
5892 for (i = 0; i < dump; i++)
5893 {
5894 sprintf (p, "%02x", myaddr[i]);
5895 p += 2;
5896 }
5897 *p = '\0';
5898
5899 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5900 str, (long) memaddr, pid);
5901 }
5902
5903 /* Fill start and end extra bytes of buffer with existing memory data. */
5904
5905 errno = 0;
5906 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5907 about coercing an 8 byte integer to a 4 byte pointer. */
5908 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5909 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5910 (PTRACE_TYPE_ARG4) 0);
5911 if (errno)
5912 return errno;
5913
5914 if (count > 1)
5915 {
5916 errno = 0;
5917 buffer[count - 1]
5918 = ptrace (PTRACE_PEEKTEXT, pid,
5919 /* Coerce to a uintptr_t first to avoid potential gcc warning
5920 about coercing an 8 byte integer to a 4 byte pointer. */
5921 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5922 * sizeof (PTRACE_XFER_TYPE)),
5923 (PTRACE_TYPE_ARG4) 0);
5924 if (errno)
5925 return errno;
5926 }
5927
5928 /* Copy data to be written over corresponding part of buffer. */
5929
5930 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5931 myaddr, len);
5932
5933 /* Write the entire buffer. */
5934
5935 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5936 {
5937 errno = 0;
5938 ptrace (PTRACE_POKETEXT, pid,
5939 /* Coerce to a uintptr_t first to avoid potential gcc warning
5940 about coercing an 8 byte integer to a 4 byte pointer. */
5941 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5942 (PTRACE_TYPE_ARG4) buffer[i]);
5943 if (errno)
5944 return errno;
5945 }
5946
5947 return 0;
5948 }
5949
5950 static void
5951 linux_look_up_symbols (void)
5952 {
5953 #ifdef USE_THREAD_DB
5954 struct process_info *proc = current_process ();
5955
5956 if (proc->priv->thread_db != NULL)
5957 return;
5958
5959 thread_db_init ();
5960 #endif
5961 }
5962
5963 static void
5964 linux_request_interrupt (void)
5965 {
5966 extern unsigned long signal_pid;
5967
5968 /* Send a SIGINT to the process group. This acts just like the user
5969 typed a ^C on the controlling terminal. */
5970 kill (-signal_pid, SIGINT);
5971 }
5972
5973 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5974 to debugger memory starting at MYADDR. */
5975
5976 static int
5977 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5978 {
5979 char filename[PATH_MAX];
5980 int fd, n;
5981 int pid = lwpid_of (current_thread);
5982
5983 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5984
5985 fd = open (filename, O_RDONLY);
5986 if (fd < 0)
5987 return -1;
5988
5989 if (offset != (CORE_ADDR) 0
5990 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5991 n = -1;
5992 else
5993 n = read (fd, myaddr, len);
5994
5995 close (fd);
5996
5997 return n;
5998 }
5999
6000 /* These breakpoint and watchpoint related wrapper functions simply
6001 pass on the function call if the target has registered a
6002 corresponding function. */
6003
6004 static int
6005 linux_supports_z_point_type (char z_type)
6006 {
6007 return (the_low_target.supports_z_point_type != NULL
6008 && the_low_target.supports_z_point_type (z_type));
6009 }
6010
6011 static int
6012 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6013 int size, struct raw_breakpoint *bp)
6014 {
6015 if (type == raw_bkpt_type_sw)
6016 return insert_memory_breakpoint (bp);
6017 else if (the_low_target.insert_point != NULL)
6018 return the_low_target.insert_point (type, addr, size, bp);
6019 else
6020 /* Unsupported (see target.h). */
6021 return 1;
6022 }
6023
6024 static int
6025 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6026 int size, struct raw_breakpoint *bp)
6027 {
6028 if (type == raw_bkpt_type_sw)
6029 return remove_memory_breakpoint (bp);
6030 else if (the_low_target.remove_point != NULL)
6031 return the_low_target.remove_point (type, addr, size, bp);
6032 else
6033 /* Unsupported (see target.h). */
6034 return 1;
6035 }
6036
6037 /* Implement the to_stopped_by_sw_breakpoint target_ops
6038 method. */
6039
6040 static int
6041 linux_stopped_by_sw_breakpoint (void)
6042 {
6043 struct lwp_info *lwp = get_thread_lwp (current_thread);
6044
6045 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6046 }
6047
6048 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6049 method. */
6050
6051 static int
6052 linux_supports_stopped_by_sw_breakpoint (void)
6053 {
6054 return USE_SIGTRAP_SIGINFO;
6055 }
6056
6057 /* Implement the to_stopped_by_hw_breakpoint target_ops
6058 method. */
6059
6060 static int
6061 linux_stopped_by_hw_breakpoint (void)
6062 {
6063 struct lwp_info *lwp = get_thread_lwp (current_thread);
6064
6065 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6066 }
6067
6068 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6069 method. */
6070
6071 static int
6072 linux_supports_stopped_by_hw_breakpoint (void)
6073 {
6074 return USE_SIGTRAP_SIGINFO;
6075 }
6076
6077 /* Implement the supports_hardware_single_step target_ops method. */
6078
6079 static int
6080 linux_supports_hardware_single_step (void)
6081 {
6082 return can_hardware_single_step ();
6083 }
6084
6085 static int
6086 linux_supports_software_single_step (void)
6087 {
6088 return can_software_single_step ();
6089 }
6090
6091 static int
6092 linux_stopped_by_watchpoint (void)
6093 {
6094 struct lwp_info *lwp = get_thread_lwp (current_thread);
6095
6096 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6097 }
6098
6099 static CORE_ADDR
6100 linux_stopped_data_address (void)
6101 {
6102 struct lwp_info *lwp = get_thread_lwp (current_thread);
6103
6104 return lwp->stopped_data_address;
6105 }
6106
6107 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6108 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6109 && defined(PT_TEXT_END_ADDR)
6110
6111 /* This is only used for targets that define PT_TEXT_ADDR,
6112 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6113 the target has different ways of acquiring this information, like
6114 loadmaps. */
6115
6116 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6117 to tell gdb about. */
6118
6119 static int
6120 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6121 {
6122 unsigned long text, text_end, data;
6123 int pid = lwpid_of (current_thread);
6124
6125 errno = 0;
6126
6127 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6128 (PTRACE_TYPE_ARG4) 0);
6129 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6130 (PTRACE_TYPE_ARG4) 0);
6131 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6132 (PTRACE_TYPE_ARG4) 0);
6133
6134 if (errno == 0)
6135 {
6136 /* Both text and data offsets produced at compile-time (and so
6137 used by gdb) are relative to the beginning of the program,
6138 with the data segment immediately following the text segment.
6139 However, the actual runtime layout in memory may put the data
6140 somewhere else, so when we send gdb a data base-address, we
6141 use the real data base address and subtract the compile-time
6142 data base-address from it (which is just the length of the
6143 text segment). BSS immediately follows data in both
6144 cases. */
6145 *text_p = text;
6146 *data_p = data - (text_end - text);
6147
6148 return 1;
6149 }
6150 return 0;
6151 }
6152 #endif
6153
6154 static int
6155 linux_qxfer_osdata (const char *annex,
6156 unsigned char *readbuf, unsigned const char *writebuf,
6157 CORE_ADDR offset, int len)
6158 {
6159 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6160 }
6161
6162 /* Convert a native/host siginfo object, into/from the siginfo in the
6163 layout of the inferiors' architecture. */
6164
6165 static void
6166 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6167 {
6168 int done = 0;
6169
6170 if (the_low_target.siginfo_fixup != NULL)
6171 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6172
6173 /* If there was no callback, or the callback didn't do anything,
6174 then just do a straight memcpy. */
6175 if (!done)
6176 {
6177 if (direction == 1)
6178 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6179 else
6180 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6181 }
6182 }
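/* Illustration of the DIRECTION convention, matching the calls in
   linux_xfer_siginfo below: 0 converts the native siginfo just
   obtained with PTRACE_GETSIGINFO into the inferior's layout; 1
   converts the inferior-layout bytes back to the native layout before
   PTRACE_SETSIGINFO.  */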
6183
6184 static int
6185 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6186 unsigned const char *writebuf, CORE_ADDR offset, int len)
6187 {
6188 int pid;
6189 siginfo_t siginfo;
6190 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6191
6192 if (current_thread == NULL)
6193 return -1;
6194
6195 pid = lwpid_of (current_thread);
6196
6197 if (debug_threads)
6198 debug_printf ("%s siginfo for lwp %d.\n",
6199 readbuf != NULL ? "Reading" : "Writing",
6200 pid);
6201
6202 if (offset >= sizeof (siginfo))
6203 return -1;
6204
6205 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6206 return -1;
6207
6208 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6209 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6210 inferior with a 64-bit GDBSERVER should look the same as debugging it
6211 with a 32-bit GDBSERVER, we need to convert it. */
6212 siginfo_fixup (&siginfo, inf_siginfo, 0);
6213
6214 if (offset + len > sizeof (siginfo))
6215 len = sizeof (siginfo) - offset;
6216
6217 if (readbuf != NULL)
6218 memcpy (readbuf, inf_siginfo + offset, len);
6219 else
6220 {
6221 memcpy (inf_siginfo + offset, writebuf, len);
6222
6223 /* Convert back to ptrace layout before flushing it out. */
6224 siginfo_fixup (&siginfo, inf_siginfo, 1);
6225
6226 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6227 return -1;
6228 }
6229
6230 return len;
6231 }
6232
6233 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6234    it marks the event pipe so we notice when children change state;
6235    and it acts as the handler for the sigsuspend in my_waitpid.  */
6236
6237 static void
6238 sigchld_handler (int signo)
6239 {
6240 int old_errno = errno;
6241
6242 if (debug_threads)
6243 {
6244 do
6245 {
6246 /* fprintf is not async-signal-safe, so call write
6247 directly. */
6248 if (write (2, "sigchld_handler\n",
6249 sizeof ("sigchld_handler\n") - 1) < 0)
6250 break; /* just ignore */
6251 } while (0);
6252 }
6253
6254 if (target_is_async_p ())
6255 async_file_mark (); /* trigger a linux_wait */
6256
6257 errno = old_errno;
6258 }
6259
6260 static int
6261 linux_supports_non_stop (void)
6262 {
6263 return 1;
6264 }
6265
6266 static int
6267 linux_async (int enable)
6268 {
6269 int previous = target_is_async_p ();
6270
6271 if (debug_threads)
6272 debug_printf ("linux_async (%d), previous=%d\n",
6273 enable, previous);
6274
6275 if (previous != enable)
6276 {
6277 sigset_t mask;
6278 sigemptyset (&mask);
6279 sigaddset (&mask, SIGCHLD);
6280
6281 sigprocmask (SIG_BLOCK, &mask, NULL);
6282
6283 if (enable)
6284 {
6285 if (pipe (linux_event_pipe) == -1)
6286 {
6287 linux_event_pipe[0] = -1;
6288 linux_event_pipe[1] = -1;
6289 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6290
6291 warning ("creating event pipe failed.");
6292 return previous;
6293 }
6294
6295 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6296 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6297
6298 /* Register the event loop handler. */
6299 add_file_handler (linux_event_pipe[0],
6300 handle_target_event, NULL);
6301
6302 /* Always trigger a linux_wait. */
6303 async_file_mark ();
6304 }
6305 else
6306 {
6307 delete_file_handler (linux_event_pipe[0]);
6308
6309 close (linux_event_pipe[0]);
6310 close (linux_event_pipe[1]);
6311 linux_event_pipe[0] = -1;
6312 linux_event_pipe[1] = -1;
6313 }
6314
6315 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6316 }
6317
6318 return previous;
6319 }
6320
6321 static int
6322 linux_start_non_stop (int nonstop)
6323 {
6324 /* Register or unregister from event-loop accordingly. */
6325 linux_async (nonstop);
6326
6327 if (target_is_async_p () != (nonstop != 0))
6328 return -1;
6329
6330 return 0;
6331 }
6332
6333 static int
6334 linux_supports_multi_process (void)
6335 {
6336 return 1;
6337 }
6338
6339 /* Check if fork events are supported. */
6340
6341 static int
6342 linux_supports_fork_events (void)
6343 {
6344 return linux_supports_tracefork ();
6345 }
6346
6347 /* Check if vfork events are supported. */
6348
6349 static int
6350 linux_supports_vfork_events (void)
6351 {
6352 return linux_supports_tracefork ();
6353 }
6354
6355 /* Check if exec events are supported. */
6356
6357 static int
6358 linux_supports_exec_events (void)
6359 {
6360 return linux_supports_traceexec ();
6361 }
6362
6363 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6364 options for the specified lwp. */
6365
6366 static int
6367 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6368 void *args)
6369 {
6370 struct thread_info *thread = (struct thread_info *) entry;
6371 struct lwp_info *lwp = get_thread_lwp (thread);
6372
6373 if (!lwp->stopped)
6374 {
6375 /* Stop the lwp so we can modify its ptrace options. */
6376 lwp->must_set_ptrace_flags = 1;
6377 linux_stop_lwp (lwp);
6378 }
6379 else
6380 {
6381 /* Already stopped; go ahead and set the ptrace options. */
6382 struct process_info *proc = find_process_pid (pid_of (thread));
6383 int options = linux_low_ptrace_options (proc->attached);
6384
6385 linux_enable_event_reporting (lwpid_of (thread), options);
6386 lwp->must_set_ptrace_flags = 0;
6387 }
6388
6389 return 0;
6390 }
6391
6392 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6393 ptrace flags for all inferiors. This is in case the new GDB connection
6394 doesn't support the same set of events that the previous one did. */
6395
6396 static void
6397 linux_handle_new_gdb_connection (void)
6398 {
6399 pid_t pid;
6400
6401 /* Request that all the lwps reset their ptrace options. */
6402   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6403 }
6404
6405 static int
6406 linux_supports_disable_randomization (void)
6407 {
6408 #ifdef HAVE_PERSONALITY
6409 return 1;
6410 #else
6411 return 0;
6412 #endif
6413 }
6414
6415 static int
6416 linux_supports_agent (void)
6417 {
6418 return 1;
6419 }
6420
6421 static int
6422 linux_supports_range_stepping (void)
6423 {
6424 if (*the_low_target.supports_range_stepping == NULL)
6425 return 0;
6426
6427 return (*the_low_target.supports_range_stepping) ();
6428 }
6429
6430 /* Enumerate spufs IDs for process PID. */
6431 static int
6432 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6433 {
6434 int pos = 0;
6435 int written = 0;
6436 char path[128];
6437 DIR *dir;
6438 struct dirent *entry;
6439
6440 sprintf (path, "/proc/%ld/fd", pid);
6441 dir = opendir (path);
6442 if (!dir)
6443 return -1;
6444
6445 rewinddir (dir);
6446 while ((entry = readdir (dir)) != NULL)
6447 {
6448 struct stat st;
6449 struct statfs stfs;
6450 int fd;
6451
6452 fd = atoi (entry->d_name);
6453 if (!fd)
6454 continue;
6455
6456 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6457 if (stat (path, &st) != 0)
6458 continue;
6459 if (!S_ISDIR (st.st_mode))
6460 continue;
6461
6462 if (statfs (path, &stfs) != 0)
6463 continue;
6464 if (stfs.f_type != SPUFS_MAGIC)
6465 continue;
6466
6467 if (pos >= offset && pos + 4 <= offset + len)
6468 {
6469 *(unsigned int *)(buf + pos - offset) = fd;
6470 written += 4;
6471 }
6472 pos += 4;
6473 }
6474
6475 closedir (dir);
6476 return written;
6477 }
6478
6479 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6480 object type, using the /proc file system. */
6481 static int
6482 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6483 unsigned const char *writebuf,
6484 CORE_ADDR offset, int len)
6485 {
6486 long pid = lwpid_of (current_thread);
6487 char buf[128];
6488 int fd = 0;
6489 int ret = 0;
6490
6491 if (!writebuf && !readbuf)
6492 return -1;
6493
6494 if (!*annex)
6495 {
6496 if (!readbuf)
6497 return -1;
6498 else
6499 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6500 }
6501
6502 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6503   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6504   if (fd < 0)
6505 return -1;
6506
6507 if (offset != 0
6508 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6509 {
6510 close (fd);
6511 return 0;
6512 }
6513
6514 if (writebuf)
6515 ret = write (fd, writebuf, (size_t) len);
6516 else
6517 ret = read (fd, readbuf, (size_t) len);
6518
6519 close (fd);
6520 return ret;
6521 }
6522
6523 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6524 struct target_loadseg
6525 {
6526 /* Core address to which the segment is mapped. */
6527 Elf32_Addr addr;
6528 /* VMA recorded in the program header. */
6529 Elf32_Addr p_vaddr;
6530 /* Size of this segment in memory. */
6531 Elf32_Word p_memsz;
6532 };
6533
6534 # if defined PT_GETDSBT
6535 struct target_loadmap
6536 {
6537 /* Protocol version number, must be zero. */
6538 Elf32_Word version;
6539 /* Pointer to the DSBT table, its size, and the DSBT index. */
6540 unsigned *dsbt_table;
6541 unsigned dsbt_size, dsbt_index;
6542 /* Number of segments in this map. */
6543 Elf32_Word nsegs;
6544 /* The actual memory map. */
6545 struct target_loadseg segs[/*nsegs*/];
6546 };
6547 # define LINUX_LOADMAP PT_GETDSBT
6548 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6549 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6550 # else
6551 struct target_loadmap
6552 {
6553 /* Protocol version number, must be zero. */
6554 Elf32_Half version;
6555 /* Number of segments in this map. */
6556 Elf32_Half nsegs;
6557 /* The actual memory map. */
6558 struct target_loadseg segs[/*nsegs*/];
6559 };
6560 # define LINUX_LOADMAP PTRACE_GETFDPIC
6561 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6562 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6563 # endif
6564
6565 static int
6566 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6567 unsigned char *myaddr, unsigned int len)
6568 {
6569 int pid = lwpid_of (current_thread);
6570 int addr = -1;
6571 struct target_loadmap *data = NULL;
6572 unsigned int actual_length, copy_length;
6573
6574 if (strcmp (annex, "exec") == 0)
6575 addr = (int) LINUX_LOADMAP_EXEC;
6576 else if (strcmp (annex, "interp") == 0)
6577 addr = (int) LINUX_LOADMAP_INTERP;
6578 else
6579 return -1;
6580
6581 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6582 return -1;
6583
6584 if (data == NULL)
6585 return -1;
6586
6587 actual_length = sizeof (struct target_loadmap)
6588 + sizeof (struct target_loadseg) * data->nsegs;
6589
6590   if (offset > actual_length)
6591 return -1;
6592
6593 copy_length = actual_length - offset < len ? actual_length - offset : len;
6594 memcpy (myaddr, (char *) data + offset, copy_length);
6595 return copy_length;
6596 }
6597 #else
6598 # define linux_read_loadmap NULL
6599 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6600
6601 static void
6602 linux_process_qsupported (char **features, int count)
6603 {
6604 if (the_low_target.process_qsupported != NULL)
6605 the_low_target.process_qsupported (features, count);
6606 }
6607
6608 static int
6609 linux_supports_catch_syscall (void)
6610 {
6611 return (the_low_target.get_syscall_trapinfo != NULL
6612 && linux_supports_tracesysgood ());
6613 }
6614
6615 static int
6616 linux_get_ipa_tdesc_idx (void)
6617 {
6618 if (the_low_target.get_ipa_tdesc_idx == NULL)
6619 return 0;
6620
6621 return (*the_low_target.get_ipa_tdesc_idx) ();
6622 }
6623
6624 static int
6625 linux_supports_tracepoints (void)
6626 {
6627 if (*the_low_target.supports_tracepoints == NULL)
6628 return 0;
6629
6630 return (*the_low_target.supports_tracepoints) ();
6631 }
6632
6633 static CORE_ADDR
6634 linux_read_pc (struct regcache *regcache)
6635 {
6636 if (the_low_target.get_pc == NULL)
6637 return 0;
6638
6639 return (*the_low_target.get_pc) (regcache);
6640 }
6641
6642 static void
6643 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6644 {
6645 gdb_assert (the_low_target.set_pc != NULL);
6646
6647 (*the_low_target.set_pc) (regcache, pc);
6648 }
6649
6650 static int
6651 linux_thread_stopped (struct thread_info *thread)
6652 {
6653 return get_thread_lwp (thread)->stopped;
6654 }
6655
6656 /* This exposes stop-all-threads functionality to other modules. */
6657
6658 static void
6659 linux_pause_all (int freeze)
6660 {
6661 stop_all_lwps (freeze, NULL);
6662 }
6663
6664 /* This exposes unstop-all-threads functionality to other gdbserver
6665 modules. */
6666
6667 static void
6668 linux_unpause_all (int unfreeze)
6669 {
6670 unstop_all_lwps (unfreeze, NULL);
6671 }
6672
6673 static int
6674 linux_prepare_to_access_memory (void)
6675 {
6676 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6677 running LWP. */
6678 if (non_stop)
6679 linux_pause_all (1);
6680 return 0;
6681 }
6682
6683 static void
6684 linux_done_accessing_memory (void)
6685 {
6686 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6687 running LWP. */
6688 if (non_stop)
6689 linux_unpause_all (1);
6690 }
6691
6692 static int
6693 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6694 CORE_ADDR collector,
6695 CORE_ADDR lockaddr,
6696 ULONGEST orig_size,
6697 CORE_ADDR *jump_entry,
6698 CORE_ADDR *trampoline,
6699 ULONGEST *trampoline_size,
6700 unsigned char *jjump_pad_insn,
6701 ULONGEST *jjump_pad_insn_size,
6702 CORE_ADDR *adjusted_insn_addr,
6703 CORE_ADDR *adjusted_insn_addr_end,
6704 char *err)
6705 {
6706 return (*the_low_target.install_fast_tracepoint_jump_pad)
6707 (tpoint, tpaddr, collector, lockaddr, orig_size,
6708 jump_entry, trampoline, trampoline_size,
6709 jjump_pad_insn, jjump_pad_insn_size,
6710 adjusted_insn_addr, adjusted_insn_addr_end,
6711 err);
6712 }
6713
6714 static struct emit_ops *
6715 linux_emit_ops (void)
6716 {
6717 if (the_low_target.emit_ops != NULL)
6718 return (*the_low_target.emit_ops) ();
6719 else
6720 return NULL;
6721 }
6722
6723 static int
6724 linux_get_min_fast_tracepoint_insn_len (void)
6725 {
6726 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6727 }
6728
6729 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6730
6731 static int
6732 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6733 CORE_ADDR *phdr_memaddr, int *num_phdr)
6734 {
6735 char filename[PATH_MAX];
6736 int fd;
6737 const int auxv_size = is_elf64
6738 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6739 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6740
6741 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6742
6743 fd = open (filename, O_RDONLY);
6744 if (fd < 0)
6745 return 1;
6746
6747 *phdr_memaddr = 0;
6748 *num_phdr = 0;
6749 while (read (fd, buf, auxv_size) == auxv_size
6750 && (*phdr_memaddr == 0 || *num_phdr == 0))
6751 {
6752 if (is_elf64)
6753 {
6754 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6755
6756 switch (aux->a_type)
6757 {
6758 case AT_PHDR:
6759 *phdr_memaddr = aux->a_un.a_val;
6760 break;
6761 case AT_PHNUM:
6762 *num_phdr = aux->a_un.a_val;
6763 break;
6764 }
6765 }
6766 else
6767 {
6768 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6769
6770 switch (aux->a_type)
6771 {
6772 case AT_PHDR:
6773 *phdr_memaddr = aux->a_un.a_val;
6774 break;
6775 case AT_PHNUM:
6776 *num_phdr = aux->a_un.a_val;
6777 break;
6778 }
6779 }
6780 }
6781
6782 close (fd);
6783
6784 if (*phdr_memaddr == 0 || *num_phdr == 0)
6785 {
6786 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6787 "phdr_memaddr = %ld, phdr_num = %d",
6788 (long) *phdr_memaddr, *num_phdr);
6789 return 2;
6790 }
6791
6792 return 0;
6793 }
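/* Illustration (hypothetical values): each auxv record read above is
   an Elf{32,64}_auxv_t pair, so a record such as { a_type = AT_PHDR,
   a_un.a_val = 0x400040 } sets *PHDR_MEMADDR to 0x400040, and
   { a_type = AT_PHNUM, a_un.a_val = 9 } sets *NUM_PHDR to 9.  */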
6794
6795 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6796
6797 static CORE_ADDR
6798 get_dynamic (const int pid, const int is_elf64)
6799 {
6800 CORE_ADDR phdr_memaddr, relocation;
6801 int num_phdr, i;
6802 unsigned char *phdr_buf;
6803 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6804
6805 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6806 return 0;
6807
6808 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6809 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6810
6811 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6812 return 0;
6813
6814 /* Compute relocation: it is expected to be 0 for "regular" executables,
6815 non-zero for PIE ones. */
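  /* Worked example (hypothetical addresses): a PIE whose PT_PHDR entry
     has p_vaddr == 0x40 but whose program headers are mapped at
     phdr_memaddr == 0x555555554040 yields
     relocation == 0x555555554040 - 0x40 == 0x555555554000.  */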
6816 relocation = -1;
6817 for (i = 0; relocation == -1 && i < num_phdr; i++)
6818 if (is_elf64)
6819 {
6820 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6821
6822 if (p->p_type == PT_PHDR)
6823 relocation = phdr_memaddr - p->p_vaddr;
6824 }
6825 else
6826 {
6827 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6828
6829 if (p->p_type == PT_PHDR)
6830 relocation = phdr_memaddr - p->p_vaddr;
6831 }
6832
6833 if (relocation == -1)
6834 {
6835       /* PT_PHDR is optional, but necessary for PIE in general.
6836 	 Fortunately, all real-world executables, including PIE
6837 	 executables, always have PT_PHDR present.  PT_PHDR is absent
6838 	 from some shared libraries and from fpc (Free Pascal 2.4)
6839 	 binaries, but neither of those needs or provides DT_DEBUG
6840 	 anyway (fpc binaries are statically linked).
6841 
6842 	 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
6843 	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6844
6845 return 0;
6846 }
6847
6848 for (i = 0; i < num_phdr; i++)
6849 {
6850 if (is_elf64)
6851 {
6852 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6853
6854 if (p->p_type == PT_DYNAMIC)
6855 return p->p_vaddr + relocation;
6856 }
6857 else
6858 {
6859 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6860
6861 if (p->p_type == PT_DYNAMIC)
6862 return p->p_vaddr + relocation;
6863 }
6864 }
6865
6866 return 0;
6867 }
6868
6869 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6870 can be 0 if the inferior does not yet have the library list initialized.
6871 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6872 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6873
6874 static CORE_ADDR
6875 get_r_debug (const int pid, const int is_elf64)
6876 {
6877 CORE_ADDR dynamic_memaddr;
6878 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6879 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6880 CORE_ADDR map = -1;
6881
6882 dynamic_memaddr = get_dynamic (pid, is_elf64);
6883 if (dynamic_memaddr == 0)
6884 return map;
6885
6886 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6887 {
6888 if (is_elf64)
6889 {
6890 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6891 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6892 union
6893 {
6894 Elf64_Xword map;
6895 unsigned char buf[sizeof (Elf64_Xword)];
6896 }
6897 rld_map;
6898 #endif
6899 #ifdef DT_MIPS_RLD_MAP
6900 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6901 {
6902 if (linux_read_memory (dyn->d_un.d_val,
6903 rld_map.buf, sizeof (rld_map.buf)) == 0)
6904 return rld_map.map;
6905 else
6906 break;
6907 }
6908 #endif /* DT_MIPS_RLD_MAP */
6909 #ifdef DT_MIPS_RLD_MAP_REL
6910 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6911 {
6912 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6913 rld_map.buf, sizeof (rld_map.buf)) == 0)
6914 return rld_map.map;
6915 else
6916 break;
6917 }
6918 #endif /* DT_MIPS_RLD_MAP_REL */
6919
6920 if (dyn->d_tag == DT_DEBUG && map == -1)
6921 map = dyn->d_un.d_val;
6922
6923 if (dyn->d_tag == DT_NULL)
6924 break;
6925 }
6926 else
6927 {
6928 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6929 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6930 union
6931 {
6932 Elf32_Word map;
6933 unsigned char buf[sizeof (Elf32_Word)];
6934 }
6935 rld_map;
6936 #endif
6937 #ifdef DT_MIPS_RLD_MAP
6938 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6939 {
6940 if (linux_read_memory (dyn->d_un.d_val,
6941 rld_map.buf, sizeof (rld_map.buf)) == 0)
6942 return rld_map.map;
6943 else
6944 break;
6945 }
6946 #endif /* DT_MIPS_RLD_MAP */
6947 #ifdef DT_MIPS_RLD_MAP_REL
6948 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6949 {
6950 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6951 rld_map.buf, sizeof (rld_map.buf)) == 0)
6952 return rld_map.map;
6953 else
6954 break;
6955 }
6956 #endif /* DT_MIPS_RLD_MAP_REL */
6957
6958 if (dyn->d_tag == DT_DEBUG && map == -1)
6959 map = dyn->d_un.d_val;
6960
6961 if (dyn->d_tag == DT_NULL)
6962 break;
6963 }
6964
6965 dynamic_memaddr += dyn_size;
6966 }
6967
6968 return map;
6969 }
6970
6971 /* Read one pointer from MEMADDR in the inferior. */
6972
6973 static int
6974 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6975 {
6976 int ret;
6977
6978 /* Go through a union so this works on either big or little endian
6979 hosts, when the inferior's pointer size is smaller than the size
6980 of CORE_ADDR. It is assumed the inferior's endianness is the
6981      same as the superior's.  */
6982 union
6983 {
6984 CORE_ADDR core_addr;
6985 unsigned int ui;
6986 unsigned char uc;
6987 } addr;
6988
6989 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6990 if (ret == 0)
6991 {
6992 if (ptr_size == sizeof (CORE_ADDR))
6993 *ptr = addr.core_addr;
6994 else if (ptr_size == sizeof (unsigned int))
6995 *ptr = addr.ui;
6996 else
6997 gdb_assert_not_reached ("unhandled pointer size");
6998 }
6999 return ret;
7000 }
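/* For example, with an 8-byte CORE_ADDR and a 32-bit inferior
   (PTR_SIZE == 4), the four bytes land at the start of the union and
   are read back through ADDR.UI; reading them through ADDR.CORE_ADDR
   instead would yield the wrong value on a big-endian host.  */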
7001
7002 struct link_map_offsets
7003 {
7004   /* Offset of r_debug.r_version.  */
7005 int r_version_offset;
7006
7007   /* Offset of r_debug.r_map.  */
7008 int r_map_offset;
7009
7010 /* Offset to l_addr field in struct link_map. */
7011 int l_addr_offset;
7012
7013 /* Offset to l_name field in struct link_map. */
7014 int l_name_offset;
7015
7016 /* Offset to l_ld field in struct link_map. */
7017 int l_ld_offset;
7018
7019 /* Offset to l_next field in struct link_map. */
7020 int l_next_offset;
7021
7022 /* Offset to l_prev field in struct link_map. */
7023 int l_prev_offset;
7024 };
7025
7026 /* Construct qXfer:libraries-svr4:read reply. */
7027
7028 static int
7029 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7030 unsigned const char *writebuf,
7031 CORE_ADDR offset, int len)
7032 {
7033 char *document;
7034 unsigned document_len;
7035 struct process_info_private *const priv = current_process ()->priv;
7036 char filename[PATH_MAX];
7037 int pid, is_elf64;
7038
7039 static const struct link_map_offsets lmo_32bit_offsets =
7040 {
7041 0, /* r_version offset. */
7042 4, /* r_debug.r_map offset. */
7043 0, /* l_addr offset in link_map. */
7044 4, /* l_name offset in link_map. */
7045 8, /* l_ld offset in link_map. */
7046 12, /* l_next offset in link_map. */
7047 16 /* l_prev offset in link_map. */
7048 };
7049
7050 static const struct link_map_offsets lmo_64bit_offsets =
7051 {
7052 0, /* r_version offset. */
7053 8, /* r_debug.r_map offset. */
7054 0, /* l_addr offset in link_map. */
7055 8, /* l_name offset in link_map. */
7056 16, /* l_ld offset in link_map. */
7057 24, /* l_next offset in link_map. */
7058 32 /* l_prev offset in link_map. */
7059 };
7060 const struct link_map_offsets *lmo;
7061 unsigned int machine;
7062 int ptr_size;
7063 CORE_ADDR lm_addr = 0, lm_prev = 0;
7064 int allocated = 1024;
7065 char *p;
7066 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7067 int header_done = 0;
7068
7069 if (writebuf != NULL)
7070 return -2;
7071 if (readbuf == NULL)
7072 return -1;
7073
7074 pid = lwpid_of (current_thread);
7075 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7076 is_elf64 = elf_64_file_p (filename, &machine);
7077 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7078 ptr_size = is_elf64 ? 8 : 4;
7079
7080 while (annex[0] != '\0')
7081 {
7082 const char *sep;
7083 CORE_ADDR *addrp;
7084 int len;
7085
7086 sep = strchr (annex, '=');
7087 if (sep == NULL)
7088 break;
7089
7090 len = sep - annex;
7091 if (len == 5 && startswith (annex, "start"))
7092 addrp = &lm_addr;
7093 else if (len == 4 && startswith (annex, "prev"))
7094 addrp = &lm_prev;
7095 else
7096 {
7097 annex = strchr (sep, ';');
7098 if (annex == NULL)
7099 break;
7100 annex++;
7101 continue;
7102 }
7103
7104 annex = decode_address_to_semicolon (addrp, sep + 1);
7105 }
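  /* For example (illustrative values): an annex of
     "start=0x7ffff7ffd9f0;prev=0;" makes the walk below resume at that
     link_map entry instead of starting from r_debug.r_map.  */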
7106
7107 if (lm_addr == 0)
7108 {
7109 int r_version = 0;
7110
7111 if (priv->r_debug == 0)
7112 priv->r_debug = get_r_debug (pid, is_elf64);
7113
7114       /* We failed to find DT_DEBUG.  That situation will not change
7115 	 for this inferior, so do not retry it.  Report it to GDB as
7116 	 E01; see GDB's solib-svr4.c for the reasons.  */
7117 if (priv->r_debug == (CORE_ADDR) -1)
7118 return -1;
7119
7120 if (priv->r_debug != 0)
7121 {
7122 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7123 (unsigned char *) &r_version,
7124 sizeof (r_version)) != 0
7125 || r_version != 1)
7126 {
7127 warning ("unexpected r_debug version %d", r_version);
7128 }
7129 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7130 &lm_addr, ptr_size) != 0)
7131 {
7132 warning ("unable to read r_map from 0x%lx",
7133 (long) priv->r_debug + lmo->r_map_offset);
7134 }
7135 }
7136 }
7137
7138 document = (char *) xmalloc (allocated);
7139 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7140 p = document + strlen (document);
7141
7142 while (lm_addr
7143 && read_one_ptr (lm_addr + lmo->l_name_offset,
7144 &l_name, ptr_size) == 0
7145 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7146 &l_addr, ptr_size) == 0
7147 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7148 &l_ld, ptr_size) == 0
7149 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7150 &l_prev, ptr_size) == 0
7151 && read_one_ptr (lm_addr + lmo->l_next_offset,
7152 &l_next, ptr_size) == 0)
7153 {
7154 unsigned char libname[PATH_MAX];
7155
7156 if (lm_prev != l_prev)
7157 {
7158 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7159 (long) lm_prev, (long) l_prev);
7160 break;
7161 }
7162
7163       /* Ignore the first entry, even if it has a valid name, as it
7164 	 corresponds to the main executable.  The first entry should not
7165 	 be skipped if the dynamic loader was loaded late by a static
7166 	 executable (see the solib-svr4.c parameter ignore_first); but in
7167 	 that case the main executable has no PT_DYNAMIC present, and this
7168 	 function has already exited above due to a failed get_r_debug.  */
7169 if (lm_prev == 0)
7170 {
7171 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7172 p = p + strlen (p);
7173 }
7174 else
7175 {
7176 /* Not checking for error because reading may stop before
7177 we've got PATH_MAX worth of characters. */
7178 libname[0] = '\0';
7179 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7180 libname[sizeof (libname) - 1] = '\0';
7181 if (libname[0] != '\0')
7182 {
7183 /* 6x the size for xml_escape_text below. */
7184 size_t len = 6 * strlen ((char *) libname);
7185 char *name;
7186
7187 if (!header_done)
7188 {
7189 /* Terminate `<library-list-svr4'. */
7190 *p++ = '>';
7191 header_done = 1;
7192 }
7193
7194 while (allocated < p - document + len + 200)
7195 {
7196 /* Expand to guarantee sufficient storage. */
7197 uintptr_t document_len = p - document;
7198
7199 document = (char *) xrealloc (document, 2 * allocated);
7200 allocated *= 2;
7201 p = document + document_len;
7202 }
7203
7204 name = xml_escape_text ((char *) libname);
7205 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7206 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7207 name, (unsigned long) lm_addr,
7208 (unsigned long) l_addr, (unsigned long) l_ld);
7209 free (name);
7210 }
7211 }
7212
7213 lm_prev = lm_addr;
7214 lm_addr = l_next;
7215 }
7216
7217 if (!header_done)
7218 {
7219 /* Empty list; terminate `<library-list-svr4'. */
7220 strcpy (p, "/>");
7221 }
7222 else
7223 strcpy (p, "</library-list-svr4>");
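  /* The generated reply therefore looks like (illustrative values,
     whitespace added for readability):

       <library-list-svr4 version="1.0" main-lm="0x7ffff7ffd9f0">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ff94b0"
	 l_addr="0x7ffff7a0e000" l_ld="0x7ffff7dcdb80"/>
       </library-list-svr4>  */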
7224
7225 document_len = strlen (document);
7226 if (offset < document_len)
7227 document_len -= offset;
7228 else
7229 document_len = 0;
7230 if (len > document_len)
7231 len = document_len;
7232
7233 memcpy (readbuf, document + offset, len);
7234 xfree (document);
7235
7236 return len;
7237 }
7238
7239 #ifdef HAVE_LINUX_BTRACE
7240
7241 /* See to_disable_btrace target method. */
7242
7243 static int
7244 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7245 {
7246 enum btrace_error err;
7247
7248 err = linux_disable_btrace (tinfo);
7249 return (err == BTRACE_ERR_NONE ? 0 : -1);
7250 }
7251
7252 /* Encode an Intel Processor Trace configuration. */
7253
7254 static void
7255 linux_low_encode_pt_config (struct buffer *buffer,
7256 const struct btrace_data_pt_config *config)
7257 {
7258 buffer_grow_str (buffer, "<pt-config>\n");
7259
7260 switch (config->cpu.vendor)
7261 {
7262 case CV_INTEL:
7263 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7264 "model=\"%u\" stepping=\"%u\"/>\n",
7265 config->cpu.family, config->cpu.model,
7266 config->cpu.stepping);
7267 break;
7268
7269 default:
7270 break;
7271 }
7272
7273 buffer_grow_str (buffer, "</pt-config>\n");
7274 }
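/* For a GenuineIntel CPU the function above emits, e.g. (illustrative
   values):

     <pt-config>
     <cpu vendor="GenuineIntel" family="6" model="63" stepping="2"/>
     </pt-config>  */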
7275
7276 /* Encode a raw buffer. */
7277
7278 static void
7279 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7280 unsigned int size)
7281 {
7282 if (size == 0)
7283 return;
7284
7285 /* We use hex encoding - see common/rsp-low.h. */
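  /* For example, the byte 0xa5 is emitted as the two ASCII characters
     "a5", high nibble first.  */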
7286 buffer_grow_str (buffer, "<raw>\n");
7287
7288 while (size-- > 0)
7289 {
7290 char elem[2];
7291
7292 elem[0] = tohex ((*data >> 4) & 0xf);
7293 elem[1] = tohex (*data++ & 0xf);
7294
7295 buffer_grow (buffer, elem, 2);
7296 }
7297
7298 buffer_grow_str (buffer, "</raw>\n");
7299 }
7300
7301 /* See to_read_btrace target method. */
7302
7303 static int
7304 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7305 enum btrace_read_type type)
7306 {
7307 struct btrace_data btrace;
7308 struct btrace_block *block;
7309 enum btrace_error err;
7310 int i;
7311
7312 btrace_data_init (&btrace);
7313
7314 err = linux_read_btrace (&btrace, tinfo, type);
7315 if (err != BTRACE_ERR_NONE)
7316 {
7317 if (err == BTRACE_ERR_OVERFLOW)
7318 buffer_grow_str0 (buffer, "E.Overflow.");
7319 else
7320 buffer_grow_str0 (buffer, "E.Generic Error.");
7321
7322 goto err;
7323 }
7324
7325 switch (btrace.format)
7326 {
7327 case BTRACE_FORMAT_NONE:
7328 buffer_grow_str0 (buffer, "E.No Trace.");
7329 goto err;
7330
7331 case BTRACE_FORMAT_BTS:
7332 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7333 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7334
7335 for (i = 0;
7336 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7337 i++)
7338 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7339 paddress (block->begin), paddress (block->end));
7340
7341 buffer_grow_str0 (buffer, "</btrace>\n");
7342 break;
7343
7344 case BTRACE_FORMAT_PT:
7345 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7346 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7347 buffer_grow_str (buffer, "<pt>\n");
7348
7349 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7350
7351 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7352 btrace.variant.pt.size);
7353
7354 buffer_grow_str (buffer, "</pt>\n");
7355 buffer_grow_str0 (buffer, "</btrace>\n");
7356 break;
7357
7358 default:
7359 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7360 goto err;
7361 }
7362
7363 btrace_data_fini (&btrace);
7364 return 0;
7365
7366 err:
7367 btrace_data_fini (&btrace);
7368 return -1;
7369 }
7370
7371 /* See to_btrace_conf target method. */
7372
7373 static int
7374 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7375 struct buffer *buffer)
7376 {
7377 const struct btrace_config *conf;
7378
7379 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7380 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7381
7382 conf = linux_btrace_conf (tinfo);
7383 if (conf != NULL)
7384 {
7385 switch (conf->format)
7386 {
7387 case BTRACE_FORMAT_NONE:
7388 break;
7389
7390 case BTRACE_FORMAT_BTS:
7391 buffer_xml_printf (buffer, "<bts");
7392 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7393 buffer_xml_printf (buffer, " />\n");
7394 break;
7395
7396 case BTRACE_FORMAT_PT:
7397 buffer_xml_printf (buffer, "<pt");
7398 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7399 buffer_xml_printf (buffer, "/>\n");
7400 break;
7401 }
7402 }
7403
7404 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7405 return 0;
7406 }
7407 #endif /* HAVE_LINUX_BTRACE */
7408
7409 /* See nat/linux-nat.h. */
7410
7411 ptid_t
7412 current_lwp_ptid (void)
7413 {
7414 return ptid_of (current_thread);
7415 }
7416
7417 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7418
7419 static int
7420 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7421 {
7422 if (the_low_target.breakpoint_kind_from_pc != NULL)
7423 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7424 else
7425 return default_breakpoint_kind_from_pc (pcptr);
7426 }
7427
7428 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7429
7430 static const gdb_byte *
7431 linux_sw_breakpoint_from_kind (int kind, int *size)
7432 {
7433 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7434
7435 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7436 }
7437
7438 /* Implementation of the target_ops method
7439 "breakpoint_kind_from_current_state". */
7440
7441 static int
7442 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7443 {
7444 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7445 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7446 else
7447 return linux_breakpoint_kind_from_pc (pcptr);
7448 }
7449
7450 /* Default implementation of linux_target_ops method "set_pc" for
7451 32-bit pc register which is literally named "pc". */
7452
7453 void
7454 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7455 {
7456 uint32_t newpc = pc;
7457
7458 supply_register_by_name (regcache, "pc", &newpc);
7459 }
7460
7461 /* Default implementation of linux_target_ops method "get_pc" for
7462 32-bit pc register which is literally named "pc". */
7463
7464 CORE_ADDR
7465 linux_get_pc_32bit (struct regcache *regcache)
7466 {
7467 uint32_t pc;
7468
7469 collect_register_by_name (regcache, "pc", &pc);
7470 if (debug_threads)
7471 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7472 return pc;
7473 }
7474
7475 /* Default implementation of linux_target_ops method "set_pc" for
7476 64-bit pc register which is literally named "pc". */
7477
7478 void
7479 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7480 {
7481 uint64_t newpc = pc;
7482
7483 supply_register_by_name (regcache, "pc", &newpc);
7484 }
7485
7486 /* Default implementation of linux_target_ops method "get_pc" for
7487 64-bit pc register which is literally named "pc". */
7488
7489 CORE_ADDR
7490 linux_get_pc_64bit (struct regcache *regcache)
7491 {
7492 uint64_t pc;
7493
7494 collect_register_by_name (regcache, "pc", &pc);
7495 if (debug_threads)
7496 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7497 return pc;
7498 }
7499
7500
7501 static struct target_ops linux_target_ops = {
7502 linux_create_inferior,
7503 linux_post_create_inferior,
7504 linux_attach,
7505 linux_kill,
7506 linux_detach,
7507 linux_mourn,
7508 linux_join,
7509 linux_thread_alive,
7510 linux_resume,
7511 linux_wait,
7512 linux_fetch_registers,
7513 linux_store_registers,
7514 linux_prepare_to_access_memory,
7515 linux_done_accessing_memory,
7516 linux_read_memory,
7517 linux_write_memory,
7518 linux_look_up_symbols,
7519 linux_request_interrupt,
7520 linux_read_auxv,
7521 linux_supports_z_point_type,
7522 linux_insert_point,
7523 linux_remove_point,
7524 linux_stopped_by_sw_breakpoint,
7525 linux_supports_stopped_by_sw_breakpoint,
7526 linux_stopped_by_hw_breakpoint,
7527 linux_supports_stopped_by_hw_breakpoint,
7528 linux_supports_hardware_single_step,
7529 linux_stopped_by_watchpoint,
7530 linux_stopped_data_address,
7531 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7532 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7533 && defined(PT_TEXT_END_ADDR)
7534 linux_read_offsets,
7535 #else
7536 NULL,
7537 #endif
7538 #ifdef USE_THREAD_DB
7539 thread_db_get_tls_address,
7540 #else
7541 NULL,
7542 #endif
7543 linux_qxfer_spu,
7544 hostio_last_error_from_errno,
7545 linux_qxfer_osdata,
7546 linux_xfer_siginfo,
7547 linux_supports_non_stop,
7548 linux_async,
7549 linux_start_non_stop,
7550 linux_supports_multi_process,
7551 linux_supports_fork_events,
7552 linux_supports_vfork_events,
7553 linux_supports_exec_events,
7554 linux_handle_new_gdb_connection,
7555 #ifdef USE_THREAD_DB
7556 thread_db_handle_monitor_command,
7557 #else
7558 NULL,
7559 #endif
7560 linux_common_core_of_thread,
7561 linux_read_loadmap,
7562 linux_process_qsupported,
7563 linux_supports_tracepoints,
7564 linux_read_pc,
7565 linux_write_pc,
7566 linux_thread_stopped,
7567 NULL,
7568 linux_pause_all,
7569 linux_unpause_all,
7570 linux_stabilize_threads,
7571 linux_install_fast_tracepoint_jump_pad,
7572 linux_emit_ops,
7573 linux_supports_disable_randomization,
7574 linux_get_min_fast_tracepoint_insn_len,
7575 linux_qxfer_libraries_svr4,
7576 linux_supports_agent,
7577 #ifdef HAVE_LINUX_BTRACE
7578 linux_supports_btrace,
7579 linux_enable_btrace,
7580 linux_low_disable_btrace,
7581 linux_low_read_btrace,
7582 linux_low_btrace_conf,
7583 #else
7584 NULL,
7585 NULL,
7586 NULL,
7587 NULL,
7588 NULL,
7589 #endif
7590 linux_supports_range_stepping,
7591 linux_proc_pid_to_exec_file,
7592 linux_mntns_open_cloexec,
7593 linux_mntns_unlink,
7594 linux_mntns_readlink,
7595 linux_breakpoint_kind_from_pc,
7596 linux_sw_breakpoint_from_kind,
7597 linux_proc_tid_get_name,
7598 linux_breakpoint_kind_from_current_state,
7599 linux_supports_software_single_step,
7600 linux_supports_catch_syscall,
7601 linux_get_ipa_tdesc_idx,
7602 };
7603
7604 #ifdef HAVE_LINUX_REGSETS
7605 void
7606 initialize_regsets_info (struct regsets_info *info)
7607 {
7608 for (info->num_regsets = 0;
7609 info->regsets[info->num_regsets].size >= 0;
7610 info->num_regsets++)
7611 ;
7612 }
7613 #endif
7614
7615 void
7616 initialize_low (void)
7617 {
7618 struct sigaction sigchld_action;
7619
7620 memset (&sigchld_action, 0, sizeof (sigchld_action));
7621 set_target_ops (&linux_target_ops);
7622
7623 linux_ptrace_init_warnings ();
7624
7625 sigchld_action.sa_handler = sigchld_handler;
7626 sigemptyset (&sigchld_action.sa_mask);
7627 sigchld_action.sa_flags = SA_RESTART;
7628 sigaction (SIGCHLD, &sigchld_action, NULL);
7629
7630 initialize_low_arch ();
7631
7632 linux_check_ptrace_features ();
7633 }