Fix lwp_suspend/unsuspend imbalance in linux_wait_1
gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25 #include "signals-state-save-restore.h"
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN already defines these since at least 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
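/* Record that process PID stopped with STATUS before anyone waited
   for it, by pushing a new entry onto *LISTP.  */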
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
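/* If PID is on *LISTP, remove its entry, store its waitpid status in
   *STATUSP, and return 1.  Return 0 if PID is not on the list.  */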
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static void unsuspend_all_lwps (struct lwp_info *except);
256 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259 static struct lwp_info *add_lwp (ptid_t ptid);
260 static void linux_mourn (struct process_info *process);
261 static int linux_stopped_by_watchpoint (void);
262 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263 static int lwp_is_marked_dead (struct lwp_info *lwp);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268 static void complete_ongoing_step_over (void);
269 static int linux_low_ptrace_options (int attached);
270 static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
271 static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);
272
273 /* When the event-loop is doing a step-over, this points at the thread
274 being stepped. */
275 ptid_t step_over_bkpt;
276
277 /* True if the low target can hardware single-step. */
278
279 static int
280 can_hardware_single_step (void)
281 {
282 if (the_low_target.supports_hardware_single_step != NULL)
283 return the_low_target.supports_hardware_single_step ();
284 else
285 return 0;
286 }
287
288 /* True if the low target can software single-step. Such targets
289 implement the GET_NEXT_PCS callback. */
290
291 static int
292 can_software_single_step (void)
293 {
294 return (the_low_target.get_next_pcs != NULL);
295 }
296
297 /* True if the low target supports memory breakpoints. If so, we'll
298 have a GET_PC implementation. */
299
300 static int
301 supports_breakpoints (void)
302 {
303 return (the_low_target.get_pc != NULL);
304 }
305
306 /* Returns true if this target can support fast tracepoints. This
307 does not mean that the in-process agent has been loaded in the
308 inferior. */
309
310 static int
311 supports_fast_tracepoints (void)
312 {
313 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
314 }
315
316 /* True if LWP is stopped in its stepping range. */
317
318 static int
319 lwp_in_step_range (struct lwp_info *lwp)
320 {
321 CORE_ADDR pc = lwp->stop_pc;
322
323 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
324 }
325
326 struct pending_signals
327 {
328 int signal;
329 siginfo_t info;
330 struct pending_signals *prev;
331 };
332
333 /* The read/write ends of the pipe registered as waitable file in the
334 event loop. */
335 static int linux_event_pipe[2] = { -1, -1 };
336
337 /* True if we're currently in async mode. */
338 #define target_is_async_p() (linux_event_pipe[0] != -1)
339
340 static void send_sigstop (struct lwp_info *lwp);
341 static void wait_for_sigstop (void);
342
343 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit ELF, and -1 if it is not an ELF file at all. Sets *MACHINE (EM_NONE if not ELF). */
344
345 static int
346 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
347 {
348 if (header->e_ident[EI_MAG0] == ELFMAG0
349 && header->e_ident[EI_MAG1] == ELFMAG1
350 && header->e_ident[EI_MAG2] == ELFMAG2
351 && header->e_ident[EI_MAG3] == ELFMAG3)
352 {
353 *machine = header->e_machine;
354 return header->e_ident[EI_CLASS] == ELFCLASS64;
355
356 }
357 *machine = EM_NONE;
358 return -1;
359 }
360
361 /* Return 1 if FILE is a 64-bit ELF file,
362 zero if the file is not a 64-bit ELF file,
363 and -1 if the file is not accessible or doesn't exist. */
364
365 static int
366 elf_64_file_p (const char *file, unsigned int *machine)
367 {
368 Elf64_Ehdr header;
369 int fd;
370
371 fd = open (file, O_RDONLY);
372 if (fd < 0)
373 return -1;
374
375 if (read (fd, &header, sizeof (header)) != sizeof (header))
376 {
377 close (fd);
378 return 0;
379 }
380 close (fd);
381
382 return elf_64_header_p (&header, machine);
383 }
384
385 /* Accepts an integer PID; returns true if the executable that PID
386 is running is a 64-bit ELF file. */
387
388 int
389 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
390 {
391 char file[PATH_MAX];
392
393 sprintf (file, "/proc/%d/exe", pid);
394 return elf_64_file_p (file, machine);
395 }
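
/* A minimal usage sketch (hypothetical caller, not part of this
   file).  The tri-state result separates "32-bit ELF" from "could
   not check":

     unsigned int machine;
     int res = linux_pid_exe_is_elf_64_file (pid, &machine);

     if (res < 0)
       warning ("could not check /proc/%d/exe", pid);
     else if (res)
       debug_printf ("64-bit inferior, e_machine %u\n", machine);  */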
396
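/* Delete LWP: remove its thread from the thread list, and free the
   lwp together with its architecture-specific data.  */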
397 static void
398 delete_lwp (struct lwp_info *lwp)
399 {
400 struct thread_info *thr = get_lwp_thread (lwp);
401
402 if (debug_threads)
403 debug_printf ("deleting %ld\n", lwpid_of (thr));
404
405 remove_thread (thr);
406 free (lwp->arch_private);
407 free (lwp);
408 }
409
410 /* Add a process to the common process list, and set its private
411 data. */
412
413 static struct process_info *
414 linux_add_process (int pid, int attached)
415 {
416 struct process_info *proc;
417
418 proc = add_process (pid, attached);
419 proc->priv = XCNEW (struct process_info_private);
420
421 if (the_low_target.new_process != NULL)
422 proc->priv->arch_private = the_low_target.new_process ();
423
424 return proc;
425 }
426
427 static CORE_ADDR get_pc (struct lwp_info *lwp);
428
429 /* Call the target arch_setup function on the current thread. */
430
431 static void
432 linux_arch_setup (void)
433 {
434 the_low_target.arch_setup ();
435 }
436
437 /* Call the target arch_setup function on THREAD. */
438
439 static void
440 linux_arch_setup_thread (struct thread_info *thread)
441 {
442 struct thread_info *saved_thread;
443
444 saved_thread = current_thread;
445 current_thread = thread;
446
447 linux_arch_setup ();
448
449 current_thread = saved_thread;
450 }
451
452 /* Handle a GNU/Linux extended wait response. If we see a clone,
453 fork, or vfork event, we need to add the new LWP to our list
454 (and return 0 so as not to report the trap to higher layers).
455 If we see an exec event, we will modify ORIG_EVENT_LWP to point
456 to a new LWP representing the new program. */
457
458 static int
459 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
460 {
461 struct lwp_info *event_lwp = *orig_event_lwp;
462 int event = linux_ptrace_get_extended_event (wstat);
463 struct thread_info *event_thr = get_lwp_thread (event_lwp);
464 struct lwp_info *new_lwp;
465
466 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
467
468 /* All extended events we currently use are mid-syscall. Only
469 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
470 you have to be using PTRACE_SEIZE to get that. */
471 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
472
473 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
474 || (event == PTRACE_EVENT_CLONE))
475 {
476 ptid_t ptid;
477 unsigned long new_pid;
478 int ret, status;
479
480 /* Get the pid of the new lwp. */
481 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
482 &new_pid);
483
484 /* If we haven't already seen the new PID stop, wait for it now. */
485 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
486 {
487 /* The new child has a pending SIGSTOP. We can't affect it until it
488 hits the SIGSTOP, but we're already attached. */
489
490 ret = my_waitpid (new_pid, &status, __WALL);
491
492 if (ret == -1)
493 perror_with_name ("waiting for new child");
494 else if (ret != new_pid)
495 warning ("wait returned unexpected PID %d", ret);
496 else if (!WIFSTOPPED (status))
497 warning ("wait returned unexpected status 0x%x", status);
498 }
499
500 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
501 {
502 struct process_info *parent_proc;
503 struct process_info *child_proc;
504 struct lwp_info *child_lwp;
505 struct thread_info *child_thr;
506 struct target_desc *tdesc;
507
508 ptid = ptid_build (new_pid, new_pid, 0);
509
510 if (debug_threads)
511 {
512 debug_printf ("HEW: Got fork event from LWP %ld, "
513 "new child is %d\n",
514 ptid_get_lwp (ptid_of (event_thr)),
515 ptid_get_pid (ptid));
516 }
517
518 /* Add the new process to the tables and clone the breakpoint
519 lists of the parent. We need to do this even if the new process
520 will be detached, since we will need the process object and the
521 breakpoints to remove any breakpoints from memory when we
522 detach, and the client side will access registers. */
523 child_proc = linux_add_process (new_pid, 0);
524 gdb_assert (child_proc != NULL);
525 child_lwp = add_lwp (ptid);
526 gdb_assert (child_lwp != NULL);
527 child_lwp->stopped = 1;
528 child_lwp->must_set_ptrace_flags = 1;
529 child_lwp->status_pending_p = 0;
530 child_thr = get_lwp_thread (child_lwp);
531 child_thr->last_resume_kind = resume_stop;
532 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
533
534 /* If we're suspending all threads, leave this one suspended
535 too. If the fork/clone parent is stepping over a breakpoint,
536 all other threads have been suspended already. Leave the
537 child suspended too. */
538 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
539 || event_lwp->bp_reinsert != 0)
540 {
541 if (debug_threads)
542 debug_printf ("HEW: leaving child suspended\n");
543 child_lwp->suspended = 1;
544 }
545
546 parent_proc = get_thread_process (event_thr);
547 child_proc->attached = parent_proc->attached;
548
549 if (event_lwp->bp_reinsert != 0
550 && can_software_single_step ()
551 && event == PTRACE_EVENT_VFORK)
552 {
553 /* If we leave reinsert breakpoints there, child will
554 hit it, so uninsert reinsert breakpoints from parent
555 (and child). Once vfork child is done, reinsert
556 them back to parent. */
557 uninsert_reinsert_breakpoints (event_thr);
558 }
559
560 clone_all_breakpoints (child_thr, event_thr);
561
562 tdesc = XNEW (struct target_desc);
563 copy_target_description (tdesc, parent_proc->tdesc);
564 child_proc->tdesc = tdesc;
565
566 /* Clone arch-specific process data. */
567 if (the_low_target.new_fork != NULL)
568 the_low_target.new_fork (parent_proc, child_proc);
569
570 /* Save fork info in the parent thread. */
571 if (event == PTRACE_EVENT_FORK)
572 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
573 else if (event == PTRACE_EVENT_VFORK)
574 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
575
576 event_lwp->waitstatus.value.related_pid = ptid;
577
578 /* The status_pending field contains bits denoting the
579 extended event, so when the pending event is handled,
580 the handler will look at lwp->waitstatus. */
581 event_lwp->status_pending_p = 1;
582 event_lwp->status_pending = wstat;
583
584 /* If the parent thread is doing step-over with reinsert
585 breakpoints, the list of reinsert breakpoints are cloned
586 from the parent's. Remove them from the child process.
587 In case of vfork, we'll reinsert them back once vforked
588 child is done. */
589 if (event_lwp->bp_reinsert != 0
590 && can_software_single_step ())
591 {
592 /* The child process is forked and stopped, so it is safe
593 to access its memory without stopping all other threads
594 from other processes. */
595 delete_reinsert_breakpoints (child_thr);
596
597 gdb_assert (has_reinsert_breakpoints (event_thr));
598 gdb_assert (!has_reinsert_breakpoints (child_thr));
599 }
600
601 /* Report the event. */
602 return 0;
603 }
604
605 if (debug_threads)
606 debug_printf ("HEW: Got clone event "
607 "from LWP %ld, new child is LWP %ld\n",
608 lwpid_of (event_thr), new_pid);
609
610 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
611 new_lwp = add_lwp (ptid);
612
613 /* Either we're going to immediately resume the new thread
614 or leave it stopped. linux_resume_one_lwp is a nop if it
615 thinks the thread is currently running, so set this first
616 before calling linux_resume_one_lwp. */
617 new_lwp->stopped = 1;
618
619 /* If we're suspending all threads, leave this one suspended
620 too. If the fork/clone parent is stepping over a breakpoint,
621 all other threads have been suspended already. Leave the
622 child suspended too. */
623 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
624 || event_lwp->bp_reinsert != 0)
625 new_lwp->suspended = 1;
626
627 /* Normally we will get the pending SIGSTOP. But in some cases
628 we might get another signal delivered to the group first.
629 If we do get another signal, be sure not to lose it. */
630 if (WSTOPSIG (status) != SIGSTOP)
631 {
632 new_lwp->stop_expected = 1;
633 new_lwp->status_pending_p = 1;
634 new_lwp->status_pending = status;
635 }
636 else if (report_thread_events)
637 {
638 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
639 new_lwp->status_pending_p = 1;
640 new_lwp->status_pending = status;
641 }
642
643 /* Don't report the event. */
644 return 1;
645 }
646 else if (event == PTRACE_EVENT_VFORK_DONE)
647 {
648 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
649
650 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
651 {
652 reinsert_reinsert_breakpoints (event_thr);
653
654 gdb_assert (has_reinsert_breakpoints (event_thr));
655 }
656
657 /* Report the event. */
658 return 0;
659 }
660 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
661 {
662 struct process_info *proc;
663 VEC (int) *syscalls_to_catch;
664 ptid_t event_ptid;
665 pid_t event_pid;
666
667 if (debug_threads)
668 {
669 debug_printf ("HEW: Got exec event from LWP %ld\n",
670 lwpid_of (event_thr));
671 }
672
673 /* Get the event ptid. */
674 event_ptid = ptid_of (event_thr);
675 event_pid = ptid_get_pid (event_ptid);
676
677 /* Save the syscall list from the execing process. */
678 proc = get_thread_process (event_thr);
679 syscalls_to_catch = proc->syscalls_to_catch;
680 proc->syscalls_to_catch = NULL;
681
682 /* Delete the execing process and all its threads. */
683 linux_mourn (proc);
684 current_thread = NULL;
685
686 /* Create a new process/lwp/thread. */
687 proc = linux_add_process (event_pid, 0);
688 event_lwp = add_lwp (event_ptid);
689 event_thr = get_lwp_thread (event_lwp);
690 gdb_assert (current_thread == event_thr);
691 linux_arch_setup_thread (event_thr);
692
693 /* Set the event status. */
694 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
695 event_lwp->waitstatus.value.execd_pathname
696 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
697
698 /* Mark the exec status as pending. */
699 event_lwp->stopped = 1;
700 event_lwp->status_pending_p = 1;
701 event_lwp->status_pending = wstat;
702 event_thr->last_resume_kind = resume_continue;
703 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
704
705 /* Update syscall state in the new lwp, effectively mid-syscall too. */
706 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
707
708 /* Restore the list to catch. Don't rely on the client, which is free
709 to avoid sending a new list when the architecture doesn't change.
710 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
711 proc->syscalls_to_catch = syscalls_to_catch;
712
713 /* Report the event. */
714 *orig_event_lwp = event_lwp;
715 return 0;
716 }
717
718 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
719 }
720
721 /* Return the PC as read from the regcache of LWP, without any
722 adjustment. */
723
724 static CORE_ADDR
725 get_pc (struct lwp_info *lwp)
726 {
727 struct thread_info *saved_thread;
728 struct regcache *regcache;
729 CORE_ADDR pc;
730
731 if (the_low_target.get_pc == NULL)
732 return 0;
733
734 saved_thread = current_thread;
735 current_thread = get_lwp_thread (lwp);
736
737 regcache = get_thread_regcache (current_thread, 1);
738 pc = (*the_low_target.get_pc) (regcache);
739
740 if (debug_threads)
741 debug_printf ("pc is 0x%lx\n", (long) pc);
742
743 current_thread = saved_thread;
744 return pc;
745 }
746
747 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
748 Fill *SYSNO with the syscall nr trapped. */
749
750 static void
751 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
752 {
753 struct thread_info *saved_thread;
754 struct regcache *regcache;
755
756 if (the_low_target.get_syscall_trapinfo == NULL)
757 {
758 /* If we cannot get the syscall trapinfo, report an unknown
759 system call number. */
760 *sysno = UNKNOWN_SYSCALL;
761 return;
762 }
763
764 saved_thread = current_thread;
765 current_thread = get_lwp_thread (lwp);
766
767 regcache = get_thread_regcache (current_thread, 1);
768 (*the_low_target.get_syscall_trapinfo) (regcache, sysno);
769
770 if (debug_threads)
771 debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);
772
773 current_thread = saved_thread;
774 }
775
776 static int check_stopped_by_watchpoint (struct lwp_info *child);
777
778 /* Called when the LWP stopped for a signal/trap. If it stopped for a
779 trap check what caused it (breakpoint, watchpoint, trace, etc.),
780 and save the result in the LWP's stop_reason field. If it stopped
781 for a breakpoint, decrement the PC if necessary on the lwp's
782 architecture. Returns true if we now have the LWP's stop PC. */
783
784 static int
785 save_stop_reason (struct lwp_info *lwp)
786 {
787 CORE_ADDR pc;
788 CORE_ADDR sw_breakpoint_pc;
789 struct thread_info *saved_thread;
790 #if USE_SIGTRAP_SIGINFO
791 siginfo_t siginfo;
792 #endif
793
794 if (the_low_target.get_pc == NULL)
795 return 0;
796
797 pc = get_pc (lwp);
798 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
799
800 /* breakpoint_at reads from the current thread. */
801 saved_thread = current_thread;
802 current_thread = get_lwp_thread (lwp);
803
804 #if USE_SIGTRAP_SIGINFO
805 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
806 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
807 {
808 if (siginfo.si_signo == SIGTRAP)
809 {
810 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
811 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
812 {
813 /* The si_code is ambiguous on this arch -- check debug
814 registers. */
815 if (!check_stopped_by_watchpoint (lwp))
816 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
817 }
818 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
819 {
820 /* If we determine the LWP stopped for a SW breakpoint,
821 trust it. Particularly don't check watchpoint
822 registers, because at least on s390, we'd find
823 stopped-by-watchpoint as long as there's a watchpoint
824 set. */
825 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
826 }
827 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
828 {
829 /* This can indicate either a hardware breakpoint or
830 hardware watchpoint. Check debug registers. */
831 if (!check_stopped_by_watchpoint (lwp))
832 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
833 }
834 else if (siginfo.si_code == TRAP_TRACE)
835 {
836 /* We may have single stepped an instruction that
837 triggered a watchpoint. In that case, on some
838 architectures (such as x86), instead of TRAP_HWBKPT,
839 si_code indicates TRAP_TRACE, and we need to check
840 the debug registers separately. */
841 if (!check_stopped_by_watchpoint (lwp))
842 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
843 }
844 }
845 }
846 #else
847 /* We may have just stepped a breakpoint instruction. E.g., in
848 non-stop mode, GDB first tells the thread A to step a range, and
849 then the user inserts a breakpoint inside the range. In that
850 case we need to report the breakpoint PC. */
851 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
852 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
853 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
854
855 if (hardware_breakpoint_inserted_here (pc))
856 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
857
858 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
859 check_stopped_by_watchpoint (lwp);
860 #endif
861
862 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
863 {
864 if (debug_threads)
865 {
866 struct thread_info *thr = get_lwp_thread (lwp);
867
868 debug_printf ("CSBB: %s stopped by software breakpoint\n",
869 target_pid_to_str (ptid_of (thr)));
870 }
871
872 /* Back up the PC if necessary. */
873 if (pc != sw_breakpoint_pc)
874 {
875 struct regcache *regcache
876 = get_thread_regcache (current_thread, 1);
877 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
878 }
879
880 /* Update this so we record the correct stop PC below. */
881 pc = sw_breakpoint_pc;
882 }
883 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
884 {
885 if (debug_threads)
886 {
887 struct thread_info *thr = get_lwp_thread (lwp);
888
889 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
890 target_pid_to_str (ptid_of (thr)));
891 }
892 }
893 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
894 {
895 if (debug_threads)
896 {
897 struct thread_info *thr = get_lwp_thread (lwp);
898
899 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
900 target_pid_to_str (ptid_of (thr)));
901 }
902 }
903 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
904 {
905 if (debug_threads)
906 {
907 struct thread_info *thr = get_lwp_thread (lwp);
908
909 debug_printf ("CSBB: %s stopped by trace\n",
910 target_pid_to_str (ptid_of (thr)));
911 }
912 }
913
914 lwp->stop_pc = pc;
915 current_thread = saved_thread;
916 return 1;
917 }
918
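/* Allocate a new lwp_info for PTID, let the low target set up its
   private data, and add a corresponding thread to the thread list.  */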
919 static struct lwp_info *
920 add_lwp (ptid_t ptid)
921 {
922 struct lwp_info *lwp;
923
924 lwp = XCNEW (struct lwp_info);
925
926 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
927
928 if (the_low_target.new_thread != NULL)
929 the_low_target.new_thread (lwp);
930
931 lwp->thread = add_thread (ptid, lwp);
932
933 return lwp;
934 }
935
936 /* Start an inferior process and return its pid.
937 ALLARGS is a vector of program-name and args. */
938
939 static int
940 linux_create_inferior (char *program, char **allargs)
941 {
942 struct lwp_info *new_lwp;
943 int pid;
944 ptid_t ptid;
945 struct cleanup *restore_personality
946 = maybe_disable_address_space_randomization (disable_randomization);
947
948 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
949 pid = vfork ();
950 #else
951 pid = fork ();
952 #endif
953 if (pid < 0)
954 perror_with_name ("fork");
955
956 if (pid == 0)
957 {
958 close_most_fds ();
959 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
960
961 setpgid (0, 0);
962
963 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
964 stdout to stderr so that inferior i/o doesn't corrupt the connection.
965 Also, redirect stdin to /dev/null. */
966 if (remote_connection_is_stdio ())
967 {
968 close (0);
969 open ("/dev/null", O_RDONLY);
970 dup2 (2, 1);
971 if (write (2, "stdin/stdout redirected\n",
972 sizeof ("stdin/stdout redirected\n") - 1) < 0)
973 {
974 /* Errors ignored. */;
975 }
976 }
977
978 restore_original_signals_state ();
979
980 execv (program, allargs);
981 if (errno == ENOENT)
982 execvp (program, allargs);
983
984 fprintf (stderr, "Cannot exec %s: %s.\n", program,
985 strerror (errno));
986 fflush (stderr);
987 _exit (0177);
988 }
989
990 do_cleanups (restore_personality);
991
992 linux_add_process (pid, 0);
993
994 ptid = ptid_build (pid, pid, 0);
995 new_lwp = add_lwp (ptid);
996 new_lwp->must_set_ptrace_flags = 1;
997
998 return pid;
999 }
1000
1001 /* Implement the post_create_inferior target_ops method. */
1002
1003 static void
1004 linux_post_create_inferior (void)
1005 {
1006 struct lwp_info *lwp = get_thread_lwp (current_thread);
1007
1008 linux_arch_setup ();
1009
1010 if (lwp->must_set_ptrace_flags)
1011 {
1012 struct process_info *proc = current_process ();
1013 int options = linux_low_ptrace_options (proc->attached);
1014
1015 linux_enable_event_reporting (lwpid_of (current_thread), options);
1016 lwp->must_set_ptrace_flags = 0;
1017 }
1018 }
1019
1020 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1021 error. */
1022
1023 int
1024 linux_attach_lwp (ptid_t ptid)
1025 {
1026 struct lwp_info *new_lwp;
1027 int lwpid = ptid_get_lwp (ptid);
1028
1029 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1030 != 0)
1031 return errno;
1032
1033 new_lwp = add_lwp (ptid);
1034
1035 /* We need to wait for SIGSTOP before being able to make the next
1036 ptrace call on this LWP. */
1037 new_lwp->must_set_ptrace_flags = 1;
1038
1039 if (linux_proc_pid_is_stopped (lwpid))
1040 {
1041 if (debug_threads)
1042 debug_printf ("Attached to a stopped process\n");
1043
1044 /* The process is definitely stopped. It is in a job control
1045 stop, unless the kernel predates the TASK_STOPPED /
1046 TASK_TRACED distinction, in which case it might be in a
1047 ptrace stop. Make sure it is in a ptrace stop; from there we
1048 can kill it, signal it, et cetera.
1049
1050 First make sure there is a pending SIGSTOP. Since we are
1051 already attached, the process can not transition from stopped
1052 to running without a PTRACE_CONT; so we know this signal will
1053 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1054 probably already in the queue (unless this kernel is old
1055 enough to use TASK_STOPPED for ptrace stops); but since
1056 SIGSTOP is not an RT signal, it can only be queued once. */
1057 kill_lwp (lwpid, SIGSTOP);
1058
1059 /* Finally, resume the stopped process. This will deliver the
1060 SIGSTOP (or a higher priority signal, just like normal
1061 PTRACE_ATTACH), which we'll catch later on. */
1062 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1063 }
1064
1065 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1066 brings it to a halt.
1067
1068 There are several cases to consider here:
1069
1070 1) gdbserver has already attached to the process and is being notified
1071 of a new thread that is being created.
1072 In this case we should ignore that SIGSTOP and resume the
1073 process. This is handled below by setting stop_expected = 1,
1074 and the fact that add_thread sets last_resume_kind ==
1075 resume_continue.
1076
1077 2) This is the first thread (the process thread), and we're attaching
1078 to it via attach_inferior.
1079 In this case we want the process thread to stop.
1080 This is handled by having linux_attach set last_resume_kind ==
1081 resume_stop after we return.
1082
1083 If the pid we are attaching to is also the tgid, we attach to and
1084 stop all the existing threads. Otherwise, we attach to pid and
1085 ignore any other threads in the same group as this pid.
1086
1087 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1088 existing threads.
1089 In this case we want the thread to stop.
1090 FIXME: This case is currently not properly handled.
1091 We should wait for the SIGSTOP but don't. Things work apparently
1092 because enough time passes between when we ptrace (ATTACH) and when
1093 gdb makes the next ptrace call on the thread.
1094
1095 On the other hand, if we are currently trying to stop all threads, we
1096 should treat the new thread as if we had sent it a SIGSTOP. This works
1097 because we are guaranteed that the add_lwp call above added us to the
1098 end of the list, and so the new thread has not yet reached
1099 wait_for_sigstop (but will). */
1100 new_lwp->stop_expected = 1;
1101
1102 return 0;
1103 }
1104
1105 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1106 already attached. Returns true if a new LWP is found, false
1107 otherwise. */
1108
1109 static int
1110 attach_proc_task_lwp_callback (ptid_t ptid)
1111 {
1112 /* Is this a new thread? */
1113 if (find_thread_ptid (ptid) == NULL)
1114 {
1115 int lwpid = ptid_get_lwp (ptid);
1116 int err;
1117
1118 if (debug_threads)
1119 debug_printf ("Found new lwp %d\n", lwpid);
1120
1121 err = linux_attach_lwp (ptid);
1122
1123 /* Be quiet if we simply raced with the thread exiting. EPERM
1124 is returned if the thread's task still exists, and is marked
1125 as exited or zombie, as well as other conditions, so in that
1126 case, confirm the status in /proc/PID/status. */
1127 if (err == ESRCH
1128 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1129 {
1130 if (debug_threads)
1131 {
1132 debug_printf ("Cannot attach to lwp %d: "
1133 "thread is gone (%d: %s)\n",
1134 lwpid, err, strerror (err));
1135 }
1136 }
1137 else if (err != 0)
1138 {
1139 warning (_("Cannot attach to lwp %d: %s"),
1140 lwpid,
1141 linux_ptrace_attach_fail_reason_string (ptid, err));
1142 }
1143
1144 return 1;
1145 }
1146 return 0;
1147 }
1148
1149 static void async_file_mark (void);
1150
1151 /* Attach to PID. If PID is the tgid, attach to it and all
1152 of its threads. */
1153
1154 static int
1155 linux_attach (unsigned long pid)
1156 {
1157 struct process_info *proc;
1158 struct thread_info *initial_thread;
1159 ptid_t ptid = ptid_build (pid, pid, 0);
1160 int err;
1161
1162 /* Attach to PID. We will check for other threads
1163 soon. */
1164 err = linux_attach_lwp (ptid);
1165 if (err != 0)
1166 error ("Cannot attach to process %ld: %s",
1167 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1168
1169 proc = linux_add_process (pid, 1);
1170
1171 /* Don't ignore the initial SIGSTOP if we just attached to this
1172 process. It will be collected by wait shortly. */
1173 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1174 initial_thread->last_resume_kind = resume_stop;
1175
1176 /* We must attach to every LWP. If /proc is mounted, use that to
1177 find them now. On the one hand, the inferior may be using raw
1178 clone instead of using pthreads. On the other hand, even if it
1179 is using pthreads, GDB may not be connected yet (thread_db needs
1180 to do symbol lookups, through qSymbol). Also, thread_db walks
1181 structures in the inferior's address space to find the list of
1182 threads/LWPs, and those structures may well be corrupted. Note
1183 that once thread_db is loaded, we'll still use it to list threads
1184 and associate pthread info with each LWP. */
1185 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1186
1187 /* GDB will shortly read the xml target description for this
1188 process, to figure out the process' architecture. But the target
1189 description is only filled in when the first process/thread in
1190 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1191 that now, otherwise, if GDB is fast enough, it could read the
1192 target description _before_ that initial stop. */
1193 if (non_stop)
1194 {
1195 struct lwp_info *lwp;
1196 int wstat, lwpid;
1197 ptid_t pid_ptid = pid_to_ptid (pid);
1198
1199 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1200 &wstat, __WALL);
1201 gdb_assert (lwpid > 0);
1202
1203 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1204
1205 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1206 {
1207 lwp->status_pending_p = 1;
1208 lwp->status_pending = wstat;
1209 }
1210
1211 initial_thread->last_resume_kind = resume_continue;
1212
1213 async_file_mark ();
1214
1215 gdb_assert (proc->tdesc != NULL);
1216 }
1217
1218 return 0;
1219 }
1220
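/* Helper state for last_thread_of_process_p below: counts the
   threads of a given pid, so the walk can stop as soon as a second
   one is seen.  */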
1221 struct counter
1222 {
1223 int pid;
1224 int count;
1225 };
1226
1227 static int
1228 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1229 {
1230 struct counter *counter = (struct counter *) args;
1231
1232 if (ptid_get_pid (entry->id) == counter->pid)
1233 {
1234 if (++counter->count > 1)
1235 return 1;
1236 }
1237
1238 return 0;
1239 }
1240
1241 static int
1242 last_thread_of_process_p (int pid)
1243 {
1244 struct counter counter = { pid , 0 };
1245
1246 return (find_inferior (&all_threads,
1247 second_thread_of_pid_p, &counter) == NULL);
1248 }
1249
1250 /* Kill LWP. */
1251
1252 static void
1253 linux_kill_one_lwp (struct lwp_info *lwp)
1254 {
1255 struct thread_info *thr = get_lwp_thread (lwp);
1256 int pid = lwpid_of (thr);
1257
1258 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1259 there is no signal context, and ptrace(PTRACE_KILL) (or
1260 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1261 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1262 alternative is to kill with SIGKILL. We only need one SIGKILL
1263 per process, not one for each thread. But since we still
1264 support debugging programs using raw clone without CLONE_THREAD,
1265 we send one for each thread. For years, we used PTRACE_KILL
1266 only, so we're being a bit paranoid about some old kernels where
1267 PTRACE_KILL might work better (dubious if there are any such, but
1268 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1269 second, and so we're fine everywhere. */
1270
1271 errno = 0;
1272 kill_lwp (pid, SIGKILL);
1273 if (debug_threads)
1274 {
1275 int save_errno = errno;
1276
1277 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1278 target_pid_to_str (ptid_of (thr)),
1279 save_errno ? strerror (save_errno) : "OK");
1280 }
1281
1282 errno = 0;
1283 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1284 if (debug_threads)
1285 {
1286 int save_errno = errno;
1287
1288 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1289 target_pid_to_str (ptid_of (thr)),
1290 save_errno ? strerror (save_errno) : "OK");
1291 }
1292 }
1293
1294 /* Kill LWP and wait for it to die. */
1295
1296 static void
1297 kill_wait_lwp (struct lwp_info *lwp)
1298 {
1299 struct thread_info *thr = get_lwp_thread (lwp);
1300 int pid = ptid_get_pid (ptid_of (thr));
1301 int lwpid = ptid_get_lwp (ptid_of (thr));
1302 int wstat;
1303 int res;
1304
1305 if (debug_threads)
1306 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1307
1308 do
1309 {
1310 linux_kill_one_lwp (lwp);
1311
1312 /* Make sure it died. Notes:
1313
1314 - The loop is most likely unnecessary.
1315
1316 - We don't use linux_wait_for_event as that could delete lwps
1317 while we're iterating over them. We're not interested in
1318 any pending status at this point, only in making sure all
1319 wait status on the kernel side are collected until the
1320 process is reaped.
1321
1322 - We don't use __WALL here as the __WALL emulation relies on
1323 SIGCHLD, and killing a stopped process doesn't generate
1324 one, nor an exit status.
1325 */
1326 res = my_waitpid (lwpid, &wstat, 0);
1327 if (res == -1 && errno == ECHILD)
1328 res = my_waitpid (lwpid, &wstat, __WCLONE);
1329 } while (res > 0 && WIFSTOPPED (wstat));
1330
1331 /* Even if it was stopped, the child may have already disappeared.
1332 E.g., if it was killed by SIGKILL. */
1333 if (res < 0 && errno != ECHILD)
1334 perror_with_name ("kill_wait_lwp");
1335 }
1336
1337 /* Callback for `find_inferior'. Kills an lwp of a given process,
1338 except the leader. */
1339
1340 static int
1341 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1342 {
1343 struct thread_info *thread = (struct thread_info *) entry;
1344 struct lwp_info *lwp = get_thread_lwp (thread);
1345 int pid = * (int *) args;
1346
1347 if (ptid_get_pid (entry->id) != pid)
1348 return 0;
1349
1350 /* We avoid killing the first thread here, because of a Linux kernel (at
1351 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1352 the children get a chance to be reaped, the parent will remain a zombie
1353 forever. */
1354
1355 if (lwpid_of (thread) == pid)
1356 {
1357 if (debug_threads)
1358 debug_printf ("lkop: is last of process %s\n",
1359 target_pid_to_str (entry->id));
1360 return 0;
1361 }
1362
1363 kill_wait_lwp (lwp);
1364 return 0;
1365 }
1366
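/* Kill process PID and all of its LWPs, killing the thread group
   leader last.  Returns 0 on success, or -1 if PID is unknown.  */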
1367 static int
1368 linux_kill (int pid)
1369 {
1370 struct process_info *process;
1371 struct lwp_info *lwp;
1372
1373 process = find_process_pid (pid);
1374 if (process == NULL)
1375 return -1;
1376
1377 /* If we're killing a running inferior, make sure it is stopped
1378 first, as PTRACE_KILL will not work otherwise. */
1379 stop_all_lwps (0, NULL);
1380
1381 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1382
1383 /* See the comment in linux_kill_one_lwp. We did not kill the first
1384 thread in the list, so do so now. */
1385 lwp = find_lwp_pid (pid_to_ptid (pid));
1386
1387 if (lwp == NULL)
1388 {
1389 if (debug_threads)
1390 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1391 pid);
1392 }
1393 else
1394 kill_wait_lwp (lwp);
1395
1396 the_target->mourn (process);
1397
1398 /* Since we presently can only stop all lwps of all processes, we
1399 need to unstop lwps of other processes. */
1400 unstop_all_lwps (0, NULL);
1401 return 0;
1402 }
1403
1404 /* Get pending signal of THREAD, for detaching purposes. This is the
1405 signal the thread last stopped for, which we need to deliver to the
1406 thread when detaching, otherwise, it'd be suppressed/lost. */
1407
1408 static int
1409 get_detach_signal (struct thread_info *thread)
1410 {
1411 enum gdb_signal signo = GDB_SIGNAL_0;
1412 int status;
1413 struct lwp_info *lp = get_thread_lwp (thread);
1414
1415 if (lp->status_pending_p)
1416 status = lp->status_pending;
1417 else
1418 {
1419 /* If the thread had been suspended by gdbserver, and it stopped
1420 cleanly, then it'll have stopped with SIGSTOP. But we don't
1421 want to deliver that SIGSTOP. */
1422 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1423 || thread->last_status.value.sig == GDB_SIGNAL_0)
1424 return 0;
1425
1426 /* Otherwise, we may need to deliver the signal we
1427 intercepted. */
1428 status = lp->last_status;
1429 }
1430
1431 if (!WIFSTOPPED (status))
1432 {
1433 if (debug_threads)
1434 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1435 target_pid_to_str (ptid_of (thread)));
1436 return 0;
1437 }
1438
1439 /* Extended wait statuses aren't real SIGTRAPs. */
1440 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1441 {
1442 if (debug_threads)
1443 debug_printf ("GPS: lwp %s had stopped with extended "
1444 "status: no pending signal\n",
1445 target_pid_to_str (ptid_of (thread)));
1446 return 0;
1447 }
1448
1449 signo = gdb_signal_from_host (WSTOPSIG (status));
1450
1451 if (program_signals_p && !program_signals[signo])
1452 {
1453 if (debug_threads)
1454 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1455 target_pid_to_str (ptid_of (thread)),
1456 gdb_signal_to_string (signo));
1457 return 0;
1458 }
1459 else if (!program_signals_p
1460 /* If we have no way to know which signals GDB does not
1461 want to have passed to the program, assume
1462 SIGTRAP/SIGINT, which is GDB's default. */
1463 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1464 {
1465 if (debug_threads)
1466 debug_printf ("GPS: lwp %s had signal %s, "
1467 "but we don't know if we should pass it. "
1468 "Default to not.\n",
1469 target_pid_to_str (ptid_of (thread)),
1470 gdb_signal_to_string (signo));
1471 return 0;
1472 }
1473 else
1474 {
1475 if (debug_threads)
1476 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1477 target_pid_to_str (ptid_of (thread)),
1478 gdb_signal_to_string (signo));
1479
1480 return WSTOPSIG (status);
1481 }
1482 }
1483
1484 /* Detach from LWP. */
1485
1486 static void
1487 linux_detach_one_lwp (struct lwp_info *lwp)
1488 {
1489 struct thread_info *thread = get_lwp_thread (lwp);
1490 int sig;
1491 int lwpid;
1492
1493 /* If there is a pending SIGSTOP, get rid of it. */
1494 if (lwp->stop_expected)
1495 {
1496 if (debug_threads)
1497 debug_printf ("Sending SIGCONT to %s\n",
1498 target_pid_to_str (ptid_of (thread)));
1499
1500 kill_lwp (lwpid_of (thread), SIGCONT);
1501 lwp->stop_expected = 0;
1502 }
1503
1504 /* Pass on any pending signal for this thread. */
1505 sig = get_detach_signal (thread);
1506
1507 /* Preparing to resume may try to write registers, and fail if the
1508 lwp is zombie. If that happens, ignore the error. We'll handle
1509 it below, when detach fails with ESRCH. */
1510 TRY
1511 {
1512 /* Flush any pending changes to the process's registers. */
1513 regcache_invalidate_thread (thread);
1514
1515 /* Finally, let it resume. */
1516 if (the_low_target.prepare_to_resume != NULL)
1517 the_low_target.prepare_to_resume (lwp);
1518 }
1519 CATCH (ex, RETURN_MASK_ERROR)
1520 {
1521 if (!check_ptrace_stopped_lwp_gone (lwp))
1522 throw_exception (ex);
1523 }
1524 END_CATCH
1525
1526 lwpid = lwpid_of (thread);
1527 if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
1528 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1529 {
1530 int save_errno = errno;
1531
1532 /* We know the thread exists, so ESRCH must mean the lwp is
1533 zombie. This can happen if one of the already-detached
1534 threads exits the whole thread group. In that case we're
1535 still attached, and must reap the lwp. */
1536 if (save_errno == ESRCH)
1537 {
1538 int ret, status;
1539
1540 ret = my_waitpid (lwpid, &status, __WALL);
1541 if (ret == -1)
1542 {
1543 warning (_("Couldn't reap LWP %d while detaching: %s"),
1544 lwpid, strerror (errno));
1545 }
1546 else if (!WIFEXITED (status) && !WIFSIGNALED (status))
1547 {
1548 warning (_("Reaping LWP %d while detaching "
1549 "returned unexpected status 0x%x"),
1550 lwpid, status);
1551 }
1552 }
1553 else
1554 {
1555 error (_("Can't detach %s: %s"),
1556 target_pid_to_str (ptid_of (thread)),
1557 strerror (save_errno));
1558 }
1559 }
1560 else if (debug_threads)
1561 {
1562 debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
1563 target_pid_to_str (ptid_of (thread)),
1564 strsignal (sig));
1565 }
1566
1567 delete_lwp (lwp);
1568 }
1569
1570 /* Callback for find_inferior. Detaches from non-leader threads of a
1571 given process. */
1572
1573 static int
1574 linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
1575 {
1576 struct thread_info *thread = (struct thread_info *) entry;
1577 struct lwp_info *lwp = get_thread_lwp (thread);
1578 int pid = *(int *) args;
1579 int lwpid = lwpid_of (thread);
1580
1581 /* Skip other processes. */
1582 if (ptid_get_pid (entry->id) != pid)
1583 return 0;
1584
1585 /* We don't actually detach from the thread group leader just yet.
1586 If the thread group exits, we must reap the zombie clone lwps
1587 before we're able to reap the leader. */
1588 if (ptid_get_pid (entry->id) == lwpid)
1589 return 0;
1590
1591 linux_detach_one_lwp (lwp);
1592 return 0;
1593 }
1594
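/* Detach from process PID, non-leader LWPs first and the leader
   last.  Returns 0 on success, or -1 if PID is unknown.  */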
1595 static int
1596 linux_detach (int pid)
1597 {
1598 struct process_info *process;
1599 struct lwp_info *main_lwp;
1600
1601 process = find_process_pid (pid);
1602 if (process == NULL)
1603 return -1;
1604
1605 /* If there's a step over already in progress, let it finish first,
1606 otherwise nesting a stabilize_threads operation on top gets real
1607 messy. */
1608 complete_ongoing_step_over ();
1609
1610 /* Stop all threads before detaching. First, ptrace requires that
1611 the thread is stopped to successfully detach. Second, thread_db
1612 may need to uninstall thread event breakpoints from memory, which
1613 only works with a stopped process anyway. */
1614 stop_all_lwps (0, NULL);
1615
1616 #ifdef USE_THREAD_DB
1617 thread_db_detach (process);
1618 #endif
1619
1620 /* Stabilize threads (move out of jump pads). */
1621 stabilize_threads ();
1622
1623 /* Detach from the clone lwps first. If the thread group exits just
1624 while we're detaching, we must reap the clone lwps before we're
1625 able to reap the leader. */
1626 find_inferior (&all_threads, linux_detach_lwp_callback, &pid);
1627
1628 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1629 linux_detach_one_lwp (main_lwp);
1630
1631 the_target->mourn (process);
1632
1633 /* Since we presently can only stop all lwps of all processes, we
1634 need to unstop lwps of other processes. */
1635 unstop_all_lwps (0, NULL);
1636 return 0;
1637 }
1638
1639 /* Remove all LWPs that belong to process PROC from the lwp list. */
1640
1641 static int
1642 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1643 {
1644 struct thread_info *thread = (struct thread_info *) entry;
1645 struct lwp_info *lwp = get_thread_lwp (thread);
1646 struct process_info *process = (struct process_info *) proc;
1647
1648 if (pid_of (thread) == pid_of (process))
1649 delete_lwp (lwp);
1650
1651 return 0;
1652 }
1653
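/* Clean up after PROCESS is gone: delete all of its LWPs, free its
   private data, and remove it from the process list.  */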
1654 static void
1655 linux_mourn (struct process_info *process)
1656 {
1657 struct process_info_private *priv;
1658
1659 #ifdef USE_THREAD_DB
1660 thread_db_mourn (process);
1661 #endif
1662
1663 find_inferior (&all_threads, delete_lwp_callback, process);
1664
1665 /* Free all private data. */
1666 priv = process->priv;
1667 free (priv->arch_private);
1668 free (priv);
1669 process->priv = NULL;
1670
1671 remove_process (process);
1672 }
1673
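/* Block until process PID has really gone away, i.e. until waitpid
   reports that it exited or fails with ECHILD.  */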
1674 static void
1675 linux_join (int pid)
1676 {
1677 int status, ret;
1678
1679 do {
1680 ret = my_waitpid (pid, &status, 0);
1681 if (ret != -1 && (WIFEXITED (status) || WIFSIGNALED (status)))
1682 break;
1683 } while (ret != -1 || errno != ECHILD);
1684 }
1685
1686 /* Return nonzero if the given thread is still alive. */
1687 static int
1688 linux_thread_alive (ptid_t ptid)
1689 {
1690 struct lwp_info *lwp = find_lwp_pid (ptid);
1691
1692 /* We assume we always know if a thread exits. If a whole process
1693 exited but we still haven't been able to report it to GDB, we'll
1694 hold on to the last lwp of the dead process. */
1695 if (lwp != NULL)
1696 return !lwp_is_marked_dead (lwp);
1697 else
1698 return 0;
1699 }
1700
1701 /* Return true if this lwp still has an interesting status pending. If
1702 not (e.g., it had stopped for a breakpoint that is gone), return
1703 false. */
1704
1705 static int
1706 thread_still_has_status_pending_p (struct thread_info *thread)
1707 {
1708 struct lwp_info *lp = get_thread_lwp (thread);
1709
1710 if (!lp->status_pending_p)
1711 return 0;
1712
1713 if (thread->last_resume_kind != resume_stop
1714 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1715 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1716 {
1717 struct thread_info *saved_thread;
1718 CORE_ADDR pc;
1719 int discard = 0;
1720
1721 gdb_assert (lp->last_status != 0);
1722
1723 pc = get_pc (lp);
1724
1725 saved_thread = current_thread;
1726 current_thread = thread;
1727
1728 if (pc != lp->stop_pc)
1729 {
1730 if (debug_threads)
1731 debug_printf ("PC of %ld changed\n",
1732 lwpid_of (thread));
1733 discard = 1;
1734 }
1735
1736 #if !USE_SIGTRAP_SIGINFO
1737 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1738 && !(*the_low_target.breakpoint_at) (pc))
1739 {
1740 if (debug_threads)
1741 debug_printf ("previous SW breakpoint of %ld gone\n",
1742 lwpid_of (thread));
1743 discard = 1;
1744 }
1745 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1746 && !hardware_breakpoint_inserted_here (pc))
1747 {
1748 if (debug_threads)
1749 debug_printf ("previous HW breakpoint of %ld gone\n",
1750 lwpid_of (thread));
1751 discard = 1;
1752 }
1753 #endif
1754
1755 current_thread = saved_thread;
1756
1757 if (discard)
1758 {
1759 if (debug_threads)
1760 debug_printf ("discarding pending breakpoint status\n");
1761 lp->status_pending_p = 0;
1762 return 0;
1763 }
1764 }
1765
1766 return 1;
1767 }
1768
1769 /* Returns true if LWP is resumed from the client's perspective. */
1770
1771 static int
1772 lwp_resumed (struct lwp_info *lwp)
1773 {
1774 struct thread_info *thread = get_lwp_thread (lwp);
1775
1776 if (thread->last_resume_kind != resume_stop)
1777 return 1;
1778
1779 /* Did gdb send us a `vCont;t', but we haven't reported the
1780 corresponding stop to gdb yet? If so, the thread is still
1781 resumed/running from gdb's perspective. */
1782 if (thread->last_resume_kind == resume_stop
1783 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1784 return 1;
1785
1786 return 0;
1787 }
1788
1789 /* Return 1 if this lwp has an interesting status pending. */
1790 static int
1791 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1792 {
1793 struct thread_info *thread = (struct thread_info *) entry;
1794 struct lwp_info *lp = get_thread_lwp (thread);
1795 ptid_t ptid = * (ptid_t *) arg;
1796
1797 /* Check if we're only interested in events from a specific process
1798 or a specific LWP. */
1799 if (!ptid_match (ptid_of (thread), ptid))
1800 return 0;
1801
1802 if (!lwp_resumed (lp))
1803 return 0;
1804
1805 if (lp->status_pending_p
1806 && !thread_still_has_status_pending_p (thread))
1807 {
1808 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1809 return 0;
1810 }
1811
1812 return lp->status_pending_p;
1813 }
1814
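/* Callback for find_inferior.  Returns 1 if ENTRY's lwp id matches
   the ptid in DATA, falling back to the pid when that ptid carries no
   lwp component.  */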
1815 static int
1816 same_lwp (struct inferior_list_entry *entry, void *data)
1817 {
1818 ptid_t ptid = *(ptid_t *) data;
1819 int lwp;
1820
1821 if (ptid_get_lwp (ptid) != 0)
1822 lwp = ptid_get_lwp (ptid);
1823 else
1824 lwp = ptid_get_pid (ptid);
1825
1826 if (ptid_get_lwp (entry->id) == lwp)
1827 return 1;
1828
1829 return 0;
1830 }
1831
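/* Find the lwp_info whose id matches PTID (by lwp, or by pid when
   PTID has no lwp component).  Returns NULL if there is none.  */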
1832 struct lwp_info *
1833 find_lwp_pid (ptid_t ptid)
1834 {
1835 struct inferior_list_entry *thread
1836 = find_inferior (&all_threads, same_lwp, &ptid);
1837
1838 if (thread == NULL)
1839 return NULL;
1840
1841 return get_thread_lwp ((struct thread_info *) thread);
1842 }
1843
1844 /* Return the number of known LWPs in the tgid given by PID. */
1845
1846 static int
1847 num_lwps (int pid)
1848 {
1849 struct inferior_list_entry *inf, *tmp;
1850 int count = 0;
1851
1852 ALL_INFERIORS (&all_threads, inf, tmp)
1853 {
1854 if (ptid_get_pid (inf->id) == pid)
1855 count++;
1856 }
1857
1858 return count;
1859 }
1860
1861 /* The arguments passed to iterate_over_lwps. */
1862
1863 struct iterate_over_lwps_args
1864 {
1865 /* The FILTER argument passed to iterate_over_lwps. */
1866 ptid_t filter;
1867
1868 /* The CALLBACK argument passed to iterate_over_lwps. */
1869 iterate_over_lwps_ftype *callback;
1870
1871 /* The DATA argument passed to iterate_over_lwps. */
1872 void *data;
1873 };
1874
1875 /* Callback for find_inferior used by iterate_over_lwps to filter
1876 calls to the callback supplied to that function. Returning a
1877 nonzero value causes find_inferior to stop iterating and return
1878 the current inferior_list_entry. Returning zero indicates that
1879 find_inferior should continue iterating. */
1880
1881 static int
1882 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1883 {
1884 struct iterate_over_lwps_args *args
1885 = (struct iterate_over_lwps_args *) args_p;
1886
1887 if (ptid_match (entry->id, args->filter))
1888 {
1889 struct thread_info *thr = (struct thread_info *) entry;
1890 struct lwp_info *lwp = get_thread_lwp (thr);
1891
1892 return (*args->callback) (lwp, args->data);
1893 }
1894
1895 return 0;
1896 }
1897
1898 /* See nat/linux-nat.h. */
1899
1900 struct lwp_info *
1901 iterate_over_lwps (ptid_t filter,
1902 iterate_over_lwps_ftype callback,
1903 void *data)
1904 {
1905 struct iterate_over_lwps_args args = {filter, callback, data};
1906 struct inferior_list_entry *entry;
1907
1908 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1909 if (entry == NULL)
1910 return NULL;
1911
1912 return get_thread_lwp ((struct thread_info *) entry);
1913 }
1914
1915 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1916 their exits until all other threads in the group have exited. */
1917
1918 static void
1919 check_zombie_leaders (void)
1920 {
1921 struct process_info *proc, *tmp;
1922
1923 ALL_PROCESSES (proc, tmp)
1924 {
1925 pid_t leader_pid = pid_of (proc);
1926 struct lwp_info *leader_lp;
1927
1928 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1929
1930 if (debug_threads)
1931 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1932 "num_lwps=%d, zombie=%d\n",
1933 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1934 linux_proc_pid_is_zombie (leader_pid));
1935
1936 if (leader_lp != NULL && !leader_lp->stopped
1937 /* Check if there are other threads in the group, as we may
1938 have raced with the inferior simply exiting. */
1939 && !last_thread_of_process_p (leader_pid)
1940 && linux_proc_pid_is_zombie (leader_pid))
1941 {
1942 /* A leader zombie can mean one of two things:
1943
1944 - It exited: either the whole program exited and there's an
1945 exit status pending, or only the leader exited (not the whole
1946 program). In the latter case, we can't waitpid the
1947 leader's exit status until all other threads are gone.
1948
1949 - There are 3 or more threads in the group, and a thread
1950 other than the leader exec'd. On an exec, the Linux
1951 kernel destroys all other threads (except the execing
1952 one) in the thread group, and resets the execing thread's
1953 tid to the tgid. No exit notification is sent for the
1954 execing thread -- from the ptracer's perspective, it
1955 appears as though the execing thread just vanishes.
1956 Until we reap all other threads except the leader and the
1957 execing thread, the leader will be zombie, and the
1958 execing thread will be in `D (disc sleep)'. As soon as
1959 all other threads are reaped, the execing thread changes
1960 its tid to the tgid, and the previous (zombie) leader
1961 vanishes, giving place to the "new" leader. We could try
1962 distinguishing the exit and exec cases, by waiting once
1963 more, and seeing if something comes out, but it doesn't
1964 sound useful. The previous leader _does_ go away, and
1965 we'll re-add the new one once we see the exec event
1966 (which is just the same as what would happen if the
1967 previous leader did exit voluntarily before some other
1968 thread execs). */
1969
1970 if (debug_threads)
1971 debug_printf ("CZL: Thread group leader %d zombie "
1972 "(it exited, or another thread "
1973 "exec'd).\n",
1974 leader_pid);
1975
1976 delete_lwp (leader_lp);
1977 }
1978 }
1979 }
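
/* linux_proc_pid_is_zombie (from nat/linux-procfs) keys off the task
   state the kernel publishes under /proc.  A self-contained sketch of
   one way such a check can be written, assuming the "State:" line
   format of /proc/PID/status; an illustration only, not the actual
   implementation (not built):  */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static int
pid_is_zombie_sketch (pid_t pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        /* The line reads "State:\tZ (zombie)" for a zombie task.  */
        const char *p = line + 6;

        while (*p == ' ' || *p == '\t')
          p++;
        zombie = (*p == 'Z');
        break;
      }

  fclose (f);
  return zombie;
}
#endif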
1980
1981 /* Callback for `find_inferior'. Returns the first LWP that is not
1982 stopped. ARG is a PTID filter. */
1983
1984 static int
1985 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1986 {
1987 struct thread_info *thr = (struct thread_info *) entry;
1988 struct lwp_info *lwp;
1989 ptid_t filter = *(ptid_t *) arg;
1990
1991 if (!ptid_match (ptid_of (thr), filter))
1992 return 0;
1993
1994 lwp = get_thread_lwp (thr);
1995 if (!lwp->stopped)
1996 return 1;
1997
1998 return 0;
1999 }
2000
2001 /* Increment LWP's suspend count. */
2002
2003 static void
2004 lwp_suspended_inc (struct lwp_info *lwp)
2005 {
2006 lwp->suspended++;
2007
2008 if (debug_threads && lwp->suspended > 4)
2009 {
2010 struct thread_info *thread = get_lwp_thread (lwp);
2011
2012 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2013 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2014 }
2015 }
2016
2017 /* Decrement LWP's suspend count. */
2018
2019 static void
2020 lwp_suspended_decr (struct lwp_info *lwp)
2021 {
2022 lwp->suspended--;
2023
2024 if (lwp->suspended < 0)
2025 {
2026 struct thread_info *thread = get_lwp_thread (lwp);
2027
2028 internal_error (__FILE__, __LINE__,
2029 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2030 lwp->suspended);
2031 }
2032 }
2033
2034 /* This function should only be called if the LWP got a SIGTRAP.
2035
2036 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2037 event was handled, 0 otherwise. */
2038
2039 static int
2040 handle_tracepoints (struct lwp_info *lwp)
2041 {
2042 struct thread_info *tinfo = get_lwp_thread (lwp);
2043 int tpoint_related_event = 0;
2044
2045 gdb_assert (lwp->suspended == 0);
2046
2047 /* If this tracepoint hit causes a tracing stop, we'll immediately
2048 uninsert tracepoints. To do this, we temporarily pause all
2049 threads, unpatch away, and then unpause threads. We need to make
2050 sure the unpausing doesn't resume LWP too. */
2051 lwp_suspended_inc (lwp);
2052
2053 /* And we need to be sure that any all-threads-stopping doesn't try
2054 to move threads out of the jump pads, as it could deadlock the
2055 inferior (LWP could be in the jump pad, maybe even holding the
2056 lock). */
2057
2058 /* Do any necessary step collect actions. */
2059 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2060
2061 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2062
2063 /* See if we just hit a tracepoint and do its main collect
2064 actions. */
2065 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2066
2067 lwp_suspended_decr (lwp);
2068
2069 gdb_assert (lwp->suspended == 0);
2070 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2071
2072 if (tpoint_related_event)
2073 {
2074 if (debug_threads)
2075 debug_printf ("got a tracepoint event\n");
2076 return 1;
2077 }
2078
2079 return 0;
2080 }
2081
2082 /* Convenience wrapper. Returns nonzero if LWP is presently
2083 collecting a fast tracepoint. */
2084
2085 static int
2086 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2087 struct fast_tpoint_collect_status *status)
2088 {
2089 CORE_ADDR thread_area;
2090 struct thread_info *thread = get_lwp_thread (lwp);
2091
2092 if (the_low_target.get_thread_area == NULL)
2093 return 0;
2094
2095 /* Get the thread area address. This is used to recognize which
2096 thread is which when tracing with the in-process agent library.
2097 We don't read anything from the address, and treat it as opaque;
2098 it's the address itself that we assume is unique per-thread. */
2099 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2100 return 0;
2101
2102 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2103 }
2104
2105 /* The reason we resume in the caller is that we want to be able
2106 to pass lwp->status_pending as WSTAT, and we need to clear
2107 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2108 refuses to resume. */
2109
2110 static int
2111 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2112 {
2113 struct thread_info *saved_thread;
2114
2115 saved_thread = current_thread;
2116 current_thread = get_lwp_thread (lwp);
2117
2118 if ((wstat == NULL
2119 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2120 && supports_fast_tracepoints ()
2121 && agent_loaded_p ())
2122 {
2123 struct fast_tpoint_collect_status status;
2124 int r;
2125
2126 if (debug_threads)
2127 debug_printf ("Checking whether LWP %ld needs to move out of the "
2128 "jump pad.\n",
2129 lwpid_of (current_thread));
2130
2131 r = linux_fast_tracepoint_collecting (lwp, &status);
2132
2133 if (wstat == NULL
2134 || (WSTOPSIG (*wstat) != SIGILL
2135 && WSTOPSIG (*wstat) != SIGFPE
2136 && WSTOPSIG (*wstat) != SIGSEGV
2137 && WSTOPSIG (*wstat) != SIGBUS))
2138 {
2139 lwp->collecting_fast_tracepoint = r;
2140
2141 if (r != 0)
2142 {
2143 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2144 {
2145 /* Haven't executed the original instruction yet.
2146 Set breakpoint there, and wait till it's hit,
2147 then single-step until exiting the jump pad. */
2148 lwp->exit_jump_pad_bkpt
2149 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2150 }
2151
2152 if (debug_threads)
2153 debug_printf ("Checking whether LWP %ld needs to move out of "
2154 "the jump pad...it does\n",
2155 lwpid_of (current_thread));
2156 current_thread = saved_thread;
2157
2158 return 1;
2159 }
2160 }
2161 else
2162 {
2163 /* If we get a synchronous signal while collecting, *and*
2164 while executing the (relocated) original instruction,
2165 reset the PC to point at the tpoint address, before
2166 reporting to GDB. Otherwise, it's an IPA lib bug: just
2167 report the signal to GDB, and pray for the best. */
2168
2169 lwp->collecting_fast_tracepoint = 0;
2170
2171 if (r != 0
2172 && (status.adjusted_insn_addr <= lwp->stop_pc
2173 && lwp->stop_pc < status.adjusted_insn_addr_end))
2174 {
2175 siginfo_t info;
2176 struct regcache *regcache;
2177
2178 /* The si_addr on a few signals references the address
2179 of the faulting instruction. Adjust that as
2180 well. */
2181 if ((WSTOPSIG (*wstat) == SIGILL
2182 || WSTOPSIG (*wstat) == SIGFPE
2183 || WSTOPSIG (*wstat) == SIGBUS
2184 || WSTOPSIG (*wstat) == SIGSEGV)
2185 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2186 (PTRACE_TYPE_ARG3) 0, &info) == 0
2187 /* Final check just to make sure we don't clobber
2188 the siginfo of non-kernel-sent signals. */
2189 && (uintptr_t) info.si_addr == lwp->stop_pc)
2190 {
2191 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2192 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2193 (PTRACE_TYPE_ARG3) 0, &info);
2194 }
2195
2196 regcache = get_thread_regcache (current_thread, 1);
2197 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2198 lwp->stop_pc = status.tpoint_addr;
2199
2200 /* Cancel any fast tracepoint lock this thread was
2201 holding. */
2202 force_unlock_trace_buffer ();
2203 }
2204
2205 if (lwp->exit_jump_pad_bkpt != NULL)
2206 {
2207 if (debug_threads)
2208 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2209 "stopping all threads momentarily.\n");
2210
2211 stop_all_lwps (1, lwp);
2212
2213 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2214 lwp->exit_jump_pad_bkpt = NULL;
2215
2216 unstop_all_lwps (1, lwp);
2217
2218 gdb_assert (lwp->suspended >= 0);
2219 }
2220 }
2221 }
2222
2223 if (debug_threads)
2224 debug_printf ("Checking whether LWP %ld needs to move out of the "
2225 "jump pad...no\n",
2226 lwpid_of (current_thread));
2227
2228 current_thread = saved_thread;
2229 return 0;
2230 }
2231
2232 /* Enqueue one signal in the "signals to report later when out of the
2233 jump pad" list. */
2234
2235 static void
2236 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2237 {
2238 struct pending_signals *p_sig;
2239 struct thread_info *thread = get_lwp_thread (lwp);
2240
2241 if (debug_threads)
2242 debug_printf ("Deferring signal %d for LWP %ld.\n",
2243 WSTOPSIG (*wstat), lwpid_of (thread));
2244
2245 if (debug_threads)
2246 {
2247 struct pending_signals *sig;
2248
2249 for (sig = lwp->pending_signals_to_report;
2250 sig != NULL;
2251 sig = sig->prev)
2252 debug_printf (" Already queued %d\n",
2253 sig->signal);
2254
2255 debug_printf (" (no more currently queued signals)\n");
2256 }
2257
2258 /* Don't enqueue non-RT signals if they are already in the deferred
2259 queue. (SIGSTOP being the easiest signal to see ending up here
2260 twice) */
2261 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2262 {
2263 struct pending_signals *sig;
2264
2265 for (sig = lwp->pending_signals_to_report;
2266 sig != NULL;
2267 sig = sig->prev)
2268 {
2269 if (sig->signal == WSTOPSIG (*wstat))
2270 {
2271 if (debug_threads)
2272 debug_printf ("Not requeuing already queued non-RT signal %d"
2273 " for LWP %ld\n",
2274 sig->signal,
2275 lwpid_of (thread));
2276 return;
2277 }
2278 }
2279 }
2280
2281 p_sig = XCNEW (struct pending_signals);
2282 p_sig->prev = lwp->pending_signals_to_report;
2283 p_sig->signal = WSTOPSIG (*wstat);
2284
2285 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2286 &p_sig->info);
2287
2288 lwp->pending_signals_to_report = p_sig;
2289 }
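
/* PTRACE_GETSIGINFO (used just above) copies the siginfo_t of the
   signal that caused the ptrace-stop, so the queued entry can later be
   replayed verbatim with PTRACE_SETSIGINFO.  A minimal standalone
   sketch of the fetch, with a hypothetical pid and no error handling
   (not built):  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

static long
fetch_stop_siginfo_sketch (pid_t pid, siginfo_t *info)
{
  /* The tracee must currently be in a ptrace-stop; returns 0 on
     success, -1 on failure.  */
  return ptrace (PTRACE_GETSIGINFO, pid, (void *) 0, info);
}
#endif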
2290
2291 /* Dequeue one signal from the "signals to report later when out of
2292 the jump pad" list. */
2293
2294 static int
2295 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2296 {
2297 struct thread_info *thread = get_lwp_thread (lwp);
2298
2299 if (lwp->pending_signals_to_report != NULL)
2300 {
2301 struct pending_signals **p_sig;
2302
2303 p_sig = &lwp->pending_signals_to_report;
2304 while ((*p_sig)->prev != NULL)
2305 p_sig = &(*p_sig)->prev;
2306
2307 *wstat = W_STOPCODE ((*p_sig)->signal);
2308 if ((*p_sig)->info.si_signo != 0)
2309 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2310 &(*p_sig)->info);
2311 free (*p_sig);
2312 *p_sig = NULL;
2313
2314 if (debug_threads)
2315 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2316 WSTOPSIG (*wstat), lwpid_of (thread));
2317
2318 if (debug_threads)
2319 {
2320 struct pending_signals *sig;
2321
2322 for (sig = lwp->pending_signals_to_report;
2323 sig != NULL;
2324 sig = sig->prev)
2325 debug_printf (" Still queued %d\n",
2326 sig->signal);
2327
2328 debug_printf (" (no more queued signals)\n");
2329 }
2330
2331 return 1;
2332 }
2333
2334 return 0;
2335 }
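
/* W_STOPCODE (used above) synthesizes a wait status as if waitpid had
   just reported the signal; glibc defines it as (sig << 8) | 0x7f, so
   WIFSTOPPED and WSTOPSIG recover the original signal.  A quick check
   of that round-trip (illustrative, not built):  */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <assert.h>

static void
w_stopcode_roundtrip (void)
{
  int wstat = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == SIGUSR1);
}
#endif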
2336
2337 /* Fetch the possibly triggered data watchpoint info and store it in
2338 CHILD.
2339
2340 On some archs, like x86, that use debug registers to set
2341 watchpoints, it's possible that the way to know which watched
2342 address trapped, is to check the register that is used to select
2343 which address to watch. Problem is, between setting the watchpoint
2344 and reading back which data address trapped, the user may change
2345 the set of watchpoints, and, as a consequence, GDB changes the
2346 debug registers in the inferior. To avoid reading back a stale
2347 stopped-data-address when that happens, we cache in CHILD the fact
2348 that a watchpoint trapped, and the corresponding data address, as
2349 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2350 registers meanwhile, we have the cached data we can rely on. */
2351
2352 static int
2353 check_stopped_by_watchpoint (struct lwp_info *child)
2354 {
2355 if (the_low_target.stopped_by_watchpoint != NULL)
2356 {
2357 struct thread_info *saved_thread;
2358
2359 saved_thread = current_thread;
2360 current_thread = get_lwp_thread (child);
2361
2362 if (the_low_target.stopped_by_watchpoint ())
2363 {
2364 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2365
2366 if (the_low_target.stopped_data_address != NULL)
2367 child->stopped_data_address
2368 = the_low_target.stopped_data_address ();
2369 else
2370 child->stopped_data_address = 0;
2371 }
2372
2373 current_thread = saved_thread;
2374 }
2375
2376 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2377 }
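
/* On x86, the low target's stopped_by_watchpoint hook typically
   inspects the DR6 debug status register, and stopped_data_address the
   matching DR0..DR3 address register.  A rough x86-only sketch of
   reading a debug register from the tracee via PTRACE_PEEKUSER;
   illustrative, not the actual implementation (not built):  */
#if 0
#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/types.h>
#include <stddef.h>

static long
read_debug_register_sketch (pid_t pid, int regnum)
{
  /* DR6 (regnum 6) is the debug status register; its low bits flag
     which of DR0..DR3 triggered.  DR0..DR3 hold the watched
     addresses.  */
  size_t off = (offsetof (struct user, u_debugreg)
                + regnum * sizeof (((struct user *) 0)->u_debugreg[0]));

  return ptrace (PTRACE_PEEKUSER, pid, (void *) off, 0);
}
#endif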
2378
2379 /* Return the ptrace options that we want to try to enable. */
2380
2381 static int
2382 linux_low_ptrace_options (int attached)
2383 {
2384 int options = 0;
2385
2386 if (!attached)
2387 options |= PTRACE_O_EXITKILL;
2388
2389 if (report_fork_events)
2390 options |= PTRACE_O_TRACEFORK;
2391
2392 if (report_vfork_events)
2393 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2394
2395 if (report_exec_events)
2396 options |= PTRACE_O_TRACEEXEC;
2397
2398 options |= PTRACE_O_TRACESYSGOOD;
2399
2400 return options;
2401 }
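
/* These flags take effect once applied with PTRACE_SETOPTIONS;
   linux_enable_event_reporting (nat/linux-ptrace.c) does roughly the
   equivalent of this sketch, modulo checking which options the running
   kernel actually supports (illustrative only, not built):  */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

static void
apply_ptrace_options_sketch (pid_t pid, int options)
{
  /* The tracee must be in a ptrace-stop.  With PTRACE_O_TRACESYSGOOD
     set, syscall stops report WSTOPSIG of SIGTRAP | 0x80, which is
     what the SYSCALL_SIGTRAP checks in this file rely on.  */
  ptrace (PTRACE_SETOPTIONS, pid, (void *) 0, (void *) (long) options);
}
#endif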
2402
2403 /* Do low-level handling of the event, and check if we should go on
2404 and pass it to caller code. Return the affected lwp if we should,
2405 or NULL otherwise. */
2406
2407 static struct lwp_info *
2408 linux_low_filter_event (int lwpid, int wstat)
2409 {
2410 struct lwp_info *child;
2411 struct thread_info *thread;
2412 int have_stop_pc = 0;
2413
2414 child = find_lwp_pid (pid_to_ptid (lwpid));
2415
2416 /* Check for stop events reported by a process we didn't already
2417 know about - anything not already in our LWP list.
2418
2419 If we're expecting to receive stopped processes after
2420 fork, vfork, and clone events, then we'll just add the
2421 new one to our list and go back to waiting for the event
2422 to be reported - the stopped process might be returned
2423 from waitpid before or after the event is.
2424
2425 But note the case of a non-leader thread exec'ing after the
2426 leader having exited, and gone from our lists (because
2427 check_zombie_leaders deleted it). The non-leader thread
2428 changes its tid to the tgid. */
2429
2430 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2431 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2432 {
2433 ptid_t child_ptid;
2434
2435 /* A multi-thread exec after we had seen the leader exiting. */
2436 if (debug_threads)
2437 {
2438 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2439 "after exec.\n", lwpid);
2440 }
2441
2442 child_ptid = ptid_build (lwpid, lwpid, 0);
2443 child = add_lwp (child_ptid);
2444 child->stopped = 1;
2445 current_thread = child->thread;
2446 }
2447
2448 /* If we didn't find a process, one of two things presumably happened:
2449 - A process we started and then detached from has exited. Ignore it.
2450 - A process we are controlling has forked and the new child's stop
2451 was reported to us by the kernel. Save its PID. */
2452 if (child == NULL && WIFSTOPPED (wstat))
2453 {
2454 add_to_pid_list (&stopped_pids, lwpid, wstat);
2455 return NULL;
2456 }
2457 else if (child == NULL)
2458 return NULL;
2459
2460 thread = get_lwp_thread (child);
2461
2462 child->stopped = 1;
2463
2464 child->last_status = wstat;
2465
2466 /* Check if the thread has exited. */
2467 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2468 {
2469 if (debug_threads)
2470 debug_printf ("LLFE: %d exited.\n", lwpid);
2471
2472 if (finish_step_over (child))
2473 {
2474 /* Unsuspend all other LWPs, and set them back running again. */
2475 unsuspend_all_lwps (child);
2476 }
2477
2478 /* If there is at least one more LWP, then the exit signal was
2479 not the end of the debugged application and should be
2480 ignored, unless GDB wants to hear about thread exits. */
2481 if (report_thread_events
2482 || last_thread_of_process_p (pid_of (thread)))
2483 {
2484 /* Events are serialized to the GDB core, and we can't
2485 report this one right now. Leave the status pending for
2486 the next time we're able to report it. */
2487 mark_lwp_dead (child, wstat);
2488 return child;
2489 }
2490 else
2491 {
2492 delete_lwp (child);
2493 return NULL;
2494 }
2495 }
2496
2497 gdb_assert (WIFSTOPPED (wstat));
2498
2499 if (WIFSTOPPED (wstat))
2500 {
2501 struct process_info *proc;
2502
2503 /* Architecture-specific setup after inferior is running. */
2504 proc = find_process_pid (pid_of (thread));
2505 if (proc->tdesc == NULL)
2506 {
2507 if (proc->attached)
2508 {
2509 /* This needs to happen after we have attached to the
2510 inferior and it is stopped for the first time, but
2511 before we access any inferior registers. */
2512 linux_arch_setup_thread (thread);
2513 }
2514 else
2515 {
2516 /* The process is started, but GDBserver will do
2517 architecture-specific setup after the program stops at
2518 the first instruction. */
2519 child->status_pending_p = 1;
2520 child->status_pending = wstat;
2521 return child;
2522 }
2523 }
2524 }
2525
2526 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2527 {
2528 struct process_info *proc = find_process_pid (pid_of (thread));
2529 int options = linux_low_ptrace_options (proc->attached);
2530
2531 linux_enable_event_reporting (lwpid, options);
2532 child->must_set_ptrace_flags = 0;
2533 }
2534
2535 /* Always update syscall_state, even if it will be filtered later. */
2536 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2537 {
2538 child->syscall_state
2539 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2540 ? TARGET_WAITKIND_SYSCALL_RETURN
2541 : TARGET_WAITKIND_SYSCALL_ENTRY);
2542 }
2543 else
2544 {
2545 /* Almost all other ptrace-stops are known to be outside of system
2546 calls, with further exceptions in handle_extended_wait. */
2547 child->syscall_state = TARGET_WAITKIND_IGNORE;
2548 }
2549
2550 /* Be careful to not overwrite stop_pc until save_stop_reason is
2551 called. */
2552 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2553 && linux_is_extended_waitstatus (wstat))
2554 {
2555 child->stop_pc = get_pc (child);
2556 if (handle_extended_wait (&child, wstat))
2557 {
2558 /* The event has been handled, so just return without
2559 reporting it. */
2560 return NULL;
2561 }
2562 }
2563
2564 if (linux_wstatus_maybe_breakpoint (wstat))
2565 {
2566 if (save_stop_reason (child))
2567 have_stop_pc = 1;
2568 }
2569
2570 if (!have_stop_pc)
2571 child->stop_pc = get_pc (child);
2572
2573 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2574 && child->stop_expected)
2575 {
2576 if (debug_threads)
2577 debug_printf ("Expected stop.\n");
2578 child->stop_expected = 0;
2579
2580 if (thread->last_resume_kind == resume_stop)
2581 {
2582 /* We want to report the stop to the core. Treat the
2583 SIGSTOP as a normal event. */
2584 if (debug_threads)
2585 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2586 target_pid_to_str (ptid_of (thread)));
2587 }
2588 else if (stopping_threads != NOT_STOPPING_THREADS)
2589 {
2590 /* Stopping threads. We don't want this SIGSTOP to end up
2591 pending. */
2592 if (debug_threads)
2593 debug_printf ("LLW: SIGSTOP caught for %s "
2594 "while stopping threads.\n",
2595 target_pid_to_str (ptid_of (thread)));
2596 return NULL;
2597 }
2598 else
2599 {
2600 /* This is a delayed SIGSTOP. Filter out the event. */
2601 if (debug_threads)
2602 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2603 child->stepping ? "step" : "continue",
2604 target_pid_to_str (ptid_of (thread)));
2605
2606 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2607 return NULL;
2608 }
2609 }
2610
2611 child->status_pending_p = 1;
2612 child->status_pending = wstat;
2613 return child;
2614 }
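
/* Extended ptrace events arrive encoded in the wait status: the tracee
   stops with SIGTRAP, and the event code sits above it, so that
   status >> 8 == (SIGTRAP | (PTRACE_EVENT_EXEC << 8)) for an exec
   stop.  A small decoder sketch of what helpers like
   linux_ptrace_get_extended_event rely on, assuming that layout
   (illustrative, not built):  */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static int
extended_event_of_status_sketch (int wstat)
{
  if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP)
    return wstat >> 16;  /* PTRACE_EVENT_* code, or 0 if none.  */
  return 0;
}
#endif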
2615
2616 /* Return true if THREAD is doing hardware single step. */
2617
2618 static int
2619 maybe_hw_step (struct thread_info *thread)
2620 {
2621 if (can_hardware_single_step ())
2622 return 1;
2623 else
2624 {
2625 /* GDBserver must insert a reinsert breakpoint for software
2626 single step. */
2627 gdb_assert (has_reinsert_breakpoints (thread));
2628 return 0;
2629 }
2630 }
2631
2632 /* Resume LWPs that are currently stopped without any pending status
2633 to report, but are resumed from the core's perspective. */
2634
2635 static void
2636 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2637 {
2638 struct thread_info *thread = (struct thread_info *) entry;
2639 struct lwp_info *lp = get_thread_lwp (thread);
2640
2641 if (lp->stopped
2642 && !lp->suspended
2643 && !lp->status_pending_p
2644 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2645 {
2646 int step = 0;
2647
2648 if (thread->last_resume_kind == resume_step)
2649 step = maybe_hw_step (thread);
2650
2651 if (debug_threads)
2652 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2653 target_pid_to_str (ptid_of (thread)),
2654 paddress (lp->stop_pc),
2655 step);
2656
2657 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2658 }
2659 }
2660
2661 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2662 match FILTER_PTID (leaving others pending). The PTIDs can be:
2663 minus_one_ptid, to specify any child; a pid PTID, specifying all
2664 lwps of a thread group; or a PTID representing a single lwp. Store
2665 the stop status through the status pointer WSTAT. OPTIONS is
2666 passed to the waitpid call. Return 0 if no event was found and
2667 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2668 were found. Return the PID of the stopped child otherwise. */
2669
2670 static int
2671 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2672 int *wstatp, int options)
2673 {
2674 struct thread_info *event_thread;
2675 struct lwp_info *event_child, *requested_child;
2676 sigset_t block_mask, prev_mask;
2677
2678 retry:
2679 /* N.B. event_thread points to the thread_info struct that contains
2680 event_child. Keep them in sync. */
2681 event_thread = NULL;
2682 event_child = NULL;
2683 requested_child = NULL;
2684
2685 /* Check for a lwp with a pending status. */
2686
2687 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2688 {
2689 event_thread = (struct thread_info *)
2690 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2691 if (event_thread != NULL)
2692 event_child = get_thread_lwp (event_thread);
2693 if (debug_threads && event_thread)
2694 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2695 }
2696 else if (!ptid_equal (filter_ptid, null_ptid))
2697 {
2698 requested_child = find_lwp_pid (filter_ptid);
2699
2700 if (stopping_threads == NOT_STOPPING_THREADS
2701 && requested_child->status_pending_p
2702 && requested_child->collecting_fast_tracepoint)
2703 {
2704 enqueue_one_deferred_signal (requested_child,
2705 &requested_child->status_pending);
2706 requested_child->status_pending_p = 0;
2707 requested_child->status_pending = 0;
2708 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2709 }
2710
2711 if (requested_child->suspended
2712 && requested_child->status_pending_p)
2713 {
2714 internal_error (__FILE__, __LINE__,
2715 "requesting an event out of a"
2716 " suspended child?");
2717 }
2718
2719 if (requested_child->status_pending_p)
2720 {
2721 event_child = requested_child;
2722 event_thread = get_lwp_thread (event_child);
2723 }
2724 }
2725
2726 if (event_child != NULL)
2727 {
2728 if (debug_threads)
2729 debug_printf ("Got an event from pending child %ld (%04x)\n",
2730 lwpid_of (event_thread), event_child->status_pending);
2731 *wstatp = event_child->status_pending;
2732 event_child->status_pending_p = 0;
2733 event_child->status_pending = 0;
2734 current_thread = event_thread;
2735 return lwpid_of (event_thread);
2736 }
2737
2738 /* But if we don't find a pending event, we'll have to wait.
2739
2740 We only enter this loop if no process has a pending wait status.
2741 Thus any action taken in response to a wait status inside this
2742 loop is responding as soon as we detect the status, not after any
2743 pending events. */
2744
2745 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2746 all signals while here. */
2747 sigfillset (&block_mask);
2748 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2749
2750 /* Always pull all events out of the kernel. We'll randomly select
2751 an event LWP out of all that have events, to prevent
2752 starvation. */
2753 while (event_child == NULL)
2754 {
2755 pid_t ret = 0;
2756
2757 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2758 quirks:
2759
2760 - If the thread group leader exits while other threads in the
2761 thread group still exist, waitpid(TGID, ...) hangs. That
2762 waitpid won't return an exit status until the other threads
2763 in the group are reaped.
2764
2765 - When a non-leader thread execs, that thread just vanishes
2766 without reporting an exit (so we'd hang if we waited for it
2767 explicitly in that case). The exec event is reported to
2768 the TGID pid. */
2769 errno = 0;
2770 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2771
2772 if (debug_threads)
2773 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2774 ret, errno ? strerror (errno) : "ERRNO-OK");
2775
2776 if (ret > 0)
2777 {
2778 if (debug_threads)
2779 {
2780 debug_printf ("LLW: waitpid %ld received %s\n",
2781 (long) ret, status_to_str (*wstatp));
2782 }
2783
2784 /* Filter all events. IOW, leave all events pending. We'll
2785 randomly select an event LWP out of all that have events
2786 below. */
2787 linux_low_filter_event (ret, *wstatp);
2788 /* Retry until nothing comes out of waitpid. A single
2789 SIGCHLD can indicate more than one child stopped. */
2790 continue;
2791 }
2792
2793 /* Now that we've pulled all events out of the kernel, resume
2794 LWPs that don't have an interesting event to report. */
2795 if (stopping_threads == NOT_STOPPING_THREADS)
2796 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2797
2798 /* ... and find an LWP with a status to report to the core, if
2799 any. */
2800 event_thread = (struct thread_info *)
2801 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2802 if (event_thread != NULL)
2803 {
2804 event_child = get_thread_lwp (event_thread);
2805 *wstatp = event_child->status_pending;
2806 event_child->status_pending_p = 0;
2807 event_child->status_pending = 0;
2808 break;
2809 }
2810
2811 /* Check for zombie thread group leaders. Those can't be reaped
2812 until all other threads in the thread group are. */
2813 check_zombie_leaders ();
2814
2815 /* If there are no resumed children left in the set of LWPs we
2816 want to wait for, bail. We can't just block in
2817 waitpid/sigsuspend, because lwps might have been left stopped
2818 in trace-stop state, and we'd be stuck forever waiting for
2819 their status to change (which would only happen if we resumed
2820 them). Even if WNOHANG is set, this return code is preferred
2821 over 0 (below), as it is more detailed. */
2822 if ((find_inferior (&all_threads,
2823 not_stopped_callback,
2824 &wait_ptid) == NULL))
2825 {
2826 if (debug_threads)
2827 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2828 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2829 return -1;
2830 }
2831
2832 /* No interesting event to report to the caller. */
2833 if ((options & WNOHANG))
2834 {
2835 if (debug_threads)
2836 debug_printf ("WNOHANG set, no event found\n");
2837
2838 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2839 return 0;
2840 }
2841
2842 /* Block until we get an event reported with SIGCHLD. */
2843 if (debug_threads)
2844 debug_printf ("sigsuspend'ing\n");
2845
2846 sigsuspend (&prev_mask);
2847 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2848 goto retry;
2849 }
2850
2851 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2852
2853 current_thread = event_thread;
2854
2855 return lwpid_of (event_thread);
2856 }
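
/* The function above uses the classic race-free pattern for waiting on
   children: block signals, drain waitpid with WNOHANG, and only then
   atomically unblock-and-sleep in sigsuspend, so a SIGCHLD arriving in
   between cannot be lost.  A stripped-down standalone sketch of the
   same pattern (error handling elided, not built):  */
#if 0
#include <signal.h>
#include <sys/wait.h>
#include <sys/types.h>

static pid_t
wait_for_child_sketch (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  /* Block everything so SIGCHLD can't slip in before sigsuspend.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while (1)
    {
      /* Drain whatever the kernel has pending; one SIGCHLD may
         cover several stopped children.  */
      ret = waitpid (-1, wstatp, WNOHANG | __WALL);
      if (ret != 0)
        break;  /* Got a child, or an error such as ECHILD.  */

      /* Nothing pending: atomically restore the old mask and sleep
         until a signal (e.g. SIGCHLD) arrives.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}
#endif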
2857
2858 /* Wait for an event from child(ren) PTID. PTIDs can be:
2859 minus_one_ptid, to specify any child; a pid PTID, specifying all
2860 lwps of a thread group; or a PTID representing a single lwp. Store
2861 the stop status through the status pointer WSTAT. OPTIONS is
2862 passed to the waitpid call. Return 0 if no event was found and
2863 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2864 were found. Return the PID of the stopped child otherwise. */
2865
2866 static int
2867 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2868 {
2869 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2870 }
2871
2872 /* Count the LWPs that have had events. */
2873
2874 static int
2875 count_events_callback (struct inferior_list_entry *entry, void *data)
2876 {
2877 struct thread_info *thread = (struct thread_info *) entry;
2878 struct lwp_info *lp = get_thread_lwp (thread);
2879 int *count = (int *) data;
2880
2881 gdb_assert (count != NULL);
2882
2883 /* Count only resumed LWPs that have an event pending. */
2884 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2885 && lp->status_pending_p)
2886 (*count)++;
2887
2888 return 0;
2889 }
2890
2891 /* Select the LWP (if any) that is currently being single-stepped. */
2892
2893 static int
2894 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2895 {
2896 struct thread_info *thread = (struct thread_info *) entry;
2897 struct lwp_info *lp = get_thread_lwp (thread);
2898
2899 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2900 && thread->last_resume_kind == resume_step
2901 && lp->status_pending_p)
2902 return 1;
2903 else
2904 return 0;
2905 }
2906
2907 /* Select the Nth LWP that has had an event. */
2908
2909 static int
2910 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2911 {
2912 struct thread_info *thread = (struct thread_info *) entry;
2913 struct lwp_info *lp = get_thread_lwp (thread);
2914 int *selector = (int *) data;
2915
2916 gdb_assert (selector != NULL);
2917
2918 /* Select only resumed LWPs that have an event pending. */
2919 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2920 && lp->status_pending_p)
2921 if ((*selector)-- == 0)
2922 return 1;
2923
2924 return 0;
2925 }
2926
2927 /* Select one LWP out of those that have events pending. */
2928
2929 static void
2930 select_event_lwp (struct lwp_info **orig_lp)
2931 {
2932 int num_events = 0;
2933 int random_selector;
2934 struct thread_info *event_thread = NULL;
2935
2936 /* In all-stop, give preference to the LWP that is being
2937 single-stepped. There will be at most one, and it's the LWP that
2938 the core is most interested in. If we didn't do this, then we'd
2939 have to handle pending step SIGTRAPs somehow in case the core
2940 later continues the previously-stepped thread, otherwise we'd
2941 report the pending SIGTRAP, and the core, not having stepped the
2942 thread, wouldn't understand what the trap was for, and therefore
2943 would report it to the user as a random signal. */
2944 if (!non_stop)
2945 {
2946 event_thread
2947 = (struct thread_info *) find_inferior (&all_threads,
2948 select_singlestep_lwp_callback,
2949 NULL);
2950 if (event_thread != NULL)
2951 {
2952 if (debug_threads)
2953 debug_printf ("SEL: Select single-step %s\n",
2954 target_pid_to_str (ptid_of (event_thread)));
2955 }
2956 }
2957 if (event_thread == NULL)
2958 {
2959 /* No single-stepping LWP. Select one at random, out of those
2960 which have had events. */
2961
2962 /* First see how many events we have. */
2963 find_inferior (&all_threads, count_events_callback, &num_events);
2964 gdb_assert (num_events > 0);
2965
2966 /* Now randomly pick a LWP out of those that have had
2967 events. */
2968 random_selector = (int)
2969 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2970
2971 if (debug_threads && num_events > 1)
2972 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2973 num_events, random_selector);
2974
2975 event_thread
2976 = (struct thread_info *) find_inferior (&all_threads,
2977 select_event_lwp_callback,
2978 &random_selector);
2979 }
2980
2981 if (event_thread != NULL)
2982 {
2983 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2984
2985 /* Switch the event LWP. */
2986 *orig_lp = event_lp;
2987 }
2988 }
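
/* The selector formula above maps rand () uniformly onto
   0 .. num_events - 1; dividing by RAND_MAX + 1.0 (rather than
   RAND_MAX) keeps the result strictly below num_events.  A small check
   of that range property (illustrative, not built):  */
#if 0
#include <stdlib.h>
#include <assert.h>

static void
random_selector_range_check (int num_events)
{
  int i;

  for (i = 0; i < 1000; i++)
    {
      int sel = (int) ((num_events * (double) rand ())
                       / (RAND_MAX + 1.0));

      assert (sel >= 0 && sel < num_events);
    }
}
#endif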
2989
2990 /* Decrement the suspend count of an LWP. */
2991
2992 static int
2993 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2994 {
2995 struct thread_info *thread = (struct thread_info *) entry;
2996 struct lwp_info *lwp = get_thread_lwp (thread);
2997
2998 /* Ignore EXCEPT. */
2999 if (lwp == except)
3000 return 0;
3001
3002 lwp_suspended_decr (lwp);
3003 return 0;
3004 }
3005
3006 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3007 non-NULL. */
3008
3009 static void
3010 unsuspend_all_lwps (struct lwp_info *except)
3011 {
3012 find_inferior (&all_threads, unsuspend_one_lwp, except);
3013 }
3014
3015 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3016 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3017 void *data);
3018 static int lwp_running (struct inferior_list_entry *entry, void *data);
3019 static ptid_t linux_wait_1 (ptid_t ptid,
3020 struct target_waitstatus *ourstatus,
3021 int target_options);
3022
3023 /* Stabilize threads (move out of jump pads).
3024
3025 If a thread is midway through collecting a fast tracepoint, we
3026 need to finish the collection and move it out of the jump pad before
3027 reporting the signal.
3028
3029 This avoids recursion while collecting (when a signal arrives
3030 midway, and the signal handler itself collects), which would trash
3031 the trace buffer. In case the user set a breakpoint in a signal
3032 handler, this avoids the backtrace showing the jump pad, etc..
3033 Most importantly, there are certain things we can't do safely if
3034 threads are stopped in a jump pad (or in its callees). For
3035 example:
3036
3037 - starting a new trace run. A thread still collecting the
3038 previous run could trash the trace buffer when resumed. The trace
3039 buffer control structures would have been reset but the thread had
3040 no way to tell. The thread could even be midway through memcpy'ing
3041 into the buffer, which would mean that when resumed, it would
3042 clobber the trace buffer that had been set up for a new run.
3043
3044 - we can't rewrite/reuse the jump pads for new tracepoints
3045 safely. Say you do tstart while a thread is stopped midway through
3046 collecting. When the thread is later resumed, it finishes the
3047 collection, and returns to the jump pad, to execute the original
3048 instruction that was under the tracepoint jump at the time the
3049 older run had been started. If the jump pad had been rewritten
3050 since for something else in the new run, the thread would now
3051 execute the wrong / random instructions. */
3052
3053 static void
3054 linux_stabilize_threads (void)
3055 {
3056 struct thread_info *saved_thread;
3057 struct thread_info *thread_stuck;
3058
3059 thread_stuck
3060 = (struct thread_info *) find_inferior (&all_threads,
3061 stuck_in_jump_pad_callback,
3062 NULL);
3063 if (thread_stuck != NULL)
3064 {
3065 if (debug_threads)
3066 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3067 lwpid_of (thread_stuck));
3068 return;
3069 }
3070
3071 saved_thread = current_thread;
3072
3073 stabilizing_threads = 1;
3074
3075 /* Kick 'em all. */
3076 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3077
3078 /* Loop until all are stopped out of the jump pads. */
3079 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3080 {
3081 struct target_waitstatus ourstatus;
3082 struct lwp_info *lwp;
3083 int wstat;
3084
3085 /* Note that we go through the full wait event loop. While
3086 moving threads out of the jump pad, we need to be able to step
3087 over internal breakpoints and such. */
3088 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3089
3090 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3091 {
3092 lwp = get_thread_lwp (current_thread);
3093
3094 /* Lock it. */
3095 lwp_suspended_inc (lwp);
3096
3097 if (ourstatus.value.sig != GDB_SIGNAL_0
3098 || current_thread->last_resume_kind == resume_stop)
3099 {
3100 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3101 enqueue_one_deferred_signal (lwp, &wstat);
3102 }
3103 }
3104 }
3105
3106 unsuspend_all_lwps (NULL);
3107
3108 stabilizing_threads = 0;
3109
3110 current_thread = saved_thread;
3111
3112 if (debug_threads)
3113 {
3114 thread_stuck
3115 = (struct thread_info *) find_inferior (&all_threads,
3116 stuck_in_jump_pad_callback,
3117 NULL);
3118 if (thread_stuck != NULL)
3119 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3120 lwpid_of (thread_stuck));
3121 }
3122 }
3123
3124 /* Convenience function that is called when the kernel reports an
3125 event that is not passed out to GDB. */
3126
3127 static ptid_t
3128 ignore_event (struct target_waitstatus *ourstatus)
3129 {
3130 /* If we got an event, there may still be others, as a single
3131 SIGCHLD can indicate more than one child stopped. This forces
3132 another target_wait call. */
3133 async_file_mark ();
3134
3135 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3136 return null_ptid;
3137 }
3138
3139 /* Convenience function that is called when the kernel reports an exit
3140 event. This decides whether to report the event to GDB as a
3141 process exit event, a thread exit event, or to suppress the
3142 event. */
3143
3144 static ptid_t
3145 filter_exit_event (struct lwp_info *event_child,
3146 struct target_waitstatus *ourstatus)
3147 {
3148 struct thread_info *thread = get_lwp_thread (event_child);
3149 ptid_t ptid = ptid_of (thread);
3150
3151 if (!last_thread_of_process_p (pid_of (thread)))
3152 {
3153 if (report_thread_events)
3154 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3155 else
3156 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3157
3158 delete_lwp (event_child);
3159 }
3160 return ptid;
3161 }
3162
3163 /* Returns 1 if GDB is interested in any event_child syscalls. */
3164
3165 static int
3166 gdb_catching_syscalls_p (struct lwp_info *event_child)
3167 {
3168 struct thread_info *thread = get_lwp_thread (event_child);
3169 struct process_info *proc = get_thread_process (thread);
3170
3171 return !VEC_empty (int, proc->syscalls_to_catch);
3172 }
3173
3174 /* Returns 1 if GDB is interested in the event_child syscall.
3175 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3176
3177 static int
3178 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3179 {
3180 int i, iter;
3181 int sysno;
3182 struct thread_info *thread = get_lwp_thread (event_child);
3183 struct process_info *proc = get_thread_process (thread);
3184
3185 if (VEC_empty (int, proc->syscalls_to_catch))
3186 return 0;
3187
3188 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3189 return 1;
3190
3191 get_syscall_trapinfo (event_child, &sysno);
3192 for (i = 0;
3193 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3194 i++)
3195 if (iter == sysno)
3196 return 1;
3197
3198 return 0;
3199 }
3200
3201 /* Wait for the process and return its status. */
3202
3203 static ptid_t
3204 linux_wait_1 (ptid_t ptid,
3205 struct target_waitstatus *ourstatus, int target_options)
3206 {
3207 int w;
3208 struct lwp_info *event_child;
3209 int options;
3210 int pid;
3211 int step_over_finished;
3212 int bp_explains_trap;
3213 int maybe_internal_trap;
3214 int report_to_gdb;
3215 int trace_event;
3216 int in_step_range;
3217 int any_resumed;
3218
3219 if (debug_threads)
3220 {
3221 debug_enter ();
3222 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3223 }
3224
3225 /* Translate generic target options into linux options. */
3226 options = __WALL;
3227 if (target_options & TARGET_WNOHANG)
3228 options |= WNOHANG;
3229
3230 bp_explains_trap = 0;
3231 trace_event = 0;
3232 in_step_range = 0;
3233 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3234
3235 /* Find a resumed LWP, if any. */
3236 if (find_inferior (&all_threads,
3237 status_pending_p_callback,
3238 &minus_one_ptid) != NULL)
3239 any_resumed = 1;
3240 else if ((find_inferior (&all_threads,
3241 not_stopped_callback,
3242 &minus_one_ptid) != NULL))
3243 any_resumed = 1;
3244 else
3245 any_resumed = 0;
3246
3247 if (ptid_equal (step_over_bkpt, null_ptid))
3248 pid = linux_wait_for_event (ptid, &w, options);
3249 else
3250 {
3251 if (debug_threads)
3252 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3253 target_pid_to_str (step_over_bkpt));
3254 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3255 }
3256
3257 if (pid == 0 || (pid == -1 && !any_resumed))
3258 {
3259 gdb_assert (target_options & TARGET_WNOHANG);
3260
3261 if (debug_threads)
3262 {
3263 debug_printf ("linux_wait_1 ret = null_ptid, "
3264 "TARGET_WAITKIND_IGNORE\n");
3265 debug_exit ();
3266 }
3267
3268 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3269 return null_ptid;
3270 }
3271 else if (pid == -1)
3272 {
3273 if (debug_threads)
3274 {
3275 debug_printf ("linux_wait_1 ret = null_ptid, "
3276 "TARGET_WAITKIND_NO_RESUMED\n");
3277 debug_exit ();
3278 }
3279
3280 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3281 return null_ptid;
3282 }
3283
3284 event_child = get_thread_lwp (current_thread);
3285
3286 /* linux_wait_for_event only returns an exit status for the last
3287 child of a process. Report it. */
3288 if (WIFEXITED (w) || WIFSIGNALED (w))
3289 {
3290 if (WIFEXITED (w))
3291 {
3292 ourstatus->kind = TARGET_WAITKIND_EXITED;
3293 ourstatus->value.integer = WEXITSTATUS (w);
3294
3295 if (debug_threads)
3296 {
3297 debug_printf ("linux_wait_1 ret = %s, exited with "
3298 "retcode %d\n",
3299 target_pid_to_str (ptid_of (current_thread)),
3300 WEXITSTATUS (w));
3301 debug_exit ();
3302 }
3303 }
3304 else
3305 {
3306 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3307 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3308
3309 if (debug_threads)
3310 {
3311 debug_printf ("linux_wait_1 ret = %s, terminated with "
3312 "signal %d\n",
3313 target_pid_to_str (ptid_of (current_thread)),
3314 WTERMSIG (w));
3315 debug_exit ();
3316 }
3317 }
3318
3319 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3320 return filter_exit_event (event_child, ourstatus);
3321
3322 return ptid_of (current_thread);
3323 }
3324
3325 /* If step-over executes a breakpoint instruction, in the case of a
3326 hardware single step it means a gdb/gdbserver breakpoint had been
3327 planted on top of a permanent breakpoint, in the case of a software
3328 single step it may just mean that gdbserver hit the reinsert breakpoint.
3329 The PC has been adjusted by save_stop_reason to point at
3330 the breakpoint address.
3331 So in the case of hardware single step, advance the PC manually past
3332 the breakpoint, and in the case of software single step, advance only
3333 if it's not the reinsert_breakpoint we are hitting.
3334 This avoids the program trapping the same permanent breakpoint
3335 forever. */
3336 if (!ptid_equal (step_over_bkpt, null_ptid)
3337 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3338 && (event_child->stepping
3339 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3340 {
3341 int increment_pc = 0;
3342 int breakpoint_kind = 0;
3343 CORE_ADDR stop_pc = event_child->stop_pc;
3344
3345 breakpoint_kind
3346 = the_target->breakpoint_kind_from_current_state (&stop_pc);
3347 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3348
3349 if (debug_threads)
3350 {
3351 debug_printf ("step-over for %s executed software breakpoint\n",
3352 target_pid_to_str (ptid_of (current_thread)));
3353 }
3354
3355 if (increment_pc != 0)
3356 {
3357 struct regcache *regcache
3358 = get_thread_regcache (current_thread, 1);
3359
3360 event_child->stop_pc += increment_pc;
3361 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3362
3363 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3364 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3365 }
3366 }
3367
3368 /* If this event was not handled before, and is not a SIGTRAP, we
3369 report it. SIGILL and SIGSEGV are also treated as traps in case
3370 a breakpoint is inserted at the current PC. If this target does
3371 not support internal breakpoints at all, we also report the
3372 SIGTRAP without further processing; it's of no concern to us. */
3373 maybe_internal_trap
3374 = (supports_breakpoints ()
3375 && (WSTOPSIG (w) == SIGTRAP
3376 || ((WSTOPSIG (w) == SIGILL
3377 || WSTOPSIG (w) == SIGSEGV)
3378 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3379
3380 if (maybe_internal_trap)
3381 {
3382 /* Handle anything that requires bookkeeping before deciding to
3383 report the event or continue waiting. */
3384
3385 /* First check if we can explain the SIGTRAP with an internal
3386 breakpoint, or if we should possibly report the event to GDB.
3387 Do this before anything that may remove or insert a
3388 breakpoint. */
3389 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3390
3391 /* We have a SIGTRAP, possibly a step-over dance has just
3392 finished. If so, tweak the state machine accordingly,
3393 reinsert breakpoints and delete any reinsert (software
3394 single-step) breakpoints. */
3395 step_over_finished = finish_step_over (event_child);
3396
3397 /* Now invoke the callbacks of any internal breakpoints there. */
3398 check_breakpoints (event_child->stop_pc);
3399
3400 /* Handle tracepoint data collecting. This may overflow the
3401 trace buffer, and cause a tracing stop, removing
3402 breakpoints. */
3403 trace_event = handle_tracepoints (event_child);
3404
3405 if (bp_explains_trap)
3406 {
3407 if (debug_threads)
3408 debug_printf ("Hit a gdbserver breakpoint.\n");
3409 }
3410 }
3411 else
3412 {
3413 /* We have some other signal, possibly a step-over dance was in
3414 progress, and it should be cancelled too. */
3415 step_over_finished = finish_step_over (event_child);
3416 }
3417
3418 /* We have all the data we need. Either report the event to GDB, or
3419 resume threads and keep waiting for more. */
3420
3421 /* If we're collecting a fast tracepoint, finish the collection and
3422 move out of the jump pad before delivering a signal. See
3423 linux_stabilize_threads. */
3424
3425 if (WIFSTOPPED (w)
3426 && WSTOPSIG (w) != SIGTRAP
3427 && supports_fast_tracepoints ()
3428 && agent_loaded_p ())
3429 {
3430 if (debug_threads)
3431 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3432 "to defer or adjust it.\n",
3433 WSTOPSIG (w), lwpid_of (current_thread));
3434
3435 /* Allow debugging the jump pad itself. */
3436 if (current_thread->last_resume_kind != resume_step
3437 && maybe_move_out_of_jump_pad (event_child, &w))
3438 {
3439 enqueue_one_deferred_signal (event_child, &w);
3440
3441 if (debug_threads)
3442 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3443 WSTOPSIG (w), lwpid_of (current_thread));
3444
3445 linux_resume_one_lwp (event_child, 0, 0, NULL);
3446
3447 return ignore_event (ourstatus);
3448 }
3449 }
3450
3451 if (event_child->collecting_fast_tracepoint)
3452 {
3453 if (debug_threads)
3454 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3455 "Check if we're already there.\n",
3456 lwpid_of (current_thread),
3457 event_child->collecting_fast_tracepoint);
3458
3459 trace_event = 1;
3460
3461 event_child->collecting_fast_tracepoint
3462 = linux_fast_tracepoint_collecting (event_child, NULL);
3463
3464 if (event_child->collecting_fast_tracepoint != 1)
3465 {
3466 /* No longer need this breakpoint. */
3467 if (event_child->exit_jump_pad_bkpt != NULL)
3468 {
3469 if (debug_threads)
3470 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3471 "stopping all threads momentarily.\n");
3472
3473 /* Other running threads could hit this breakpoint.
3474 We don't handle moribund locations like GDB does,
3475 instead we always pause all threads when removing
3476 breakpoints, so that any step-over or
3477 decr_pc_after_break adjustment is always taken
3478 care of while the breakpoint is still
3479 inserted. */
3480 stop_all_lwps (1, event_child);
3481
3482 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3483 event_child->exit_jump_pad_bkpt = NULL;
3484
3485 unstop_all_lwps (1, event_child);
3486
3487 gdb_assert (event_child->suspended >= 0);
3488 }
3489 }
3490
3491 if (event_child->collecting_fast_tracepoint == 0)
3492 {
3493 if (debug_threads)
3494 debug_printf ("fast tracepoint finished "
3495 "collecting successfully.\n");
3496
3497 /* We may have a deferred signal to report. */
3498 if (dequeue_one_deferred_signal (event_child, &w))
3499 {
3500 if (debug_threads)
3501 debug_printf ("dequeued one signal.\n");
3502 }
3503 else
3504 {
3505 if (debug_threads)
3506 debug_printf ("no deferred signals.\n");
3507
3508 if (stabilizing_threads)
3509 {
3510 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3511 ourstatus->value.sig = GDB_SIGNAL_0;
3512
3513 if (debug_threads)
3514 {
3515 debug_printf ("linux_wait_1 ret = %s, stopped "
3516 "while stabilizing threads\n",
3517 target_pid_to_str (ptid_of (current_thread)));
3518 debug_exit ();
3519 }
3520
3521 return ptid_of (current_thread);
3522 }
3523 }
3524 }
3525 }
3526
3527 /* Check whether GDB would be interested in this event. */
3528
3529 /* Check if GDB is interested in this syscall. */
3530 if (WIFSTOPPED (w)
3531 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3532 && !gdb_catch_this_syscall_p (event_child))
3533 {
3534 if (debug_threads)
3535 {
3536 debug_printf ("Ignored syscall for LWP %ld.\n",
3537 lwpid_of (current_thread));
3538 }
3539
3540 linux_resume_one_lwp (event_child, event_child->stepping,
3541 0, NULL);
3542 return ignore_event (ourstatus);
3543 }
3544
3545 /* If GDB is not interested in this signal, don't stop other
3546 threads, and don't report it to GDB. Just resume the inferior
3547 right away. We do this for threading-related signals as well as
3548 any that GDB specifically requested we ignore. But never ignore
3549 SIGSTOP if we sent it ourselves, and do not ignore signals when
3550 stepping - they may require special handling to skip the signal
3551 handler. Also never ignore signals that could be caused by a
3552 breakpoint. */
3553 if (WIFSTOPPED (w)
3554 && current_thread->last_resume_kind != resume_step
3555 && (
3556 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3557 (current_process ()->priv->thread_db != NULL
3558 && (WSTOPSIG (w) == __SIGRTMIN
3559 || WSTOPSIG (w) == __SIGRTMIN + 1))
3560 ||
3561 #endif
3562 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3563 && !(WSTOPSIG (w) == SIGSTOP
3564 && current_thread->last_resume_kind == resume_stop)
3565 && !linux_wstatus_maybe_breakpoint (w))))
3566 {
3567 siginfo_t info, *info_p;
3568
3569 if (debug_threads)
3570 debug_printf ("Ignored signal %d for LWP %ld.\n",
3571 WSTOPSIG (w), lwpid_of (current_thread));
3572
3573 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3574 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3575 info_p = &info;
3576 else
3577 info_p = NULL;
3578
3579 if (step_over_finished)
3580 {
3581 /* We cancelled this thread's step-over above. We still
3582 need to unsuspend all other LWPs, and set them back
3583 running again while the signal handler runs. */
3584 unsuspend_all_lwps (event_child);
3585
3586 /* Enqueue the pending signal info so that proceed_all_lwps
3587 doesn't lose it. */
3588 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3589
3590 proceed_all_lwps ();
3591 }
3592 else
3593 {
3594 linux_resume_one_lwp (event_child, event_child->stepping,
3595 WSTOPSIG (w), info_p);
3596 }
3597 return ignore_event (ourstatus);
3598 }
3599
3600 /* Note that all addresses are always "out of the step range" when
3601 there's no range to begin with. */
3602 in_step_range = lwp_in_step_range (event_child);
3603
3604 /* If GDB wanted this thread to single step, and the thread is out
3605 of the step range, we always want to report the SIGTRAP, and let
3606 GDB handle it. Watchpoints should always be reported. So should
3607 signals we can't explain. A SIGTRAP we can't explain could be a
3608 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3609 do, we'd be able to handle GDB breakpoints on top of internal
3610 breakpoints, by handling the internal breakpoint and still
3611 reporting the event to GDB. If we don't, we're out of luck, GDB
3612 won't see the breakpoint hit. If we see a single-step event but
3613 the thread should be continuing, don't pass the trap to gdb.
3614 That indicates that we had previously finished a single-step but
3615 left the single-step pending -- see
3616 complete_ongoing_step_over. */
3617 report_to_gdb = (!maybe_internal_trap
3618 || (current_thread->last_resume_kind == resume_step
3619 && !in_step_range)
3620 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3621 || (!in_step_range
3622 && !bp_explains_trap
3623 && !trace_event
3624 && !step_over_finished
3625 && !(current_thread->last_resume_kind == resume_continue
3626 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3627 || (gdb_breakpoint_here (event_child->stop_pc)
3628 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3629 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3630 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3631
3632 run_breakpoint_commands (event_child->stop_pc);
3633
3634 /* We found no reason GDB would want us to stop. We either hit one
3635 of our own breakpoints, or finished an internal step GDB
3636 shouldn't know about. */
3637 if (!report_to_gdb)
3638 {
3639 if (debug_threads)
3640 {
3641 if (bp_explains_trap)
3642 debug_printf ("Hit a gdbserver breakpoint.\n");
3643 if (step_over_finished)
3644 debug_printf ("Step-over finished.\n");
3645 if (trace_event)
3646 debug_printf ("Tracepoint event.\n");
3647 if (lwp_in_step_range (event_child))
3648 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3649 paddress (event_child->stop_pc),
3650 paddress (event_child->step_range_start),
3651 paddress (event_child->step_range_end));
3652 }
3653
3654 /* We're not reporting this breakpoint to GDB, so apply the
3655 decr_pc_after_break adjustment to the inferior's regcache
3656 ourselves. */
3657
3658 if (the_low_target.set_pc != NULL)
3659 {
3660 struct regcache *regcache
3661 = get_thread_regcache (current_thread, 1);
3662 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3663 }
3664
3665 /* We may have finished stepping over a breakpoint. If so,
3666 we've stopped and suspended all LWPs momentarily except the
3667 stepping one. This is where we resume them all again. We're
3668 going to keep waiting, so use proceed, which handles stepping
3669 over the next breakpoint. */
3670 if (debug_threads)
3671 debug_printf ("proceeding all threads.\n");
3672
3673 if (step_over_finished)
3674 unsuspend_all_lwps (event_child);
3675
3676 proceed_all_lwps ();
3677 return ignore_event (ourstatus);
3678 }
3679
3680 if (debug_threads)
3681 {
3682 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3683 {
3684 char *str;
3685
3686 str = target_waitstatus_to_string (&event_child->waitstatus);
3687 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3688 lwpid_of (get_lwp_thread (event_child)), str);
3689 xfree (str);
3690 }
3691 if (current_thread->last_resume_kind == resume_step)
3692 {
3693 if (event_child->step_range_start == event_child->step_range_end)
3694 debug_printf ("GDB wanted to single-step, reporting event.\n");
3695 else if (!lwp_in_step_range (event_child))
3696 debug_printf ("Out of step range, reporting event.\n");
3697 }
3698 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3699 debug_printf ("Stopped by watchpoint.\n");
3700 else if (gdb_breakpoint_here (event_child->stop_pc))
3701 debug_printf ("Stopped by GDB breakpoint.\n");
3702 	      debug_printf ("Hit a non-gdbserver trap event.\n");
3704 }
3705
3706 /* Alright, we're going to report a stop. */
3707
3708 /* Remove reinsert breakpoints. */
3709 if (can_software_single_step ())
3710 {
3711 	      /* Decide whether to remove reinsert breakpoints. If so, stop all
3712 		 lwps, so that other threads won't hit the breakpoint in the
3713 		 stale memory. */
3714 int remove_reinsert_breakpoints_p = 0;
3715
3716 if (non_stop)
3717 {
3718 remove_reinsert_breakpoints_p
3719 = has_reinsert_breakpoints (current_thread);
3720 }
3721 else
3722 {
3723 /* In all-stop, a stop reply cancels all previous resume
3724 requests. Delete all reinsert breakpoints. */
3725 struct inferior_list_entry *inf, *tmp;
3726
3727 ALL_INFERIORS (&all_threads, inf, tmp)
3728 {
3729 struct thread_info *thread = (struct thread_info *) inf;
3730
3731 if (has_reinsert_breakpoints (thread))
3732 {
3733 remove_reinsert_breakpoints_p = 1;
3734 break;
3735 }
3736 }
3737 }
3738
3739 if (remove_reinsert_breakpoints_p)
3740 {
3741 /* If we remove reinsert breakpoints from memory, stop all lwps,
3742 	     so that other threads won't hit the breakpoint in the stale
3743 	     memory. */
3744 stop_all_lwps (0, event_child);
3745
3746 if (non_stop)
3747 {
3748 gdb_assert (has_reinsert_breakpoints (current_thread));
3749 delete_reinsert_breakpoints (current_thread);
3750 }
3751 else
3752 {
3753 struct inferior_list_entry *inf, *tmp;
3754
3755 ALL_INFERIORS (&all_threads, inf, tmp)
3756 {
3757 struct thread_info *thread = (struct thread_info *) inf;
3758
3759 if (has_reinsert_breakpoints (thread))
3760 delete_reinsert_breakpoints (thread);
3761 }
3762 }
3763
3764 unstop_all_lwps (0, event_child);
3765 }
3766 }
3767
3768 if (!stabilizing_threads)
3769 {
3770 /* In all-stop, stop all threads. */
3771 if (!non_stop)
3772 stop_all_lwps (0, NULL);
3773
3774 if (step_over_finished)
3775 {
3776 if (!non_stop)
3777 {
3778 /* If we were doing a step-over, all other threads but
3779 the stepping one had been paused in start_step_over,
3780 with their suspend counts incremented. We don't want
3781 to do a full unstop/unpause, because we're in
3782 all-stop mode (so we want threads stopped), but we
3783 still need to unsuspend the other threads, to
3784 decrement their `suspended' count back. */
3785 unsuspend_all_lwps (event_child);
3786 }
3787 else
3788 {
3789 /* If we just finished a step-over, then all threads had
3790 been momentarily paused. In all-stop, that's fine,
3791 we want threads stopped by now anyway. In non-stop,
3792 we need to re-resume threads that GDB wanted to be
3793 running. */
3794 unstop_all_lwps (1, event_child);
3795 }
3796 }
3797
3798 /* If we're not waiting for a specific LWP, choose an event LWP
3799 from among those that have had events. Giving equal priority
3800 to all LWPs that have had events helps prevent
3801 starvation. */
3802 if (ptid_equal (ptid, minus_one_ptid))
3803 {
3804 event_child->status_pending_p = 1;
3805 event_child->status_pending = w;
3806
3807 select_event_lwp (&event_child);
3808
3809 /* current_thread and event_child must stay in sync. */
3810 current_thread = get_lwp_thread (event_child);
3811
3812 event_child->status_pending_p = 0;
3813 w = event_child->status_pending;
3814 }
3815
3816
3817 /* Stabilize threads (move out of jump pads). */
3818 if (!non_stop)
3819 stabilize_threads ();
3820 }
3821 else
3822 {
3823 /* If we just finished a step-over, then all threads had been
3824 momentarily paused. In all-stop, that's fine, we want
3825 threads stopped by now anyway. In non-stop, we need to
3826 re-resume threads that GDB wanted to be running. */
3827 if (step_over_finished)
3828 unstop_all_lwps (1, event_child);
3829 }
3830
3831 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3832 {
3833 /* If the reported event is an exit, fork, vfork or exec, let
3834 GDB know. */
3835 *ourstatus = event_child->waitstatus;
3836 /* Clear the event lwp's waitstatus since we handled it already. */
3837 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3838 }
3839 else
3840 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3841
3842 /* Now that we've selected our final event LWP, un-adjust its PC if
3843 it was a software breakpoint, and the client doesn't know we can
3844 adjust the breakpoint ourselves. */
3845 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3846 && !swbreak_feature)
3847 {
3848 int decr_pc = the_low_target.decr_pc_after_break;
3849
3850 if (decr_pc != 0)
3851 {
3852 struct regcache *regcache
3853 = get_thread_regcache (current_thread, 1);
3854 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3855 }
3856 }
3857
3858 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3859 {
3860 get_syscall_trapinfo (event_child,
3861 &ourstatus->value.syscall_number);
3862 ourstatus->kind = event_child->syscall_state;
3863 }
3864 else if (current_thread->last_resume_kind == resume_stop
3865 && WSTOPSIG (w) == SIGSTOP)
3866 {
3867 	  /* A thread that has been requested to stop by GDB with vCont;t
3868 	     stopped cleanly, so report it as SIG0. The use of
3869 SIGSTOP is an implementation detail. */
3870 ourstatus->value.sig = GDB_SIGNAL_0;
3871 }
3872 else if (current_thread->last_resume_kind == resume_stop
3873 && WSTOPSIG (w) != SIGSTOP)
3874 {
3875 /* A thread that has been requested to stop by GDB with vCont;t,
3876 	     but it stopped for some other reason. */
3877 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3878 }
3879 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3880 {
3881 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3882 }
3883
3884 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3885
3886 if (debug_threads)
3887 {
3888 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3889 target_pid_to_str (ptid_of (current_thread)),
3890 ourstatus->kind, ourstatus->value.sig);
3891 debug_exit ();
3892 }
3893
3894 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3895 return filter_exit_event (event_child, ourstatus);
3896
3897 return ptid_of (current_thread);
3898 }
3899
3900 /* Get rid of any pending event in the pipe. */
3901 static void
3902 async_file_flush (void)
3903 {
3904 int ret;
3905 char buf;
3906
3907 do
3908 ret = read (linux_event_pipe[0], &buf, 1);
3909 while (ret >= 0 || (ret == -1 && errno == EINTR));
3910 }
3911
3912 /* Put something in the pipe, so the event loop wakes up. */
3913 static void
3914 async_file_mark (void)
3915 {
3916 int ret;
3917
3918 async_file_flush ();
3919
3920 do
3921 ret = write (linux_event_pipe[1], "+", 1);
3922 while (ret == 0 || (ret == -1 && errno == EINTR));
3923
3924 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3925 be awakened anyway. */
3926 }
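/* A minimal sketch of how such a wake-up pipe could be created (the
   real initialization lives elsewhere in this file); both ends are
   made non-blocking, so flushing never stalls and marking a full
   pipe simply fails with EAGAIN, which is ignored above:

     int fds[2];

     if (pipe (fds) == 0)
       {
         fcntl (fds[0], F_SETFL, O_NONBLOCK);
         fcntl (fds[1], F_SETFL, O_NONBLOCK);
       }
*/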
3927
3928 static ptid_t
3929 linux_wait (ptid_t ptid,
3930 struct target_waitstatus *ourstatus, int target_options)
3931 {
3932 ptid_t event_ptid;
3933
3934 /* Flush the async file first. */
3935 if (target_is_async_p ())
3936 async_file_flush ();
3937
3938 do
3939 {
3940 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3941 }
3942 while ((target_options & TARGET_WNOHANG) == 0
3943 && ptid_equal (event_ptid, null_ptid)
3944 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3945
3946 /* If at least one stop was reported, there may be more. A single
3947 SIGCHLD can signal more than one child stop. */
3948 if (target_is_async_p ()
3949 && (target_options & TARGET_WNOHANG) != 0
3950 && !ptid_equal (event_ptid, null_ptid))
3951 async_file_mark ();
3952
3953 return event_ptid;
3954 }
3955
3956 /* Send a signal to an LWP. */
3957
3958 static int
3959 kill_lwp (unsigned long lwpid, int signo)
3960 {
3961 int ret;
3962
3963 errno = 0;
3964 ret = syscall (__NR_tkill, lwpid, signo);
3965 if (errno == ENOSYS)
3966 {
3967 /* If tkill fails, then we are not using nptl threads, a
3968 configuration we no longer support. */
3969 perror_with_name (("tkill"));
3970 }
3971 return ret;
3972 }
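/* Note the raw tkill syscall above rather than kill: kill (2)
   addresses a whole thread group and lets the kernel pick which
   thread receives the signal, while tkill directs the signal at
   exactly the one LWP we mean to signal.  */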
3973
3974 void
3975 linux_stop_lwp (struct lwp_info *lwp)
3976 {
3977 send_sigstop (lwp);
3978 }
3979
3980 static void
3981 send_sigstop (struct lwp_info *lwp)
3982 {
3983 int pid;
3984
3985 pid = lwpid_of (get_lwp_thread (lwp));
3986
3987 /* If we already have a pending stop signal for this process, don't
3988 send another. */
3989 if (lwp->stop_expected)
3990 {
3991 if (debug_threads)
3992 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3993
3994 return;
3995 }
3996
3997 if (debug_threads)
3998 debug_printf ("Sending sigstop to lwp %d\n", pid);
3999
4000 lwp->stop_expected = 1;
4001 kill_lwp (pid, SIGSTOP);
4002 }
4003
4004 static int
4005 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
4006 {
4007 struct thread_info *thread = (struct thread_info *) entry;
4008 struct lwp_info *lwp = get_thread_lwp (thread);
4009
4010 /* Ignore EXCEPT. */
4011 if (lwp == except)
4012 return 0;
4013
4014 if (lwp->stopped)
4015 return 0;
4016
4017 send_sigstop (lwp);
4018 return 0;
4019 }
4020
4021 /* Increment the suspend count of an LWP, and stop it, if not stopped
4022 yet. */
4023 static int
4024 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
4025 void *except)
4026 {
4027 struct thread_info *thread = (struct thread_info *) entry;
4028 struct lwp_info *lwp = get_thread_lwp (thread);
4029
4030 /* Ignore EXCEPT. */
4031 if (lwp == except)
4032 return 0;
4033
4034 lwp_suspended_inc (lwp);
4035
4036 return send_sigstop_callback (entry, except);
4037 }
4038
4039 static void
4040 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4041 {
4042 /* Store the exit status for later. */
4043 lwp->status_pending_p = 1;
4044 lwp->status_pending = wstat;
4045
4046 /* Store in waitstatus as well, as there's nothing else to process
4047 for this event. */
4048 if (WIFEXITED (wstat))
4049 {
4050 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4051 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4052 }
4053 else if (WIFSIGNALED (wstat))
4054 {
4055 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4056 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4057 }
4058
4059 /* Prevent trying to stop it. */
4060 lwp->stopped = 1;
4061
4062 /* No further stops are expected from a dead lwp. */
4063 lwp->stop_expected = 0;
4064 }
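/* For reference, WSTAT is a raw waitpid status.  On Linux, a status
   of 0x0000 decodes as a clean exit (WIFEXITED, with WEXITSTATUS 0),
   while e.g. 0x000b decodes as death by signal 11 (WIFSIGNALED, with
   WTERMSIG == SIGSEGV).  */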
4065
4066 /* Return true if LWP has exited already, and has a pending exit event
4067 to report to GDB. */
4068
4069 static int
4070 lwp_is_marked_dead (struct lwp_info *lwp)
4071 {
4072 return (lwp->status_pending_p
4073 && (WIFEXITED (lwp->status_pending)
4074 || WIFSIGNALED (lwp->status_pending)));
4075 }
4076
4077 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4078
4079 static void
4080 wait_for_sigstop (void)
4081 {
4082 struct thread_info *saved_thread;
4083 ptid_t saved_tid;
4084 int wstat;
4085 int ret;
4086
4087 saved_thread = current_thread;
4088 if (saved_thread != NULL)
4089 saved_tid = saved_thread->entry.id;
4090 else
4091 saved_tid = null_ptid; /* avoid bogus unused warning */
4092
4093 if (debug_threads)
4094 debug_printf ("wait_for_sigstop: pulling events\n");
4095
4096 /* Passing NULL_PTID as filter indicates we want all events to be
4097 left pending. Eventually this returns when there are no
4098 unwaited-for children left. */
4099 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4100 &wstat, __WALL);
4101 gdb_assert (ret == -1);
4102
4103 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4104 current_thread = saved_thread;
4105 else
4106 {
4107 if (debug_threads)
4108 debug_printf ("Previously current thread died.\n");
4109
4110 /* We can't change the current inferior behind GDB's back,
4111 otherwise, a subsequent command may apply to the wrong
4112 process. */
4113 current_thread = NULL;
4114 }
4115 }
4116
4117 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4118 move it out, because we need to report the stop event to GDB. For
4119 example, if the user puts a breakpoint in the jump pad, it's
4120 because she wants to debug it. */
4121
4122 static int
4123 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4124 {
4125 struct thread_info *thread = (struct thread_info *) entry;
4126 struct lwp_info *lwp = get_thread_lwp (thread);
4127
4128 if (lwp->suspended != 0)
4129 {
4130 internal_error (__FILE__, __LINE__,
4131 "LWP %ld is suspended, suspended=%d\n",
4132 lwpid_of (thread), lwp->suspended);
4133 }
4134 gdb_assert (lwp->stopped);
4135
4136   /* Allow debugging the jump pad, gdb_collect, etc.  */
4137 return (supports_fast_tracepoints ()
4138 && agent_loaded_p ()
4139 && (gdb_breakpoint_here (lwp->stop_pc)
4140 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4141 || thread->last_resume_kind == resume_step)
4142 && linux_fast_tracepoint_collecting (lwp, NULL));
4143 }
4144
4145 static void
4146 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4147 {
4148 struct thread_info *thread = (struct thread_info *) entry;
4149 struct thread_info *saved_thread;
4150 struct lwp_info *lwp = get_thread_lwp (thread);
4151 int *wstat;
4152
4153 if (lwp->suspended != 0)
4154 {
4155 internal_error (__FILE__, __LINE__,
4156 "LWP %ld is suspended, suspended=%d\n",
4157 lwpid_of (thread), lwp->suspended);
4158 }
4159 gdb_assert (lwp->stopped);
4160
4161 /* For gdb_breakpoint_here. */
4162 saved_thread = current_thread;
4163 current_thread = thread;
4164
4165 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4166
4167 /* Allow debugging the jump pad, gdb_collect, etc. */
4168 if (!gdb_breakpoint_here (lwp->stop_pc)
4169 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4170 && thread->last_resume_kind != resume_step
4171 && maybe_move_out_of_jump_pad (lwp, wstat))
4172 {
4173 if (debug_threads)
4174 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4175 lwpid_of (thread));
4176
4177 if (wstat)
4178 {
4179 lwp->status_pending_p = 0;
4180 enqueue_one_deferred_signal (lwp, wstat);
4181
4182 if (debug_threads)
4183 debug_printf ("Signal %d for LWP %ld deferred "
4184 "(in jump pad)\n",
4185 WSTOPSIG (*wstat), lwpid_of (thread));
4186 }
4187
4188 linux_resume_one_lwp (lwp, 0, 0, NULL);
4189 }
4190 else
4191 lwp_suspended_inc (lwp);
4192
4193 current_thread = saved_thread;
4194 }
4195
4196 static int
4197 lwp_running (struct inferior_list_entry *entry, void *data)
4198 {
4199 struct thread_info *thread = (struct thread_info *) entry;
4200 struct lwp_info *lwp = get_thread_lwp (thread);
4201
4202 if (lwp_is_marked_dead (lwp))
4203 return 0;
4204 if (lwp->stopped)
4205 return 0;
4206 return 1;
4207 }
4208
4209 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4210 If SUSPEND, then also increase the suspend count of every LWP,
4211 except EXCEPT. */
4212
4213 static void
4214 stop_all_lwps (int suspend, struct lwp_info *except)
4215 {
4216 /* Should not be called recursively. */
4217 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4218
4219 if (debug_threads)
4220 {
4221 debug_enter ();
4222 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4223 suspend ? "stop-and-suspend" : "stop",
4224 except != NULL
4225 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4226 : "none");
4227 }
4228
4229 stopping_threads = (suspend
4230 ? STOPPING_AND_SUSPENDING_THREADS
4231 : STOPPING_THREADS);
4232
4233 if (suspend)
4234 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4235 else
4236 find_inferior (&all_threads, send_sigstop_callback, except);
4237 wait_for_sigstop ();
4238 stopping_threads = NOT_STOPPING_THREADS;
4239
4240 if (debug_threads)
4241 {
4242 debug_printf ("stop_all_lwps done, setting stopping_threads "
4243 "back to !stopping\n");
4244 debug_exit ();
4245 }
4246 }
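/* Illustrative sketch of the pairing discipline for the suspend
   variant, as used by the step-over machinery below: every
   stop_all_lwps (1, lwp) must eventually be balanced either by
   unstop_all_lwps (1, lwp) (unsuspend and re-resume, for non-stop)
   or by a bare unsuspend_all_lwps (lwp) when threads should remain
   stopped (all-stop):

     stop_all_lwps (1, event_child);
     ... single-step EVENT_CHILD past the breakpoint ...
     if (non_stop)
       unstop_all_lwps (1, event_child);
     else
       unsuspend_all_lwps (event_child);
*/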
4247
4248 /* Enqueue one signal in the chain of signals which need to be
4249 delivered to this process on next resume. */
4250
4251 static void
4252 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4253 {
4254 struct pending_signals *p_sig = XNEW (struct pending_signals);
4255
4256 p_sig->prev = lwp->pending_signals;
4257 p_sig->signal = signal;
4258 if (info == NULL)
4259 memset (&p_sig->info, 0, sizeof (siginfo_t));
4260 else
4261 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4262 lwp->pending_signals = p_sig;
4263 }
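/* Signals are pushed at the head of this list; consumers (see
   linux_resume_one_lwp_throw) walk the PREV links back to the
   oldest entry, so delivery is first-in first-out.  A minimal
   sketch of that dequeue walk:

     struct pending_signals **p_sig = &lwp->pending_signals;

     while ((*p_sig)->prev != NULL)
       p_sig = &(*p_sig)->prev;
     ... deliver (*p_sig)->signal, then free and unlink *p_sig ...
*/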
4264
4265 /* Install breakpoints for software single stepping. */
4266
4267 static void
4268 install_software_single_step_breakpoints (struct lwp_info *lwp)
4269 {
4270 int i;
4271 CORE_ADDR pc;
4272 struct thread_info *thread = get_lwp_thread (lwp);
4273 struct regcache *regcache = get_thread_regcache (thread, 1);
4274 VEC (CORE_ADDR) *next_pcs = NULL;
4275 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4276
4277 make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4278
4279 current_thread = thread;
4280 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4281
4282 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4283 set_reinsert_breakpoint (pc, current_ptid);
4284
4285 do_cleanups (old_chain);
4286 }
4287
4288 /* Single step via hardware or software single step.
4289 Return 1 if hardware single stepping, 0 if software single stepping
4290 or can't single step. */
4291
4292 static int
4293 single_step (struct lwp_info* lwp)
4294 {
4295 int step = 0;
4296
4297 if (can_hardware_single_step ())
4298 {
4299 step = 1;
4300 }
4301 else if (can_software_single_step ())
4302 {
4303 install_software_single_step_breakpoints (lwp);
4304 step = 0;
4305 }
4306 else
4307 {
4308 if (debug_threads)
4309 	debug_printf ("stepping is not implemented on this target\n");
4310 }
4311
4312 return step;
4313 }
4314
4315 /* The signal can be delivered to the inferior if we are not trying to
4316    finish a fast tracepoint collect. Since a signal can be delivered
4317    during a step-over, the program may enter the signal handler and
4318    trap again after returning from it. We can live with the spurious
4319 double traps. */
4320
4321 static int
4322 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4323 {
4324 return !lwp->collecting_fast_tracepoint;
4325 }
4326
4327 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4328 SIGNAL is nonzero, give it that signal. */
4329
4330 static void
4331 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4332 int step, int signal, siginfo_t *info)
4333 {
4334 struct thread_info *thread = get_lwp_thread (lwp);
4335 struct thread_info *saved_thread;
4336 int fast_tp_collecting;
4337 int ptrace_request;
4338 struct process_info *proc = get_thread_process (thread);
4339
4340   /* Note that the target description may not be initialised
4341      (proc->tdesc == NULL) at this point because the program hasn't
4342      stopped at the first instruction yet. This means GDBserver is
4343      skipping the extra traps from the wrapper program (see option
4344      --wrapper). Code in this function that requires register access
4345      should be guarded by a check on proc->tdesc. */
4346
4347 if (lwp->stopped == 0)
4348 return;
4349
4350 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4351
4352 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4353
4354 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4355
4356 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4357 user used the "jump" command, or "set $pc = foo"). */
4358 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4359 {
4360 /* Collecting 'while-stepping' actions doesn't make sense
4361 anymore. */
4362 release_while_stepping_state_list (thread);
4363 }
4364
4365 /* If we have pending signals or status, and a new signal, enqueue the
4366 signal. Also enqueue the signal if it can't be delivered to the
4367 inferior right now. */
4368 if (signal != 0
4369 && (lwp->status_pending_p
4370 || lwp->pending_signals != NULL
4371 || !lwp_signal_can_be_delivered (lwp)))
4372 {
4373 enqueue_pending_signal (lwp, signal, info);
4374
4375 /* Postpone any pending signal. It was enqueued above. */
4376 signal = 0;
4377 }
4378
4379 if (lwp->status_pending_p)
4380 {
4381 if (debug_threads)
4382 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4383 " has pending status\n",
4384 lwpid_of (thread), step ? "step" : "continue",
4385 lwp->stop_expected ? "expected" : "not expected");
4386 return;
4387 }
4388
4389 saved_thread = current_thread;
4390 current_thread = thread;
4391
4392 /* This bit needs some thinking about. If we get a signal that
4393 we must report while a single-step reinsert is still pending,
4394 we often end up resuming the thread. It might be better to
4395 (ew) allow a stack of pending events; then we could be sure that
4396 the reinsert happened right away and not lose any signals.
4397
4398 Making this stack would also shrink the window in which breakpoints are
4399 uninserted (see comment in linux_wait_for_lwp) but not enough for
4400 complete correctness, so it won't solve that problem. It may be
4401 worthwhile just to solve this one, however. */
4402 if (lwp->bp_reinsert != 0)
4403 {
4404 if (debug_threads)
4405 debug_printf (" pending reinsert at 0x%s\n",
4406 paddress (lwp->bp_reinsert));
4407
4408 if (can_hardware_single_step ())
4409 {
4410 if (fast_tp_collecting == 0)
4411 {
4412 if (step == 0)
4413 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4414 if (lwp->suspended)
4415 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4416 lwp->suspended);
4417 }
4418 }
4419
4420 step = maybe_hw_step (thread);
4421 }
4422
4423 if (fast_tp_collecting == 1)
4424 {
4425 if (debug_threads)
4426 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4427 " (exit-jump-pad-bkpt)\n",
4428 lwpid_of (thread));
4429 }
4430 else if (fast_tp_collecting == 2)
4431 {
4432 if (debug_threads)
4433 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4434 " single-stepping\n",
4435 lwpid_of (thread));
4436
4437 if (can_hardware_single_step ())
4438 step = 1;
4439 else
4440 {
4441 internal_error (__FILE__, __LINE__,
4442 "moving out of jump pad single-stepping"
4443 " not implemented on this target");
4444 }
4445 }
4446
4447   /* If we have while-stepping actions in this thread, set it stepping.
4448 If we have a signal to deliver, it may or may not be set to
4449 SIG_IGN, we don't know. Assume so, and allow collecting
4450 while-stepping into a signal handler. A possible smart thing to
4451 do would be to set an internal breakpoint at the signal return
4452 address, continue, and carry on catching this while-stepping
4453 action only when that breakpoint is hit. A future
4454 enhancement. */
4455 if (thread->while_stepping != NULL)
4456 {
4457 if (debug_threads)
4458 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4459 lwpid_of (thread));
4460
4461 step = single_step (lwp);
4462 }
4463
4464 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4465 {
4466 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4467
4468 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4469
4470 if (debug_threads)
4471 {
4472 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4473 (long) lwp->stop_pc);
4474 }
4475 }
4476
4477 /* If we have pending signals, consume one if it can be delivered to
4478 the inferior. */
4479 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4480 {
4481 struct pending_signals **p_sig;
4482
4483 p_sig = &lwp->pending_signals;
4484 while ((*p_sig)->prev != NULL)
4485 p_sig = &(*p_sig)->prev;
4486
4487 signal = (*p_sig)->signal;
4488 if ((*p_sig)->info.si_signo != 0)
4489 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4490 &(*p_sig)->info);
4491
4492 free (*p_sig);
4493 *p_sig = NULL;
4494 }
4495
4496 if (debug_threads)
4497 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4498 lwpid_of (thread), step ? "step" : "continue", signal,
4499 lwp->stop_expected ? "expected" : "not expected");
4500
4501 if (the_low_target.prepare_to_resume != NULL)
4502 the_low_target.prepare_to_resume (lwp);
4503
4504 regcache_invalidate_thread (thread);
4505 errno = 0;
4506 lwp->stepping = step;
4507 if (step)
4508 ptrace_request = PTRACE_SINGLESTEP;
4509 else if (gdb_catching_syscalls_p (lwp))
4510 ptrace_request = PTRACE_SYSCALL;
4511 else
4512 ptrace_request = PTRACE_CONT;
4513 ptrace (ptrace_request,
4514 lwpid_of (thread),
4515 (PTRACE_TYPE_ARG3) 0,
4516 /* Coerce to a uintptr_t first to avoid potential gcc warning
4517 of coercing an 8 byte integer to a 4 byte pointer. */
4518 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4519
4520 current_thread = saved_thread;
4521 if (errno)
4522 perror_with_name ("resuming thread");
4523
4524 /* Successfully resumed. Clear state that no longer makes sense,
4525 and mark the LWP as running. Must not do this before resuming
4526 otherwise if that fails other code will be confused. E.g., we'd
4527 later try to stop the LWP and hang forever waiting for a stop
4528 status. Note that we must not throw after this is cleared,
4529 otherwise handle_zombie_lwp_error would get confused. */
4530 lwp->stopped = 0;
4531 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4532 }
4533
4534 /* Called when we try to resume a stopped LWP and that errors out. If
4535 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4536 or about to become), discard the error, clear any pending status
4537 the LWP may have, and return true (we'll collect the exit status
4538 soon enough). Otherwise, return false. */
4539
4540 static int
4541 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4542 {
4543 struct thread_info *thread = get_lwp_thread (lp);
4544
4545 /* If we get an error after resuming the LWP successfully, we'd
4546 confuse !T state for the LWP being gone. */
4547 gdb_assert (lp->stopped);
4548
4549 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4550 because even if ptrace failed with ESRCH, the tracee may be "not
4551 yet fully dead", but already refusing ptrace requests. In that
4552 case the tracee has 'R (Running)' state for a little bit
4553 (observed in Linux 3.18). See also the note on ESRCH in the
4554 ptrace(2) man page. Instead, check whether the LWP has any state
4555 other than ptrace-stopped. */
4556
4557 /* Don't assume anything if /proc/PID/status can't be read. */
4558 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4559 {
4560 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4561 lp->status_pending_p = 0;
4562 return 1;
4563 }
4564 return 0;
4565 }
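/* Conceptually, the check above boils down to reading the State:
   line of /proc/PID/status and testing for 't' (tracing stop).  A
   rough sketch, ignoring the caching and warning suppression the
   real helper provides:

     char path[64], line[128];
     FILE *f;

     snprintf (path, sizeof (path), "/proc/%ld/status",
               lwpid_of (thread));
     f = fopen (path, "r");
     while (f != NULL && fgets (line, sizeof (line), f) != NULL)
       if (strncmp (line, "State:", 6) == 0)
         return strstr (line, "(tracing stop)") != NULL;
*/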
4566
4567 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4568 disappears while we try to resume it. */
4569
4570 static void
4571 linux_resume_one_lwp (struct lwp_info *lwp,
4572 int step, int signal, siginfo_t *info)
4573 {
4574 TRY
4575 {
4576 linux_resume_one_lwp_throw (lwp, step, signal, info);
4577 }
4578 CATCH (ex, RETURN_MASK_ERROR)
4579 {
4580 if (!check_ptrace_stopped_lwp_gone (lwp))
4581 throw_exception (ex);
4582 }
4583 END_CATCH
4584 }
4585
4586 struct thread_resume_array
4587 {
4588 struct thread_resume *resume;
4589 size_t n;
4590 };
4591
4592 /* This function is called once per thread via find_inferior.
4593 ARG is a pointer to a thread_resume_array struct.
4594 We look up the thread specified by ENTRY in ARG, and mark the thread
4595 with a pointer to the appropriate resume request.
4596
4597    This algorithm is O(threads * resume elements), but the number
4598    of resume elements is small (and will remain small at least
4599    until GDB supports thread suspension). */
4600
4601 static int
4602 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4603 {
4604 struct thread_info *thread = (struct thread_info *) entry;
4605 struct lwp_info *lwp = get_thread_lwp (thread);
4606 int ndx;
4607 struct thread_resume_array *r;
4608
4609 r = (struct thread_resume_array *) arg;
4610
4611 for (ndx = 0; ndx < r->n; ndx++)
4612 {
4613 ptid_t ptid = r->resume[ndx].thread;
4614 if (ptid_equal (ptid, minus_one_ptid)
4615 || ptid_equal (ptid, entry->id)
4616 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4617 of PID'. */
4618 || (ptid_get_pid (ptid) == pid_of (thread)
4619 && (ptid_is_pid (ptid)
4620 || ptid_get_lwp (ptid) == -1)))
4621 {
4622 if (r->resume[ndx].kind == resume_stop
4623 && thread->last_resume_kind == resume_stop)
4624 {
4625 if (debug_threads)
4626 debug_printf ("already %s LWP %ld at GDB's request\n",
4627 (thread->last_status.kind
4628 == TARGET_WAITKIND_STOPPED)
4629 ? "stopped"
4630 : "stopping",
4631 lwpid_of (thread));
4632
4633 continue;
4634 }
4635
4636 lwp->resume = &r->resume[ndx];
4637 thread->last_resume_kind = lwp->resume->kind;
4638
4639 lwp->step_range_start = lwp->resume->step_range_start;
4640 lwp->step_range_end = lwp->resume->step_range_end;
4641
4642 /* If we had a deferred signal to report, dequeue one now.
4643 This can happen if LWP gets more than one signal while
4644 trying to get out of a jump pad. */
4645 if (lwp->stopped
4646 && !lwp->status_pending_p
4647 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4648 {
4649 lwp->status_pending_p = 1;
4650
4651 if (debug_threads)
4652 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4653 "leaving status pending.\n",
4654 WSTOPSIG (lwp->status_pending),
4655 lwpid_of (thread));
4656 }
4657
4658 return 0;
4659 }
4660 }
4661
4662 /* No resume action for this thread. */
4663 lwp->resume = NULL;
4664
4665 return 0;
4666 }
4667
4668 /* find_inferior callback for linux_resume.
4669 Set *FLAG_P if this lwp has an interesting status pending. */
4670
4671 static int
4672 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4673 {
4674 struct thread_info *thread = (struct thread_info *) entry;
4675 struct lwp_info *lwp = get_thread_lwp (thread);
4676
4677 /* LWPs which will not be resumed are not interesting, because
4678 we might not wait for them next time through linux_wait. */
4679 if (lwp->resume == NULL)
4680 return 0;
4681
4682 if (thread_still_has_status_pending_p (thread))
4683 * (int *) flag_p = 1;
4684
4685 return 0;
4686 }
4687
4688 /* Return 1 if this lwp that GDB wants running is stopped at an
4689 internal breakpoint that we need to step over. It assumes that any
4690 required STOP_PC adjustment has already been propagated to the
4691 inferior's regcache. */
4692
4693 static int
4694 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4695 {
4696 struct thread_info *thread = (struct thread_info *) entry;
4697 struct lwp_info *lwp = get_thread_lwp (thread);
4698 struct thread_info *saved_thread;
4699 CORE_ADDR pc;
4700 struct process_info *proc = get_thread_process (thread);
4701
4702 /* GDBserver is skipping the extra traps from the wrapper program,
4703 don't have to do step over. */
4704 if (proc->tdesc == NULL)
4705 return 0;
4706
4707 /* LWPs which will not be resumed are not interesting, because we
4708 might not wait for them next time through linux_wait. */
4709
4710 if (!lwp->stopped)
4711 {
4712 if (debug_threads)
4713 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4714 lwpid_of (thread));
4715 return 0;
4716 }
4717
4718 if (thread->last_resume_kind == resume_stop)
4719 {
4720 if (debug_threads)
4721 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4722 " stopped\n",
4723 lwpid_of (thread));
4724 return 0;
4725 }
4726
4727 gdb_assert (lwp->suspended >= 0);
4728
4729 if (lwp->suspended)
4730 {
4731 if (debug_threads)
4732 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4733 lwpid_of (thread));
4734 return 0;
4735 }
4736
4737 if (lwp->status_pending_p)
4738 {
4739 if (debug_threads)
4740 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4741 " status.\n",
4742 lwpid_of (thread));
4743 return 0;
4744 }
4745
4746 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4747 or we have. */
4748 pc = get_pc (lwp);
4749
4750 /* If the PC has changed since we stopped, then don't do anything,
4751 and let the breakpoint/tracepoint be hit. This happens if, for
4752 instance, GDB handled the decr_pc_after_break subtraction itself,
4753 GDB is OOL stepping this thread, or the user has issued a "jump"
4754 command, or poked thread's registers herself. */
4755 if (pc != lwp->stop_pc)
4756 {
4757 if (debug_threads)
4758 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4759 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4760 lwpid_of (thread),
4761 paddress (lwp->stop_pc), paddress (pc));
4762 return 0;
4763 }
4764
4765   /* On a software single-step target, resume the inferior with the
4766      signal rather than stepping over. */
4767 if (can_software_single_step ()
4768 && lwp->pending_signals != NULL
4769 && lwp_signal_can_be_delivered (lwp))
4770 {
4771 if (debug_threads)
4772 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4773 " signals.\n",
4774 lwpid_of (thread));
4775
4776 return 0;
4777 }
4778
4779 saved_thread = current_thread;
4780 current_thread = thread;
4781
4782 /* We can only step over breakpoints we know about. */
4783 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4784 {
4785 /* Don't step over a breakpoint that GDB expects to hit
4786 	 though. If the condition is being evaluated on the target's side
4787 	 and it evaluates to false, step over this breakpoint as well. */
4788 if (gdb_breakpoint_here (pc)
4789 && gdb_condition_true_at_breakpoint (pc)
4790 && gdb_no_commands_at_breakpoint (pc))
4791 {
4792 if (debug_threads)
4793 debug_printf ("Need step over [LWP %ld]? yes, but found"
4794 " GDB breakpoint at 0x%s; skipping step over\n",
4795 lwpid_of (thread), paddress (pc));
4796
4797 current_thread = saved_thread;
4798 return 0;
4799 }
4800 else
4801 {
4802 if (debug_threads)
4803 debug_printf ("Need step over [LWP %ld]? yes, "
4804 "found breakpoint at 0x%s\n",
4805 lwpid_of (thread), paddress (pc));
4806
4807 /* We've found an lwp that needs stepping over --- return 1 so
4808 that find_inferior stops looking. */
4809 current_thread = saved_thread;
4810
4811 return 1;
4812 }
4813 }
4814
4815 current_thread = saved_thread;
4816
4817 if (debug_threads)
4818 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4819 " at 0x%s\n",
4820 lwpid_of (thread), paddress (pc));
4821
4822 return 0;
4823 }
4824
4825 /* Start a step-over operation on LWP. When LWP stops at a
4826    breakpoint, to make progress, we need to move the breakpoint out
4827    of the way. If we let other threads run while we do that, they may
4828 pass by the breakpoint location and miss hitting it. To avoid
4829 that, a step-over momentarily stops all threads while LWP is
4830 single-stepped by either hardware or software while the breakpoint
4831 is temporarily uninserted from the inferior. When the single-step
4832 finishes, we reinsert the breakpoint, and let all threads that are
4833 supposed to be running, run again. */
4834
4835 static int
4836 start_step_over (struct lwp_info *lwp)
4837 {
4838 struct thread_info *thread = get_lwp_thread (lwp);
4839 struct thread_info *saved_thread;
4840 CORE_ADDR pc;
4841 int step;
4842
4843 if (debug_threads)
4844 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4845 lwpid_of (thread));
4846
4847 stop_all_lwps (1, lwp);
4848
4849 if (lwp->suspended != 0)
4850 {
4851 internal_error (__FILE__, __LINE__,
4852 "LWP %ld suspended=%d\n", lwpid_of (thread),
4853 lwp->suspended);
4854 }
4855
4856 if (debug_threads)
4857 debug_printf ("Done stopping all threads for step-over.\n");
4858
4859 /* Note, we should always reach here with an already adjusted PC,
4860 either by GDB (if we're resuming due to GDB's request), or by our
4861 caller, if we just finished handling an internal breakpoint GDB
4862 shouldn't care about. */
4863 pc = get_pc (lwp);
4864
4865 saved_thread = current_thread;
4866 current_thread = thread;
4867
4868 lwp->bp_reinsert = pc;
4869 uninsert_breakpoints_at (pc);
4870 uninsert_fast_tracepoint_jumps_at (pc);
4871
4872 step = single_step (lwp);
4873
4874 current_thread = saved_thread;
4875
4876 linux_resume_one_lwp (lwp, step, 0, NULL);
4877
4878 /* Require next event from this LWP. */
4879 step_over_bkpt = thread->entry.id;
4880 return 1;
4881 }
4882
4883 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4884 start_step_over, if still there, and delete any reinsert
4885    breakpoints we've set, on non-hardware single-step targets. */
4886
4887 static int
4888 finish_step_over (struct lwp_info *lwp)
4889 {
4890 if (lwp->bp_reinsert != 0)
4891 {
4892 struct thread_info *saved_thread = current_thread;
4893
4894 if (debug_threads)
4895 debug_printf ("Finished step over.\n");
4896
4897 current_thread = get_lwp_thread (lwp);
4898
4899 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4900 may be no breakpoint to reinsert there by now. */
4901 reinsert_breakpoints_at (lwp->bp_reinsert);
4902 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4903
4904 lwp->bp_reinsert = 0;
4905
4906 /* Delete any software-single-step reinsert breakpoints. No
4907 longer needed. We don't have to worry about other threads
4908 hitting this trap, and later not being able to explain it,
4909 because we were stepping over a breakpoint, and we hold all
4910 threads but LWP stopped while doing that. */
4911 if (!can_hardware_single_step ())
4912 {
4913 gdb_assert (has_reinsert_breakpoints (current_thread));
4914 delete_reinsert_breakpoints (current_thread);
4915 }
4916
4917 step_over_bkpt = null_ptid;
4918 current_thread = saved_thread;
4919 return 1;
4920 }
4921 else
4922 return 0;
4923 }
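/* The two halves above bracket the step-over state machine:
   start_step_over records the breakpoint address in
   LWP->BP_REINSERT and uninserts the breakpoint; finish_step_over
   reinserts it and clears the field:

     start_step_over (lwp);     sets bp_reinsert, resumes/steps LWP
     ... wait until the step completes ...
     finish_step_over (lwp);    reinserts, clears bp_reinsert
*/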
4924
4925 /* If there's a step over in progress, wait until all threads stop
4926 (that is, until the stepping thread finishes its step), and
4927 unsuspend all lwps. The stepping thread ends with its status
4928 pending, which is processed later when we get back to processing
4929 events. */
4930
4931 static void
4932 complete_ongoing_step_over (void)
4933 {
4934 if (!ptid_equal (step_over_bkpt, null_ptid))
4935 {
4936 struct lwp_info *lwp;
4937 int wstat;
4938 int ret;
4939
4940 if (debug_threads)
4941 debug_printf ("detach: step over in progress, finish it first\n");
4942
4943 /* Passing NULL_PTID as filter indicates we want all events to
4944 be left pending. Eventually this returns when there are no
4945 unwaited-for children left. */
4946 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4947 &wstat, __WALL);
4948 gdb_assert (ret == -1);
4949
4950 lwp = find_lwp_pid (step_over_bkpt);
4951 if (lwp != NULL)
4952 finish_step_over (lwp);
4953 step_over_bkpt = null_ptid;
4954 unsuspend_all_lwps (lwp);
4955 }
4956 }
4957
4958 /* This function is called once per thread. We check the thread's resume
4959 request, which will tell us whether to resume, step, or leave the thread
4960 stopped; and what signal, if any, it should be sent.
4961
4962 For threads which we aren't explicitly told otherwise, we preserve
4963 the stepping flag; this is used for stepping over gdbserver-placed
4964 breakpoints.
4965
4966 If pending_flags was set in any thread, we queue any needed
4967 signals, since we won't actually resume. We already have a pending
4968 event to report, so we don't need to preserve any step requests;
4969 they should be re-issued if necessary. */
4970
4971 static int
4972 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4973 {
4974 struct thread_info *thread = (struct thread_info *) entry;
4975 struct lwp_info *lwp = get_thread_lwp (thread);
4976 int leave_all_stopped = * (int *) arg;
4977 int leave_pending;
4978
4979 if (lwp->resume == NULL)
4980 return 0;
4981
4982 if (lwp->resume->kind == resume_stop)
4983 {
4984 if (debug_threads)
4985 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4986
4987 if (!lwp->stopped)
4988 {
4989 if (debug_threads)
4990 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4991
4992 /* Stop the thread, and wait for the event asynchronously,
4993 through the event loop. */
4994 send_sigstop (lwp);
4995 }
4996 else
4997 {
4998 if (debug_threads)
4999 debug_printf ("already stopped LWP %ld\n",
5000 lwpid_of (thread));
5001
5002 /* The LWP may have been stopped in an internal event that
5003 was not meant to be notified back to GDB (e.g., gdbserver
5004 breakpoint), so we should be reporting a stop event in
5005 this case too. */
5006
5007 /* If the thread already has a pending SIGSTOP, this is a
5008 no-op. Otherwise, something later will presumably resume
5009 the thread and this will cause it to cancel any pending
5010 operation, due to last_resume_kind == resume_stop. If
5011 the thread already has a pending status to report, we
5012 will still report it the next time we wait - see
5013 status_pending_p_callback. */
5014
5015 /* If we already have a pending signal to report, then
5016 there's no need to queue a SIGSTOP, as this means we're
5017 midway through moving the LWP out of the jumppad, and we
5018 will report the pending signal as soon as that is
5019 finished. */
5020 if (lwp->pending_signals_to_report == NULL)
5021 send_sigstop (lwp);
5022 }
5023
5024 /* For stop requests, we're done. */
5025 lwp->resume = NULL;
5026 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5027 return 0;
5028 }
5029
5030 /* If this thread which is about to be resumed has a pending status,
5031 then don't resume it - we can just report the pending status.
5032 Likewise if it is suspended, because e.g., another thread is
5033 stepping past a breakpoint. Make sure to queue any signals that
5034      would otherwise be sent. In all-stop mode, we make this decision
5035      based on whether *any* thread has a pending status. If there's a
5036 thread that needs the step-over-breakpoint dance, then don't
5037 resume any other thread but that particular one. */
5038 leave_pending = (lwp->suspended
5039 || lwp->status_pending_p
5040 || leave_all_stopped);
5041
5042 /* If we have a new signal, enqueue the signal. */
5043 if (lwp->resume->sig != 0)
5044 {
5045 siginfo_t info, *info_p;
5046
5047 /* If this is the same signal we were previously stopped by,
5048 make sure to queue its siginfo. */
5049 if (WIFSTOPPED (lwp->last_status)
5050 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5051 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5052 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5053 info_p = &info;
5054 else
5055 info_p = NULL;
5056
5057 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5058 }
5059
5060 if (!leave_pending)
5061 {
5062 if (debug_threads)
5063 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5064
5065 proceed_one_lwp (entry, NULL);
5066 }
5067 else
5068 {
5069 if (debug_threads)
5070 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5071 }
5072
5073 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5074 lwp->resume = NULL;
5075 return 0;
5076 }
5077
5078 static void
5079 linux_resume (struct thread_resume *resume_info, size_t n)
5080 {
5081 struct thread_resume_array array = { resume_info, n };
5082 struct thread_info *need_step_over = NULL;
5083 int any_pending;
5084 int leave_all_stopped;
5085
5086 if (debug_threads)
5087 {
5088 debug_enter ();
5089 debug_printf ("linux_resume:\n");
5090 }
5091
5092 find_inferior (&all_threads, linux_set_resume_request, &array);
5093
5094 /* If there is a thread which would otherwise be resumed, which has
5095 a pending status, then don't resume any threads - we can just
5096 report the pending status. Make sure to queue any signals that
5097 would otherwise be sent. In non-stop mode, we'll apply this
5098 logic to each thread individually. We consume all pending events
5099      before considering starting a step-over (in all-stop). */
5100 any_pending = 0;
5101 if (!non_stop)
5102 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5103
5104 /* If there is a thread which would otherwise be resumed, which is
5105 stopped at a breakpoint that needs stepping over, then don't
5106 resume any threads - have it step over the breakpoint with all
5107 other threads stopped, then resume all threads again. Make sure
5108 to queue any signals that would otherwise be delivered or
5109 queued. */
5110 if (!any_pending && supports_breakpoints ())
5111 need_step_over
5112 = (struct thread_info *) find_inferior (&all_threads,
5113 need_step_over_p, NULL);
5114
5115 leave_all_stopped = (need_step_over != NULL || any_pending);
5116
5117 if (debug_threads)
5118 {
5119 if (need_step_over != NULL)
5120 debug_printf ("Not resuming all, need step over\n");
5121 else if (any_pending)
5122 debug_printf ("Not resuming, all-stop and found "
5123 "an LWP with pending status\n");
5124 else
5125 debug_printf ("Resuming, no pending status or step over needed\n");
5126 }
5127
5128 /* Even if we're leaving threads stopped, queue all signals we'd
5129 otherwise deliver. */
5130 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5131
5132 if (need_step_over)
5133 start_step_over (get_thread_lwp (need_step_over));
5134
5135 if (debug_threads)
5136 {
5137 debug_printf ("linux_resume done\n");
5138 debug_exit ();
5139 }
5140
5141 /* We may have events that were pending that can/should be sent to
5142 the client now. Trigger a linux_wait call. */
5143 if (target_is_async_p ())
5144 async_file_mark ();
5145 }
5146
5147 /* This function is called once per thread. We check the thread's
5148 last resume request, which will tell us whether to resume, step, or
5149 leave the thread stopped. Any signal the client requested to be
5150 delivered has already been enqueued at this point.
5151
5152 If any thread that GDB wants running is stopped at an internal
5153 breakpoint that needs stepping over, we start a step-over operation
5154 on that particular thread, and leave all others stopped. */
5155
5156 static int
5157 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5158 {
5159 struct thread_info *thread = (struct thread_info *) entry;
5160 struct lwp_info *lwp = get_thread_lwp (thread);
5161 int step;
5162
5163 if (lwp == except)
5164 return 0;
5165
5166 if (debug_threads)
5167 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5168
5169 if (!lwp->stopped)
5170 {
5171 if (debug_threads)
5172 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5173 return 0;
5174 }
5175
5176 if (thread->last_resume_kind == resume_stop
5177 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5178 {
5179 if (debug_threads)
5180 	debug_printf ("  client wants LWP %ld to remain stopped\n",
5181 lwpid_of (thread));
5182 return 0;
5183 }
5184
5185 if (lwp->status_pending_p)
5186 {
5187 if (debug_threads)
5188 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5189 lwpid_of (thread));
5190 return 0;
5191 }
5192
5193 gdb_assert (lwp->suspended >= 0);
5194
5195 if (lwp->suspended)
5196 {
5197 if (debug_threads)
5198 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5199 return 0;
5200 }
5201
5202 if (thread->last_resume_kind == resume_stop
5203 && lwp->pending_signals_to_report == NULL
5204 && lwp->collecting_fast_tracepoint == 0)
5205 {
5206 /* We haven't reported this LWP as stopped yet (otherwise, the
5207 	 last_status.kind check above would catch it, and we wouldn't
5208 	 reach here). This LWP may have been momentarily paused by a
5209 	 stop_all_lwps call while handling, for example, another LWP's
5210 step-over. In that case, the pending expected SIGSTOP signal
5211 that was queued at vCont;t handling time will have already
5212 been consumed by wait_for_sigstop, and so we need to requeue
5213 another one here. Note that if the LWP already has a SIGSTOP
5214 pending, this is a no-op. */
5215
5216 if (debug_threads)
5217 debug_printf ("Client wants LWP %ld to stop. "
5218 "Making sure it has a SIGSTOP pending\n",
5219 lwpid_of (thread));
5220
5221 send_sigstop (lwp);
5222 }
5223
5224 if (thread->last_resume_kind == resume_step)
5225 {
5226 if (debug_threads)
5227 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5228 lwpid_of (thread));
5229
5230 /* If resume_step is requested by GDB, install reinsert
5231 	 breakpoints when the thread is about to actually be resumed, if
5232 	 they are not already in place. */
5233 if (can_software_single_step () && !has_reinsert_breakpoints (thread))
5234 install_software_single_step_breakpoints (lwp);
5235
5236 step = maybe_hw_step (thread);
5237 }
5238 else if (lwp->bp_reinsert != 0)
5239 {
5240 if (debug_threads)
5241 debug_printf (" stepping LWP %ld, reinsert set\n",
5242 lwpid_of (thread));
5243
5244 step = maybe_hw_step (thread);
5245 }
5246 else
5247 step = 0;
5248
5249 linux_resume_one_lwp (lwp, step, 0, NULL);
5250 return 0;
5251 }
5252
5253 static int
5254 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5255 {
5256 struct thread_info *thread = (struct thread_info *) entry;
5257 struct lwp_info *lwp = get_thread_lwp (thread);
5258
5259 if (lwp == except)
5260 return 0;
5261
5262 lwp_suspended_decr (lwp);
5263
5264 return proceed_one_lwp (entry, except);
5265 }
5266
5267 /* When we finish a step-over, set threads running again. If there's
5268 another thread that may need a step-over, now's the time to start
5269 it. Eventually, we'll move all threads past their breakpoints. */
5270
5271 static void
5272 proceed_all_lwps (void)
5273 {
5274 struct thread_info *need_step_over;
5275
5276 /* If there is a thread which would otherwise be resumed, which is
5277 stopped at a breakpoint that needs stepping over, then don't
5278 resume any threads - have it step over the breakpoint with all
5279 other threads stopped, then resume all threads again. */
5280
5281 if (supports_breakpoints ())
5282 {
5283 need_step_over
5284 = (struct thread_info *) find_inferior (&all_threads,
5285 need_step_over_p, NULL);
5286
5287 if (need_step_over != NULL)
5288 {
5289 if (debug_threads)
5290 debug_printf ("proceed_all_lwps: found "
5291 "thread %ld needing a step-over\n",
5292 lwpid_of (need_step_over));
5293
5294 start_step_over (get_thread_lwp (need_step_over));
5295 return;
5296 }
5297 }
5298
5299 if (debug_threads)
5300 debug_printf ("Proceeding, no step-over needed\n");
5301
5302 find_inferior (&all_threads, proceed_one_lwp, NULL);
5303 }
5304
5305 /* Stopped LWPs that the client wanted to be running, that don't have
5306 pending statuses, are set to run again, except for EXCEPT, if not
5307 NULL. This undoes a stop_all_lwps call. */
5308
5309 static void
5310 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5311 {
5312 if (debug_threads)
5313 {
5314 debug_enter ();
5315 if (except)
5316 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5317 lwpid_of (get_lwp_thread (except)));
5318 else
5319 debug_printf ("unstopping all lwps\n");
5320 }
5321
5322 if (unsuspend)
5323 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5324 else
5325 find_inferior (&all_threads, proceed_one_lwp, except);
5326
5327 if (debug_threads)
5328 {
5329 debug_printf ("unstop_all_lwps done\n");
5330 debug_exit ();
5331 }
5332 }
5333
5334
5335 #ifdef HAVE_LINUX_REGSETS
5336
5337 #define use_linux_regsets 1
5338
5339 /* Returns true if REGSET has been disabled. */
5340
5341 static int
5342 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5343 {
5344 return (info->disabled_regsets != NULL
5345 && info->disabled_regsets[regset - info->regsets]);
5346 }
5347
5348 /* Disable REGSET. */
5349
5350 static void
5351 disable_regset (struct regsets_info *info, struct regset_info *regset)
5352 {
5353 int dr_offset;
5354
5355 dr_offset = regset - info->regsets;
5356 if (info->disabled_regsets == NULL)
5357 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5358 info->disabled_regsets[dr_offset] = 1;
5359 }
5360
5361 static int
5362 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5363 struct regcache *regcache)
5364 {
5365 struct regset_info *regset;
5366 int saw_general_regs = 0;
5367 int pid;
5368 struct iovec iov;
5369
5370 pid = lwpid_of (current_thread);
5371 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5372 {
5373 void *buf, *data;
5374 int nt_type, res;
5375
5376 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5377 continue;
5378
5379 buf = xmalloc (regset->size);
5380
5381 nt_type = regset->nt_type;
5382 if (nt_type)
5383 {
5384 iov.iov_base = buf;
5385 iov.iov_len = regset->size;
5386 data = (void *) &iov;
5387 }
5388 else
5389 data = buf;
5390
5391 #ifndef __sparc__
5392 res = ptrace (regset->get_request, pid,
5393 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5394 #else
5395 res = ptrace (regset->get_request, pid, data, nt_type);
5396 #endif
5397 if (res < 0)
5398 {
5399 if (errno == EIO)
5400 {
5401 /* If we get EIO on a regset, do not try it again for
5402 this process mode. */
5403 disable_regset (regsets_info, regset);
5404 }
5405 else if (errno == ENODATA)
5406 {
5407 /* ENODATA may be returned if the regset is currently
5408 not "active". This can happen in normal operation,
5409 so suppress the warning in this case. */
5410 }
5411 else if (errno == ESRCH)
5412 {
5413 /* At this point, ESRCH should mean the process is
5414 already gone, in which case we simply ignore attempts
5415 to read its registers. */
5416 }
5417 else
5418 {
5419 char s[256];
5420 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5421 pid);
5422 perror (s);
5423 }
5424 }
5425 else
5426 {
5427 if (regset->type == GENERAL_REGS)
5428 saw_general_regs = 1;
5429 regset->store_function (regcache, buf);
5430 }
5431 free (buf);
5432 }
5433 if (saw_general_regs)
5434 return 0;
5435 else
5436 return 1;
5437 }
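/* The nt_type handling above follows the PTRACE_GETREGSET
   convention: for a regset with an NT_* note type, the third ptrace
   argument is the note type and the fourth points to an iovec
   describing the buffer, whose iov_len the kernel updates to the
   amount actually transferred.  A minimal sketch for the general
   registers, assuming PTRACE_GETREGSET and NT_PRSTATUS are
   available:

     struct iovec iov;

     iov.iov_base = buf;
     iov.iov_len = size;
     ptrace (PTRACE_GETREGSET, pid, (PTRACE_TYPE_ARG3) NT_PRSTATUS,
             &iov);
*/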
5438
5439 static int
5440 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5441 struct regcache *regcache)
5442 {
5443 struct regset_info *regset;
5444 int saw_general_regs = 0;
5445 int pid;
5446 struct iovec iov;
5447
5448 pid = lwpid_of (current_thread);
5449 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5450 {
5451 void *buf, *data;
5452 int nt_type, res;
5453
5454 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5455 || regset->fill_function == NULL)
5456 continue;
5457
5458 buf = xmalloc (regset->size);
5459
5460 /* First fill the buffer with the current register set contents,
5461 in case there are any items in the kernel's regset that are
5462 not in gdbserver's regcache. */
5463
5464 nt_type = regset->nt_type;
5465 if (nt_type)
5466 {
5467 iov.iov_base = buf;
5468 iov.iov_len = regset->size;
5469 data = (void *) &iov;
5470 }
5471 else
5472 data = buf;
5473
5474 #ifndef __sparc__
5475 res = ptrace (regset->get_request, pid,
5476 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5477 #else
5478 res = ptrace (regset->get_request, pid, data, nt_type);
5479 #endif
5480
5481 if (res == 0)
5482 {
5483 /* Then overlay our cached registers on that. */
5484 regset->fill_function (regcache, buf);
5485
5486 /* Only now do we write the register set. */
5487 #ifndef __sparc__
5488 res = ptrace (regset->set_request, pid,
5489 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5490 #else
5491 res = ptrace (regset->set_request, pid, data, nt_type);
5492 #endif
5493 }
5494
5495 if (res < 0)
5496 {
5497 if (errno == EIO)
5498 {
5499 /* If we get EIO on a regset, do not try it again for
5500 this process mode. */
5501 disable_regset (regsets_info, regset);
5502 }
5503 else if (errno == ESRCH)
5504 {
5505 /* At this point, ESRCH should mean the process is
5506 already gone, in which case we simply ignore attempts
5507 to change its registers. See also the related
5508 comment in linux_resume_one_lwp. */
5509 free (buf);
5510 return 0;
5511 }
5512 else
5513 {
5514 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5515 }
5516 }
5517 else if (regset->type == GENERAL_REGS)
5518 saw_general_regs = 1;
5519 free (buf);
5520 }
5521 if (saw_general_regs)
5522 return 0;
5523 else
5524 return 1;
5525 }
5526
5527 #else /* !HAVE_LINUX_REGSETS */
5528
5529 #define use_linux_regsets 0
5530 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5531 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5532
5533 #endif
5534
5535 /* Return 1 if register REGNO is supported by one of the regset ptrace
5536 calls or 0 if it has to be transferred individually. */
5537
5538 static int
5539 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5540 {
5541 unsigned char mask = 1 << (regno % 8);
5542 size_t index = regno / 8;
5543
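/* For example, regno 10 tests bit 2 (mask 0x04) of bitmap byte 1.  */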
5544 return (use_linux_regsets
5545 && (regs_info->regset_bitmap == NULL
5546 || (regs_info->regset_bitmap[index] & mask) != 0));
5547 }
5548
5549 #ifdef HAVE_LINUX_USRREGS
5550
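/* Return the offset, within the ptrace "user area", of register
REGNUM, as recorded in the usrregs regmap.  */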
5551 static int
5552 register_addr (const struct usrregs_info *usrregs, int regnum)
5553 {
5554 int addr;
5555
5556 if (regnum < 0 || regnum >= usrregs->num_regs)
5557 error ("Invalid register number %d.", regnum);
5558
5559 addr = usrregs->regmap[regnum];
5560
5561 return addr;
5562 }
5563
5564 /* Fetch one register. */
5565 static void
5566 fetch_register (const struct usrregs_info *usrregs,
5567 struct regcache *regcache, int regno)
5568 {
5569 CORE_ADDR regaddr;
5570 int i, size;
5571 char *buf;
5572 int pid;
5573
5574 if (regno >= usrregs->num_regs)
5575 return;
5576 if ((*the_low_target.cannot_fetch_register) (regno))
5577 return;
5578
5579 regaddr = register_addr (usrregs, regno);
5580 if (regaddr == -1)
5581 return;
5582
5583 size = ((register_size (regcache->tdesc, regno)
5584 + sizeof (PTRACE_XFER_TYPE) - 1)
5585 & -sizeof (PTRACE_XFER_TYPE));
5586 buf = (char *) alloca (size);
5587
5588 pid = lwpid_of (current_thread);
5589 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5590 {
5591 errno = 0;
5592 *(PTRACE_XFER_TYPE *) (buf + i) =
5593 ptrace (PTRACE_PEEKUSER, pid,
5594 /* Coerce to a uintptr_t first to avoid potential gcc warning
5595 about coercing an 8 byte integer to a 4 byte pointer. */
5596 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5597 regaddr += sizeof (PTRACE_XFER_TYPE);
5598 if (errno != 0)
5599 error ("reading register %d: %s", regno, strerror (errno));
5600 }
5601
5602 if (the_low_target.supply_ptrace_register)
5603 the_low_target.supply_ptrace_register (regcache, regno, buf);
5604 else
5605 supply_register (regcache, regno, buf);
5606 }
5607
5608 /* Store one register. */
5609 static void
5610 store_register (const struct usrregs_info *usrregs,
5611 struct regcache *regcache, int regno)
5612 {
5613 CORE_ADDR regaddr;
5614 int i, size;
5615 char *buf;
5616 int pid;
5617
5618 if (regno >= usrregs->num_regs)
5619 return;
5620 if ((*the_low_target.cannot_store_register) (regno))
5621 return;
5622
5623 regaddr = register_addr (usrregs, regno);
5624 if (regaddr == -1)
5625 return;
5626
5627 size = ((register_size (regcache->tdesc, regno)
5628 + sizeof (PTRACE_XFER_TYPE) - 1)
5629 & -sizeof (PTRACE_XFER_TYPE));
5630 buf = (char *) alloca (size);
5631 memset (buf, 0, size);
5632
5633 if (the_low_target.collect_ptrace_register)
5634 the_low_target.collect_ptrace_register (regcache, regno, buf);
5635 else
5636 collect_register (regcache, regno, buf);
5637
5638 pid = lwpid_of (current_thread);
5639 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5640 {
5641 errno = 0;
5642 ptrace (PTRACE_POKEUSER, pid,
5643 /* Coerce to a uintptr_t first to avoid potential gcc warning
5644 about coercing an 8 byte integer to a 4 byte pointer. */
5645 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5646 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5647 if (errno != 0)
5648 {
5649 /* At this point, ESRCH should mean the process is
5650 already gone, in which case we simply ignore attempts
5651 to change its registers. See also the related
5652 comment in linux_resume_one_lwp. */
5653 if (errno == ESRCH)
5654 return;
5655
5656 if ((*the_low_target.cannot_store_register) (regno) == 0)
5657 error ("writing register %d: %s", regno, strerror (errno));
5658 }
5659 regaddr += sizeof (PTRACE_XFER_TYPE);
5660 }
5661 }
5662
5663 /* Fetch all registers, or just one, from the child process.
5664 If REGNO is -1, do this for all registers, skipping any that are
5665 assumed to have been retrieved by regsets_fetch_inferior_registers,
5666 unless ALL is non-zero.
5667 Otherwise, REGNO specifies which register (so we can save time). */
5668 static void
5669 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5670 struct regcache *regcache, int regno, int all)
5671 {
5672 struct usrregs_info *usr = regs_info->usrregs;
5673
5674 if (regno == -1)
5675 {
5676 for (regno = 0; regno < usr->num_regs; regno++)
5677 if (all || !linux_register_in_regsets (regs_info, regno))
5678 fetch_register (usr, regcache, regno);
5679 }
5680 else
5681 fetch_register (usr, regcache, regno);
5682 }
5683
5684 /* Store our register values back into the inferior.
5685 If REGNO is -1, do this for all registers, skipping any that are
5686 assumed to have been saved by regsets_store_inferior_registers,
5687 unless ALL is non-zero.
5688 Otherwise, REGNO specifies which register (so we can save time). */
5689 static void
5690 usr_store_inferior_registers (const struct regs_info *regs_info,
5691 struct regcache *regcache, int regno, int all)
5692 {
5693 struct usrregs_info *usr = regs_info->usrregs;
5694
5695 if (regno == -1)
5696 {
5697 for (regno = 0; regno < usr->num_regs; regno++)
5698 if (all || !linux_register_in_regsets (regs_info, regno))
5699 store_register (usr, regcache, regno);
5700 }
5701 else
5702 store_register (usr, regcache, regno);
5703 }
5704
5705 #else /* !HAVE_LINUX_USRREGS */
5706
5707 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5708 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5709
5710 #endif
5711
5712
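/* Fetch all registers, or just REGNO, from the inferior, using
regsets and/or PTRACE_PEEKUSER as the target supports them.  */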
5713 static void
5714 linux_fetch_registers (struct regcache *regcache, int regno)
5715 {
5716 int use_regsets;
5717 int all = 0;
5718 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5719
5720 if (regno == -1)
5721 {
5722 if (the_low_target.fetch_register != NULL
5723 && regs_info->usrregs != NULL)
5724 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5725 (*the_low_target.fetch_register) (regcache, regno);
5726
5727 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5728 if (regs_info->usrregs != NULL)
5729 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5730 }
5731 else
5732 {
5733 if (the_low_target.fetch_register != NULL
5734 && (*the_low_target.fetch_register) (regcache, regno))
5735 return;
5736
5737 use_regsets = linux_register_in_regsets (regs_info, regno);
5738 if (use_regsets)
5739 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5740 regcache);
5741 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5742 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5743 }
5744 }
5745
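/* Store all registers, or just REGNO, back into the inferior, using
regsets and/or PTRACE_POKEUSER as the target supports them.  */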
5746 static void
5747 linux_store_registers (struct regcache *regcache, int regno)
5748 {
5749 int use_regsets;
5750 int all = 0;
5751 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5752
5753 if (regno == -1)
5754 {
5755 all = regsets_store_inferior_registers (regs_info->regsets_info,
5756 regcache);
5757 if (regs_info->usrregs != NULL)
5758 usr_store_inferior_registers (regs_info, regcache, regno, all);
5759 }
5760 else
5761 {
5762 use_regsets = linux_register_in_regsets (regs_info, regno);
5763 if (use_regsets)
5764 all = regsets_store_inferior_registers (regs_info->regsets_info,
5765 regcache);
5766 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5767 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5768 }
5769 }
5770
5771
5772 /* Copy LEN bytes from inferior's memory starting at MEMADDR to debugger
5773 memory starting at MYADDR. Return 0 on success, or the value of errno on failure. */
5774
5775 static int
5776 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5777 {
5778 int pid = lwpid_of (current_thread);
5779 register PTRACE_XFER_TYPE *buffer;
5780 register CORE_ADDR addr;
5781 register int count;
5782 char filename[64];
5783 register int i;
5784 int ret;
5785 int fd;
5786
5787 /* Try using /proc. Don't bother for one word. */
5788 if (len >= 3 * sizeof (long))
5789 {
5790 int bytes;
5791
5792 /* We could keep this file open and cache it - possibly one per
5793 thread. That requires some juggling, but is even faster. */
5794 sprintf (filename, "/proc/%d/mem", pid);
5795 fd = open (filename, O_RDONLY | O_LARGEFILE);
5796 if (fd == -1)
5797 goto no_proc;
5798
5799 /* If pread64 is available, use it. It's faster if the kernel
5800 supports it (only one syscall), and it's 64-bit safe even on
5801 32-bit platforms (for instance, SPARC debugging a SPARC64
5802 application). */
5803 #ifdef HAVE_PREAD64
5804 bytes = pread64 (fd, myaddr, len, memaddr);
5805 #else
5806 bytes = -1;
5807 if (lseek (fd, memaddr, SEEK_SET) != -1)
5808 bytes = read (fd, myaddr, len);
5809 #endif
5810
5811 close (fd);
5812 if (bytes == len)
5813 return 0;
5814
5815 /* Some data was read, we'll try to get the rest with ptrace. */
5816 if (bytes > 0)
5817 {
5818 memaddr += bytes;
5819 myaddr += bytes;
5820 len -= bytes;
5821 }
5822 }
5823
5824 no_proc:
5825 /* Round starting address down to longword boundary. */
5826 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5827 /* Round ending address up; get number of longwords that makes. */
5828 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5829 / sizeof (PTRACE_XFER_TYPE));
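/* For example, with 8-byte longwords, a 5-byte read starting at
memaddr 0x1006 rounds ADDR down to 0x1000 and yields COUNT == 2.  */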
5830 /* Allocate buffer of that many longwords. */
5831 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5832
5833 /* Read all the longwords. */
5834 errno = 0;
5835 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5836 {
5837 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5838 about coercing an 8 byte integer to a 4 byte pointer. */
5839 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5840 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5841 (PTRACE_TYPE_ARG4) 0);
5842 if (errno)
5843 break;
5844 }
5845 ret = errno;
5846
5847 /* Copy appropriate bytes out of the buffer. */
5848 if (i > 0)
5849 {
5850 i *= sizeof (PTRACE_XFER_TYPE);
5851 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5852 memcpy (myaddr,
5853 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5854 i < len ? i : len);
5855 }
5856
5857 return ret;
5858 }
5859
5860 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5861 memory at MEMADDR. On failure (cannot write to the inferior)
5862 returns the value of errno. Always succeeds if LEN is zero. */
5863
5864 static int
5865 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5866 {
5867 register int i;
5868 /* Round starting address down to longword boundary. */
5869 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5870 /* Round ending address up; get number of longwords that makes. */
5871 register int count
5872 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5873 / sizeof (PTRACE_XFER_TYPE);
5874
5875 /* Allocate buffer of that many longwords. */
5876 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5877
5878 int pid = lwpid_of (current_thread);
5879
5880 if (len == 0)
5881 {
5882 /* Zero length write always succeeds. */
5883 return 0;
5884 }
5885
5886 if (debug_threads)
5887 {
5888 /* Dump up to four bytes. */
5889 char str[4 * 2 + 1];
5890 char *p = str;
5891 int dump = len < 4 ? len : 4;
5892
5893 for (i = 0; i < dump; i++)
5894 {
5895 sprintf (p, "%02x", myaddr[i]);
5896 p += 2;
5897 }
5898 *p = '\0';
5899
5900 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5901 str, (long) memaddr, pid);
5902 }
5903
5904 /* Fill start and end extra bytes of buffer with existing memory data. */
5905
5906 errno = 0;
5907 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5908 about coercing an 8 byte integer to a 4 byte pointer. */
5909 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5910 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5911 (PTRACE_TYPE_ARG4) 0);
5912 if (errno)
5913 return errno;
5914
5915 if (count > 1)
5916 {
5917 errno = 0;
5918 buffer[count - 1]
5919 = ptrace (PTRACE_PEEKTEXT, pid,
5920 /* Coerce to a uintptr_t first to avoid potential gcc warning
5921 about coercing an 8 byte integer to a 4 byte pointer. */
5922 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5923 * sizeof (PTRACE_XFER_TYPE)),
5924 (PTRACE_TYPE_ARG4) 0);
5925 if (errno)
5926 return errno;
5927 }
5928
5929 /* Copy data to be written over corresponding part of buffer. */
5930
5931 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5932 myaddr, len);
5933
5934 /* Write the entire buffer. */
5935
5936 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5937 {
5938 errno = 0;
5939 ptrace (PTRACE_POKETEXT, pid,
5940 /* Coerce to a uintptr_t first to avoid potential gcc warning
5941 about coercing an 8 byte integer to a 4 byte pointer. */
5942 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5943 (PTRACE_TYPE_ARG4) buffer[i]);
5944 if (errno)
5945 return errno;
5946 }
5947
5948 return 0;
5949 }
5950
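/* Called once symbol lookup is possible; on GNU/Linux this starts
libthread_db, unless it is already active.  */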
5951 static void
5952 linux_look_up_symbols (void)
5953 {
5954 #ifdef USE_THREAD_DB
5955 struct process_info *proc = current_process ();
5956
5957 if (proc->priv->thread_db != NULL)
5958 return;
5959
5960 thread_db_init ();
5961 #endif
5962 }
5963
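/* Implement the request_interrupt target_ops method.  */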
5964 static void
5965 linux_request_interrupt (void)
5966 {
5967 extern unsigned long signal_pid;
5968
5969 /* Send a SIGINT to the process group. This acts just like the user
5970 typed a ^C on the controlling terminal. */
5971 kill (-signal_pid, SIGINT);
5972 }
5973
5974 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5975 to debugger memory starting at MYADDR. */
5976
5977 static int
5978 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5979 {
5980 char filename[PATH_MAX];
5981 int fd, n;
5982 int pid = lwpid_of (current_thread);
5983
5984 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5985
5986 fd = open (filename, O_RDONLY);
5987 if (fd < 0)
5988 return -1;
5989
5990 if (offset != (CORE_ADDR) 0
5991 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5992 n = -1;
5993 else
5994 n = read (fd, myaddr, len);
5995
5996 close (fd);
5997
5998 return n;
5999 }
6000
6001 /* These breakpoint- and watchpoint-related wrapper functions simply
6002 pass on the function call if the target has registered a
6003 corresponding function. */
6004
6005 static int
6006 linux_supports_z_point_type (char z_type)
6007 {
6008 return (the_low_target.supports_z_point_type != NULL
6009 && the_low_target.supports_z_point_type (z_type));
6010 }
6011
6012 static int
6013 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6014 int size, struct raw_breakpoint *bp)
6015 {
6016 if (type == raw_bkpt_type_sw)
6017 return insert_memory_breakpoint (bp);
6018 else if (the_low_target.insert_point != NULL)
6019 return the_low_target.insert_point (type, addr, size, bp);
6020 else
6021 /* Unsupported (see target.h). */
6022 return 1;
6023 }
6024
6025 static int
6026 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6027 int size, struct raw_breakpoint *bp)
6028 {
6029 if (type == raw_bkpt_type_sw)
6030 return remove_memory_breakpoint (bp);
6031 else if (the_low_target.remove_point != NULL)
6032 return the_low_target.remove_point (type, addr, size, bp);
6033 else
6034 /* Unsupported (see target.h). */
6035 return 1;
6036 }
6037
6038 /* Implement the to_stopped_by_sw_breakpoint target_ops
6039 method. */
6040
6041 static int
6042 linux_stopped_by_sw_breakpoint (void)
6043 {
6044 struct lwp_info *lwp = get_thread_lwp (current_thread);
6045
6046 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6047 }
6048
6049 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6050 method. */
6051
6052 static int
6053 linux_supports_stopped_by_sw_breakpoint (void)
6054 {
6055 return USE_SIGTRAP_SIGINFO;
6056 }
6057
6058 /* Implement the to_stopped_by_hw_breakpoint target_ops
6059 method. */
6060
6061 static int
6062 linux_stopped_by_hw_breakpoint (void)
6063 {
6064 struct lwp_info *lwp = get_thread_lwp (current_thread);
6065
6066 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6067 }
6068
6069 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6070 method. */
6071
6072 static int
6073 linux_supports_stopped_by_hw_breakpoint (void)
6074 {
6075 return USE_SIGTRAP_SIGINFO;
6076 }
6077
6078 /* Implement the supports_hardware_single_step target_ops method. */
6079
6080 static int
6081 linux_supports_hardware_single_step (void)
6082 {
6083 return can_hardware_single_step ();
6084 }
6085
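/* Implement the supports_software_single_step target_ops method.  */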
6086 static int
6087 linux_supports_software_single_step (void)
6088 {
6089 return can_software_single_step ();
6090 }
6091
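/* Implement the to_stopped_by_watchpoint target_ops method.  */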
6092 static int
6093 linux_stopped_by_watchpoint (void)
6094 {
6095 struct lwp_info *lwp = get_thread_lwp (current_thread);
6096
6097 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6098 }
6099
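/* Implement the to_stopped_data_address target_ops method.  */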
6100 static CORE_ADDR
6101 linux_stopped_data_address (void)
6102 {
6103 struct lwp_info *lwp = get_thread_lwp (current_thread);
6104
6105 return lwp->stopped_data_address;
6106 }
6107
6108 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6109 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6110 && defined(PT_TEXT_END_ADDR)
6111
6112 /* This is only used for targets that define PT_TEXT_ADDR,
6113 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6114 target presumably has other ways of acquiring this information,
6115 such as loadmaps. */
6116
6117 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6118 to tell gdb about. */
6119
6120 static int
6121 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6122 {
6123 unsigned long text, text_end, data;
6124 int pid = lwpid_of (current_thread);
6125
6126 errno = 0;
6127
6128 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6129 (PTRACE_TYPE_ARG4) 0);
6130 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6131 (PTRACE_TYPE_ARG4) 0);
6132 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6133 (PTRACE_TYPE_ARG4) 0);
6134
6135 if (errno == 0)
6136 {
6137 /* Both text and data offsets produced at compile-time (and so
6138 used by gdb) are relative to the beginning of the program,
6139 with the data segment immediately following the text segment.
6140 However, the actual runtime layout in memory may put the data
6141 somewhere else, so when we send gdb a data base-address, we
6142 use the real data base address and subtract the compile-time
6143 data base-address from it (which is just the length of the
6144 text segment). BSS immediately follows data in both
6145 cases. */
6146 *text_p = text;
6147 *data_p = data - (text_end - text);
6148
6149 return 1;
6150 }
6151 return 0;
6152 }
6153 #endif
6154
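/* Implement the qxfer_osdata target_ops method by delegating to the
shared linux_common_xfer_osdata routine.  */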
6155 static int
6156 linux_qxfer_osdata (const char *annex,
6157 unsigned char *readbuf, unsigned const char *writebuf,
6158 CORE_ADDR offset, int len)
6159 {
6160 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6161 }
6162
6163 /* Convert a native/host siginfo object into/from the siginfo in the
6164 layout of the inferior's architecture. */
6165
6166 static void
6167 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6168 {
6169 int done = 0;
6170
6171 if (the_low_target.siginfo_fixup != NULL)
6172 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6173
6174 /* If there was no callback, or the callback didn't do anything,
6175 then just do a straight memcpy. */
6176 if (!done)
6177 {
6178 if (direction == 1)
6179 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6180 else
6181 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6182 }
6183 }
6184
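/* Read or write the inferior's siginfo via PTRACE_GETSIGINFO and
PTRACE_SETSIGINFO, converting between the host layout and the
inferior's layout as needed.  */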
6185 static int
6186 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6187 unsigned const char *writebuf, CORE_ADDR offset, int len)
6188 {
6189 int pid;
6190 siginfo_t siginfo;
6191 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6192
6193 if (current_thread == NULL)
6194 return -1;
6195
6196 pid = lwpid_of (current_thread);
6197
6198 if (debug_threads)
6199 debug_printf ("%s siginfo for lwp %d.\n",
6200 readbuf != NULL ? "Reading" : "Writing",
6201 pid);
6202
6203 if (offset >= sizeof (siginfo))
6204 return -1;
6205
6206 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6207 return -1;
6208
6209 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6210 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6211 inferior with a 64-bit GDBSERVER should look the same as debugging it
6212 with a 32-bit GDBSERVER, we need to convert it. */
6213 siginfo_fixup (&siginfo, inf_siginfo, 0);
6214
6215 if (offset + len > sizeof (siginfo))
6216 len = sizeof (siginfo) - offset;
6217
6218 if (readbuf != NULL)
6219 memcpy (readbuf, inf_siginfo + offset, len);
6220 else
6221 {
6222 memcpy (inf_siginfo + offset, writebuf, len);
6223
6224 /* Convert back to ptrace layout before flushing it out. */
6225 siginfo_fixup (&siginfo, inf_siginfo, 1);
6226
6227 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6228 return -1;
6229 }
6230
6231 return len;
6232 }
6233
6234 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6235 it notifies us when children change state; and it acts as the
6236 handler for the sigsuspend in my_waitpid. */
6237
6238 static void
6239 sigchld_handler (int signo)
6240 {
6241 int old_errno = errno;
6242
6243 if (debug_threads)
6244 {
6245 do
6246 {
6247 /* fprintf is not async-signal-safe, so call write
6248 directly. */
6249 if (write (2, "sigchld_handler\n",
6250 sizeof ("sigchld_handler\n") - 1) < 0)
6251 break; /* just ignore */
6252 } while (0);
6253 }
6254
6255 if (target_is_async_p ())
6256 async_file_mark (); /* trigger a linux_wait */
6257
6258 errno = old_errno;
6259 }
6260
6261 static int
6262 linux_supports_non_stop (void)
6263 {
6264 return 1;
6265 }
6266
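/* Enable or disable target async mode, setting up or tearing down
the SIGCHLD event pipe as needed.  Return the previous state.  */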
6267 static int
6268 linux_async (int enable)
6269 {
6270 int previous = target_is_async_p ();
6271
6272 if (debug_threads)
6273 debug_printf ("linux_async (%d), previous=%d\n",
6274 enable, previous);
6275
6276 if (previous != enable)
6277 {
6278 sigset_t mask;
6279 sigemptyset (&mask);
6280 sigaddset (&mask, SIGCHLD);
6281
6282 sigprocmask (SIG_BLOCK, &mask, NULL);
6283
6284 if (enable)
6285 {
6286 if (pipe (linux_event_pipe) == -1)
6287 {
6288 linux_event_pipe[0] = -1;
6289 linux_event_pipe[1] = -1;
6290 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6291
6292 warning ("creating event pipe failed.");
6293 return previous;
6294 }
6295
6296 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6297 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6298
6299 /* Register the event loop handler. */
6300 add_file_handler (linux_event_pipe[0],
6301 handle_target_event, NULL);
6302
6303 /* Always trigger a linux_wait. */
6304 async_file_mark ();
6305 }
6306 else
6307 {
6308 delete_file_handler (linux_event_pipe[0]);
6309
6310 close (linux_event_pipe[0]);
6311 close (linux_event_pipe[1]);
6312 linux_event_pipe[0] = -1;
6313 linux_event_pipe[1] = -1;
6314 }
6315
6316 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6317 }
6318
6319 return previous;
6320 }
6321
6322 static int
6323 linux_start_non_stop (int nonstop)
6324 {
6325 /* Register or unregister from event-loop accordingly. */
6326 linux_async (nonstop);
6327
6328 if (target_is_async_p () != (nonstop != 0))
6329 return -1;
6330
6331 return 0;
6332 }
6333
6334 static int
6335 linux_supports_multi_process (void)
6336 {
6337 return 1;
6338 }
6339
6340 /* Check if fork events are supported. */
6341
6342 static int
6343 linux_supports_fork_events (void)
6344 {
6345 return linux_supports_tracefork ();
6346 }
6347
6348 /* Check if vfork events are supported. */
6349
6350 static int
6351 linux_supports_vfork_events (void)
6352 {
6353 return linux_supports_tracefork ();
6354 }
6355
6356 /* Check if exec events are supported. */
6357
6358 static int
6359 linux_supports_exec_events (void)
6360 {
6361 return linux_supports_traceexec ();
6362 }
6363
6364 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6365 options for the specified lwp. */
6366
6367 static int
6368 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6369 void *args)
6370 {
6371 struct thread_info *thread = (struct thread_info *) entry;
6372 struct lwp_info *lwp = get_thread_lwp (thread);
6373
6374 if (!lwp->stopped)
6375 {
6376 /* Stop the lwp so we can modify its ptrace options. */
6377 lwp->must_set_ptrace_flags = 1;
6378 linux_stop_lwp (lwp);
6379 }
6380 else
6381 {
6382 /* Already stopped; go ahead and set the ptrace options. */
6383 struct process_info *proc = find_process_pid (pid_of (thread));
6384 int options = linux_low_ptrace_options (proc->attached);
6385
6386 linux_enable_event_reporting (lwpid_of (thread), options);
6387 lwp->must_set_ptrace_flags = 0;
6388 }
6389
6390 return 0;
6391 }
6392
6393 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6394 ptrace flags for all inferiors. This is in case the new GDB connection
6395 doesn't support the same set of events that the previous one did. */
6396
6397 static void
6398 linux_handle_new_gdb_connection (void)
6399 {
6400 pid_t pid;
6401
6402 /* Request that all the lwps reset their ptrace options. */
6403 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6404 }
6405
6406 static int
6407 linux_supports_disable_randomization (void)
6408 {
6409 #ifdef HAVE_PERSONALITY
6410 return 1;
6411 #else
6412 return 0;
6413 #endif
6414 }
6415
6416 static int
6417 linux_supports_agent (void)
6418 {
6419 return 1;
6420 }
6421
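/* Check whether the architecture backend supports range stepping.  */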
6422 static int
6423 linux_supports_range_stepping (void)
6424 {
6425 if (*the_low_target.supports_range_stepping == NULL)
6426 return 0;
6427
6428 return (*the_low_target.supports_range_stepping) ();
6429 }
6430
6431 /* Enumerate spufs IDs for process PID. */
6432 static int
6433 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6434 {
6435 int pos = 0;
6436 int written = 0;
6437 char path[128];
6438 DIR *dir;
6439 struct dirent *entry;
6440
6441 sprintf (path, "/proc/%ld/fd", pid);
6442 dir = opendir (path);
6443 if (!dir)
6444 return -1;
6445
6446 rewinddir (dir);
6447 while ((entry = readdir (dir)) != NULL)
6448 {
6449 struct stat st;
6450 struct statfs stfs;
6451 int fd;
6452
6453 fd = atoi (entry->d_name);
6454 if (!fd)
6455 continue;
6456
6457 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6458 if (stat (path, &st) != 0)
6459 continue;
6460 if (!S_ISDIR (st.st_mode))
6461 continue;
6462
6463 if (statfs (path, &stfs) != 0)
6464 continue;
6465 if (stfs.f_type != SPUFS_MAGIC)
6466 continue;
6467
6468 if (pos >= offset && pos + 4 <= offset + len)
6469 {
6470 *(unsigned int *)(buf + pos - offset) = fd;
6471 written += 4;
6472 }
6473 pos += 4;
6474 }
6475
6476 closedir (dir);
6477 return written;
6478 }
6479
6480 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6481 object type, using the /proc file system. */
6482 static int
6483 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6484 unsigned const char *writebuf,
6485 CORE_ADDR offset, int len)
6486 {
6487 long pid = lwpid_of (current_thread);
6488 char buf[128];
6489 int fd = 0;
6490 int ret = 0;
6491
6492 if (!writebuf && !readbuf)
6493 return -1;
6494
6495 if (!*annex)
6496 {
6497 if (!readbuf)
6498 return -1;
6499 else
6500 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6501 }
6502
6503 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6504 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6505 if (fd <= 0)
6506 return -1;
6507
6508 if (offset != 0
6509 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6510 {
6511 close (fd);
6512 return 0;
6513 }
6514
6515 if (writebuf)
6516 ret = write (fd, writebuf, (size_t) len);
6517 else
6518 ret = read (fd, readbuf, (size_t) len);
6519
6520 close (fd);
6521 return ret;
6522 }
6523
6524 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6525 struct target_loadseg
6526 {
6527 /* Core address to which the segment is mapped. */
6528 Elf32_Addr addr;
6529 /* VMA recorded in the program header. */
6530 Elf32_Addr p_vaddr;
6531 /* Size of this segment in memory. */
6532 Elf32_Word p_memsz;
6533 };
6534
6535 # if defined PT_GETDSBT
6536 struct target_loadmap
6537 {
6538 /* Protocol version number, must be zero. */
6539 Elf32_Word version;
6540 /* Pointer to the DSBT table, its size, and the DSBT index. */
6541 unsigned *dsbt_table;
6542 unsigned dsbt_size, dsbt_index;
6543 /* Number of segments in this map. */
6544 Elf32_Word nsegs;
6545 /* The actual memory map. */
6546 struct target_loadseg segs[/*nsegs*/];
6547 };
6548 # define LINUX_LOADMAP PT_GETDSBT
6549 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6550 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6551 # else
6552 struct target_loadmap
6553 {
6554 /* Protocol version number, must be zero. */
6555 Elf32_Half version;
6556 /* Number of segments in this map. */
6557 Elf32_Half nsegs;
6558 /* The actual memory map. */
6559 struct target_loadseg segs[/*nsegs*/];
6560 };
6561 # define LINUX_LOADMAP PTRACE_GETFDPIC
6562 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6563 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6564 # endif
6565
6566 static int
6567 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6568 unsigned char *myaddr, unsigned int len)
6569 {
6570 int pid = lwpid_of (current_thread);
6571 int addr = -1;
6572 struct target_loadmap *data = NULL;
6573 unsigned int actual_length, copy_length;
6574
6575 if (strcmp (annex, "exec") == 0)
6576 addr = (int) LINUX_LOADMAP_EXEC;
6577 else if (strcmp (annex, "interp") == 0)
6578 addr = (int) LINUX_LOADMAP_INTERP;
6579 else
6580 return -1;
6581
6582 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6583 return -1;
6584
6585 if (data == NULL)
6586 return -1;
6587
6588 actual_length = sizeof (struct target_loadmap)
6589 + sizeof (struct target_loadseg) * data->nsegs;
6590
6591 if (offset < 0 || offset > actual_length)
6592 return -1;
6593
6594 copy_length = actual_length - offset < len ? actual_length - offset : len;
6595 memcpy (myaddr, (char *) data + offset, copy_length);
6596 return copy_length;
6597 }
6598 #else
6599 # define linux_read_loadmap NULL
6600 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6601
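/* Pass the qSupported features on to the architecture backend, if it
registered a hook for them.  */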
6602 static void
6603 linux_process_qsupported (char **features, int count)
6604 {
6605 if (the_low_target.process_qsupported != NULL)
6606 the_low_target.process_qsupported (features, count);
6607 }
6608
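/* Syscall catchpoints require both a backend get_syscall_trapinfo
hook and kernel support for PTRACE_O_TRACESYSGOOD.  */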
6609 static int
6610 linux_supports_catch_syscall (void)
6611 {
6612 return (the_low_target.get_syscall_trapinfo != NULL
6613 && linux_supports_tracesysgood ());
6614 }
6615
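/* Return the in-process agent's tdesc index, or 0 if the backend
does not provide one.  */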
6616 static int
6617 linux_get_ipa_tdesc_idx (void)
6618 {
6619 if (the_low_target.get_ipa_tdesc_idx == NULL)
6620 return 0;
6621
6622 return (*the_low_target.get_ipa_tdesc_idx) ();
6623 }
6624
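/* Check whether the architecture backend supports tracepoints.  */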
6625 static int
6626 linux_supports_tracepoints (void)
6627 {
6628 if (*the_low_target.supports_tracepoints == NULL)
6629 return 0;
6630
6631 return (*the_low_target.supports_tracepoints) ();
6632 }
6633
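/* Read the program counter via the backend's get_pc hook, or return
0 if the backend does not provide one.  */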
6634 static CORE_ADDR
6635 linux_read_pc (struct regcache *regcache)
6636 {
6637 if (the_low_target.get_pc == NULL)
6638 return 0;
6639
6640 return (*the_low_target.get_pc) (regcache);
6641 }
6642
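/* Write the program counter via the backend's set_pc hook, which
must be provided.  */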
6643 static void
6644 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6645 {
6646 gdb_assert (the_low_target.set_pc != NULL);
6647
6648 (*the_low_target.set_pc) (regcache, pc);
6649 }
6650
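/* Implement the thread_stopped target_ops method.  */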
6651 static int
6652 linux_thread_stopped (struct thread_info *thread)
6653 {
6654 return get_thread_lwp (thread)->stopped;
6655 }
6656
6657 /* This exposes stop-all-threads functionality to other modules. */
6658
6659 static void
6660 linux_pause_all (int freeze)
6661 {
6662 stop_all_lwps (freeze, NULL);
6663 }
6664
6665 /* This exposes unstop-all-threads functionality to other gdbserver
6666 modules. */
6667
6668 static void
6669 linux_unpause_all (int unfreeze)
6670 {
6671 unstop_all_lwps (unfreeze, NULL);
6672 }
6673
6674 static int
6675 linux_prepare_to_access_memory (void)
6676 {
6677 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6678 running LWP. */
6679 if (non_stop)
6680 linux_pause_all (1);
6681 return 0;
6682 }
6683
6684 static void
6685 linux_done_accessing_memory (void)
6686 {
6687 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6688 running LWP. */
6689 if (non_stop)
6690 linux_unpause_all (1);
6691 }
6692
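/* Delegate construction of the fast tracepoint jump pad to the
architecture backend.  */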
6693 static int
6694 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6695 CORE_ADDR collector,
6696 CORE_ADDR lockaddr,
6697 ULONGEST orig_size,
6698 CORE_ADDR *jump_entry,
6699 CORE_ADDR *trampoline,
6700 ULONGEST *trampoline_size,
6701 unsigned char *jjump_pad_insn,
6702 ULONGEST *jjump_pad_insn_size,
6703 CORE_ADDR *adjusted_insn_addr,
6704 CORE_ADDR *adjusted_insn_addr_end,
6705 char *err)
6706 {
6707 return (*the_low_target.install_fast_tracepoint_jump_pad)
6708 (tpoint, tpaddr, collector, lockaddr, orig_size,
6709 jump_entry, trampoline, trampoline_size,
6710 jjump_pad_insn, jjump_pad_insn_size,
6711 adjusted_insn_addr, adjusted_insn_addr_end,
6712 err);
6713 }
6714
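/* Return the backend's bytecode compilation ops, if any.  */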
6715 static struct emit_ops *
6716 linux_emit_ops (void)
6717 {
6718 if (the_low_target.emit_ops != NULL)
6719 return (*the_low_target.emit_ops) ();
6720 else
6721 return NULL;
6722 }
6723
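/* Return the minimum length of a fast tracepoint instruction, as
reported by the architecture backend.  */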
6724 static int
6725 linux_get_min_fast_tracepoint_insn_len (void)
6726 {
6727 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6728 }
6729
6730 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6731
6732 static int
6733 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6734 CORE_ADDR *phdr_memaddr, int *num_phdr)
6735 {
6736 char filename[PATH_MAX];
6737 int fd;
6738 const int auxv_size = is_elf64
6739 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6740 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6741
6742 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6743
6744 fd = open (filename, O_RDONLY);
6745 if (fd < 0)
6746 return 1;
6747
6748 *phdr_memaddr = 0;
6749 *num_phdr = 0;
6750 while (read (fd, buf, auxv_size) == auxv_size
6751 && (*phdr_memaddr == 0 || *num_phdr == 0))
6752 {
6753 if (is_elf64)
6754 {
6755 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6756
6757 switch (aux->a_type)
6758 {
6759 case AT_PHDR:
6760 *phdr_memaddr = aux->a_un.a_val;
6761 break;
6762 case AT_PHNUM:
6763 *num_phdr = aux->a_un.a_val;
6764 break;
6765 }
6766 }
6767 else
6768 {
6769 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6770
6771 switch (aux->a_type)
6772 {
6773 case AT_PHDR:
6774 *phdr_memaddr = aux->a_un.a_val;
6775 break;
6776 case AT_PHNUM:
6777 *num_phdr = aux->a_un.a_val;
6778 break;
6779 }
6780 }
6781 }
6782
6783 close (fd);
6784
6785 if (*phdr_memaddr == 0 || *num_phdr == 0)
6786 {
6787 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6788 "phdr_memaddr = %ld, phdr_num = %d",
6789 (long) *phdr_memaddr, *num_phdr);
6790 return 2;
6791 }
6792
6793 return 0;
6794 }
6795
6796 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6797
6798 static CORE_ADDR
6799 get_dynamic (const int pid, const int is_elf64)
6800 {
6801 CORE_ADDR phdr_memaddr, relocation;
6802 int num_phdr, i;
6803 unsigned char *phdr_buf;
6804 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6805
6806 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6807 return 0;
6808
6809 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6810 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6811
6812 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6813 return 0;
6814
6815 /* Compute relocation: it is expected to be 0 for "regular" executables,
6816 non-zero for PIE ones. */
6817 relocation = -1;
6818 for (i = 0; relocation == -1 && i < num_phdr; i++)
6819 if (is_elf64)
6820 {
6821 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6822
6823 if (p->p_type == PT_PHDR)
6824 relocation = phdr_memaddr - p->p_vaddr;
6825 }
6826 else
6827 {
6828 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6829
6830 if (p->p_type == PT_PHDR)
6831 relocation = phdr_memaddr - p->p_vaddr;
6832 }
6833
6834 if (relocation == -1)
6835 {
6836 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6837 all real world executables, including PIE executables, always have
6838 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6839 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
6840 provides DT_DEBUG anyway (fpc binaries are statically linked).
6841
6842 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6843
6844 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6845
6846 return 0;
6847 }
6848
6849 for (i = 0; i < num_phdr; i++)
6850 {
6851 if (is_elf64)
6852 {
6853 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6854
6855 if (p->p_type == PT_DYNAMIC)
6856 return p->p_vaddr + relocation;
6857 }
6858 else
6859 {
6860 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6861
6862 if (p->p_type == PT_DYNAMIC)
6863 return p->p_vaddr + relocation;
6864 }
6865 }
6866
6867 return 0;
6868 }
6869
6870 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6871 can be 0 if the inferior does not yet have the library list initialized.
6872 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6873 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6874
6875 static CORE_ADDR
6876 get_r_debug (const int pid, const int is_elf64)
6877 {
6878 CORE_ADDR dynamic_memaddr;
6879 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6880 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6881 CORE_ADDR map = -1;
6882
6883 dynamic_memaddr = get_dynamic (pid, is_elf64);
6884 if (dynamic_memaddr == 0)
6885 return map;
6886
6887 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6888 {
6889 if (is_elf64)
6890 {
6891 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6892 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6893 union
6894 {
6895 Elf64_Xword map;
6896 unsigned char buf[sizeof (Elf64_Xword)];
6897 }
6898 rld_map;
6899 #endif
6900 #ifdef DT_MIPS_RLD_MAP
6901 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6902 {
6903 if (linux_read_memory (dyn->d_un.d_val,
6904 rld_map.buf, sizeof (rld_map.buf)) == 0)
6905 return rld_map.map;
6906 else
6907 break;
6908 }
6909 #endif /* DT_MIPS_RLD_MAP */
6910 #ifdef DT_MIPS_RLD_MAP_REL
6911 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6912 {
6913 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6914 rld_map.buf, sizeof (rld_map.buf)) == 0)
6915 return rld_map.map;
6916 else
6917 break;
6918 }
6919 #endif /* DT_MIPS_RLD_MAP_REL */
6920
6921 if (dyn->d_tag == DT_DEBUG && map == -1)
6922 map = dyn->d_un.d_val;
6923
6924 if (dyn->d_tag == DT_NULL)
6925 break;
6926 }
6927 else
6928 {
6929 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6930 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6931 union
6932 {
6933 Elf32_Word map;
6934 unsigned char buf[sizeof (Elf32_Word)];
6935 }
6936 rld_map;
6937 #endif
6938 #ifdef DT_MIPS_RLD_MAP
6939 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6940 {
6941 if (linux_read_memory (dyn->d_un.d_val,
6942 rld_map.buf, sizeof (rld_map.buf)) == 0)
6943 return rld_map.map;
6944 else
6945 break;
6946 }
6947 #endif /* DT_MIPS_RLD_MAP */
6948 #ifdef DT_MIPS_RLD_MAP_REL
6949 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6950 {
6951 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6952 rld_map.buf, sizeof (rld_map.buf)) == 0)
6953 return rld_map.map;
6954 else
6955 break;
6956 }
6957 #endif /* DT_MIPS_RLD_MAP_REL */
6958
6959 if (dyn->d_tag == DT_DEBUG && map == -1)
6960 map = dyn->d_un.d_val;
6961
6962 if (dyn->d_tag == DT_NULL)
6963 break;
6964 }
6965
6966 dynamic_memaddr += dyn_size;
6967 }
6968
6969 return map;
6970 }
6971
6972 /* Read one pointer from MEMADDR in the inferior. */
6973
6974 static int
6975 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6976 {
6977 int ret;
6978
6979 /* Go through a union so this works on either big or little endian
6980 hosts, when the inferior's pointer size is smaller than the size
6981 of CORE_ADDR. It is assumed the inferior's endianness is the
6982 same as the superior's. */
6983 union
6984 {
6985 CORE_ADDR core_addr;
6986 unsigned int ui;
6987 unsigned char uc;
6988 } addr;
6989
6990 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6991 if (ret == 0)
6992 {
6993 if (ptr_size == sizeof (CORE_ADDR))
6994 *ptr = addr.core_addr;
6995 else if (ptr_size == sizeof (unsigned int))
6996 *ptr = addr.ui;
6997 else
6998 gdb_assert_not_reached ("unhandled pointer size");
6999 }
7000 return ret;
7001 }
7002
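/* Offsets of the r_debug and link_map fields that gdbserver needs,
parameterized for 32-bit and 64-bit SVR4 targets.  */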
7003 struct link_map_offsets
7004 {
7005 /* Offset and size of r_debug.r_version. */
7006 int r_version_offset;
7007
7008 /* Offset and size of r_debug.r_map. */
7009 int r_map_offset;
7010
7011 /* Offset to l_addr field in struct link_map. */
7012 int l_addr_offset;
7013
7014 /* Offset to l_name field in struct link_map. */
7015 int l_name_offset;
7016
7017 /* Offset to l_ld field in struct link_map. */
7018 int l_ld_offset;
7019
7020 /* Offset to l_next field in struct link_map. */
7021 int l_next_offset;
7022
7023 /* Offset to l_prev field in struct link_map. */
7024 int l_prev_offset;
7025 };
7026
7027 /* Construct qXfer:libraries-svr4:read reply. */
7028
7029 static int
7030 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7031 unsigned const char *writebuf,
7032 CORE_ADDR offset, int len)
7033 {
7034 char *document;
7035 unsigned document_len;
7036 struct process_info_private *const priv = current_process ()->priv;
7037 char filename[PATH_MAX];
7038 int pid, is_elf64;
7039
7040 static const struct link_map_offsets lmo_32bit_offsets =
7041 {
7042 0, /* r_version offset. */
7043 4, /* r_debug.r_map offset. */
7044 0, /* l_addr offset in link_map. */
7045 4, /* l_name offset in link_map. */
7046 8, /* l_ld offset in link_map. */
7047 12, /* l_next offset in link_map. */
7048 16 /* l_prev offset in link_map. */
7049 };
7050
7051 static const struct link_map_offsets lmo_64bit_offsets =
7052 {
7053 0, /* r_version offset. */
7054 8, /* r_debug.r_map offset. */
7055 0, /* l_addr offset in link_map. */
7056 8, /* l_name offset in link_map. */
7057 16, /* l_ld offset in link_map. */
7058 24, /* l_next offset in link_map. */
7059 32 /* l_prev offset in link_map. */
7060 };
7061 const struct link_map_offsets *lmo;
7062 unsigned int machine;
7063 int ptr_size;
7064 CORE_ADDR lm_addr = 0, lm_prev = 0;
7065 int allocated = 1024;
7066 char *p;
7067 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7068 int header_done = 0;
7069
7070 if (writebuf != NULL)
7071 return -2;
7072 if (readbuf == NULL)
7073 return -1;
7074
7075 pid = lwpid_of (current_thread);
7076 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7077 is_elf64 = elf_64_file_p (filename, &machine);
7078 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7079 ptr_size = is_elf64 ? 8 : 4;
7080
7081 while (annex[0] != '\0')
7082 {
7083 const char *sep;
7084 CORE_ADDR *addrp;
7085 int len;
7086
7087 sep = strchr (annex, '=');
7088 if (sep == NULL)
7089 break;
7090
7091 len = sep - annex;
7092 if (len == 5 && startswith (annex, "start"))
7093 addrp = &lm_addr;
7094 else if (len == 4 && startswith (annex, "prev"))
7095 addrp = &lm_prev;
7096 else
7097 {
7098 annex = strchr (sep, ';');
7099 if (annex == NULL)
7100 break;
7101 annex++;
7102 continue;
7103 }
7104
7105 annex = decode_address_to_semicolon (addrp, sep + 1);
7106 }
7107
7108 if (lm_addr == 0)
7109 {
7110 int r_version = 0;
7111
7112 if (priv->r_debug == 0)
7113 priv->r_debug = get_r_debug (pid, is_elf64);
7114
7115 /* We failed to find DT_DEBUG. This situation will not change
7116 for this inferior, so do not retry it. Report it to GDB as
7117 E01; see GDB's solib-svr4.c for the reasons. */
7118 if (priv->r_debug == (CORE_ADDR) -1)
7119 return -1;
7120
7121 if (priv->r_debug != 0)
7122 {
7123 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7124 (unsigned char *) &r_version,
7125 sizeof (r_version)) != 0
7126 || r_version != 1)
7127 {
7128 warning ("unexpected r_debug version %d", r_version);
7129 }
7130 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7131 &lm_addr, ptr_size) != 0)
7132 {
7133 warning ("unable to read r_map from 0x%lx",
7134 (long) priv->r_debug + lmo->r_map_offset);
7135 }
7136 }
7137 }
7138
7139 document = (char *) xmalloc (allocated);
7140 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7141 p = document + strlen (document);
7142
7143 while (lm_addr
7144 && read_one_ptr (lm_addr + lmo->l_name_offset,
7145 &l_name, ptr_size) == 0
7146 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7147 &l_addr, ptr_size) == 0
7148 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7149 &l_ld, ptr_size) == 0
7150 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7151 &l_prev, ptr_size) == 0
7152 && read_one_ptr (lm_addr + lmo->l_next_offset,
7153 &l_next, ptr_size) == 0)
7154 {
7155 unsigned char libname[PATH_MAX];
7156
7157 if (lm_prev != l_prev)
7158 {
7159 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7160 (long) lm_prev, (long) l_prev);
7161 break;
7162 }
7163
7164 /* Ignore the first entry even if it has a valid name, as the first
7165 entry corresponds to the main executable. The first entry should not
7166 be skipped if the dynamic loader was loaded late by a static executable
7167 (see the solib-svr4.c parameter ignore_first). But in that case the
7168 main executable does not have PT_DYNAMIC present, and this function
7169 has already exited above due to a failed get_r_debug. */
7170 if (lm_prev == 0)
7171 {
7172 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7173 p = p + strlen (p);
7174 }
7175 else
7176 {
7177 /* Not checking for error because reading may stop before
7178 we've got PATH_MAX worth of characters. */
7179 libname[0] = '\0';
7180 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7181 libname[sizeof (libname) - 1] = '\0';
7182 if (libname[0] != '\0')
7183 {
7184 /* 6x the size for xml_escape_text below. */
7185 size_t len = 6 * strlen ((char *) libname);
7186 char *name;
7187
7188 if (!header_done)
7189 {
7190 /* Terminate `<library-list-svr4'. */
7191 *p++ = '>';
7192 header_done = 1;
7193 }
7194
7195 while (allocated < p - document + len + 200)
7196 {
7197 /* Expand to guarantee sufficient storage. */
7198 uintptr_t document_len = p - document;
7199
7200 document = (char *) xrealloc (document, 2 * allocated);
7201 allocated *= 2;
7202 p = document + document_len;
7203 }
7204
7205 name = xml_escape_text ((char *) libname);
7206 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7207 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7208 name, (unsigned long) lm_addr,
7209 (unsigned long) l_addr, (unsigned long) l_ld);
7210 free (name);
7211 }
7212 }
7213
7214 lm_prev = lm_addr;
7215 lm_addr = l_next;
7216 }
7217
7218 if (!header_done)
7219 {
7220 /* Empty list; terminate `<library-list-svr4'. */
7221 strcpy (p, "/>");
7222 }
7223 else
7224 strcpy (p, "</library-list-svr4>");
7225
7226 document_len = strlen (document);
7227 if (offset < document_len)
7228 document_len -= offset;
7229 else
7230 document_len = 0;
7231 if (len > document_len)
7232 len = document_len;
7233
7234 memcpy (readbuf, document + offset, len);
7235 xfree (document);
7236
7237 return len;
7238 }
7239
7240 #ifdef HAVE_LINUX_BTRACE
7241
7242 /* See to_disable_btrace target method. */
7243
7244 static int
7245 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7246 {
7247 enum btrace_error err;
7248
7249 err = linux_disable_btrace (tinfo);
7250 return (err == BTRACE_ERR_NONE ? 0 : -1);
7251 }
7252
7253 /* Encode an Intel Processor Trace configuration. */
7254
7255 static void
7256 linux_low_encode_pt_config (struct buffer *buffer,
7257 const struct btrace_data_pt_config *config)
7258 {
7259 buffer_grow_str (buffer, "<pt-config>\n");
7260
7261 switch (config->cpu.vendor)
7262 {
7263 case CV_INTEL:
7264 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7265 "model=\"%u\" stepping=\"%u\"/>\n",
7266 config->cpu.family, config->cpu.model,
7267 config->cpu.stepping);
7268 break;
7269
7270 default:
7271 break;
7272 }
7273
7274 buffer_grow_str (buffer, "</pt-config>\n");
7275 }
7276
7277 /* Encode a raw buffer. */
7278
7279 static void
7280 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7281 unsigned int size)
7282 {
7283 if (size == 0)
7284 return;
7285
7286 /* We use hex encoding - see common/rsp-low.h. */
7287 buffer_grow_str (buffer, "<raw>\n");
7288
7289 while (size-- > 0)
7290 {
7291 char elem[2];
7292
7293 elem[0] = tohex ((*data >> 4) & 0xf);
7294 elem[1] = tohex (*data++ & 0xf);
7295
7296 buffer_grow (buffer, elem, 2);
7297 }
7298
7299 buffer_grow_str (buffer, "</raw>\n");
7300 }
7301
7302 /* See to_read_btrace target method. */
7303
7304 static int
7305 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7306 enum btrace_read_type type)
7307 {
7308 struct btrace_data btrace;
7309 struct btrace_block *block;
7310 enum btrace_error err;
7311 int i;
7312
7313 btrace_data_init (&btrace);
7314
7315 err = linux_read_btrace (&btrace, tinfo, type);
7316 if (err != BTRACE_ERR_NONE)
7317 {
7318 if (err == BTRACE_ERR_OVERFLOW)
7319 buffer_grow_str0 (buffer, "E.Overflow.");
7320 else
7321 buffer_grow_str0 (buffer, "E.Generic Error.");
7322
7323 goto err;
7324 }
7325
7326 switch (btrace.format)
7327 {
7328 case BTRACE_FORMAT_NONE:
7329 buffer_grow_str0 (buffer, "E.No Trace.");
7330 goto err;
7331
7332 case BTRACE_FORMAT_BTS:
7333 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7334 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7335
7336 for (i = 0;
7337 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7338 i++)
7339 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7340 paddress (block->begin), paddress (block->end));
7341
7342 buffer_grow_str0 (buffer, "</btrace>\n");
7343 break;
7344
7345 case BTRACE_FORMAT_PT:
7346 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7347 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7348 buffer_grow_str (buffer, "<pt>\n");
7349
7350 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7351
7352 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7353 btrace.variant.pt.size);
7354
7355 buffer_grow_str (buffer, "</pt>\n");
7356 buffer_grow_str0 (buffer, "</btrace>\n");
7357 break;
7358
7359 default:
7360 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7361 goto err;
7362 }
7363
7364 btrace_data_fini (&btrace);
7365 return 0;
7366
7367 err:
7368 btrace_data_fini (&btrace);
7369 return -1;
7370 }
7371
7372 /* See to_btrace_conf target method. */
7373
7374 static int
7375 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7376 struct buffer *buffer)
7377 {
7378 const struct btrace_config *conf;
7379
7380 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7381 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7382
7383 conf = linux_btrace_conf (tinfo);
7384 if (conf != NULL)
7385 {
7386 switch (conf->format)
7387 {
7388 case BTRACE_FORMAT_NONE:
7389 break;
7390
7391 case BTRACE_FORMAT_BTS:
7392 buffer_xml_printf (buffer, "<bts");
7393 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7394 buffer_xml_printf (buffer, " />\n");
7395 break;
7396
7397 case BTRACE_FORMAT_PT:
7398 buffer_xml_printf (buffer, "<pt");
7399 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7400 buffer_xml_printf (buffer, "/>\n");
7401 break;
7402 }
7403 }
7404
7405 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7406 return 0;
7407 }
7408 #endif /* HAVE_LINUX_BTRACE */
7409
7410 /* See nat/linux-nat.h. */
7411
7412 ptid_t
7413 current_lwp_ptid (void)
7414 {
7415 return ptid_of (current_thread);
7416 }
7417
7418 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7419
7420 static int
7421 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7422 {
7423 if (the_low_target.breakpoint_kind_from_pc != NULL)
7424 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7425 else
7426 return default_breakpoint_kind_from_pc (pcptr);
7427 }
7428
7429 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7430
7431 static const gdb_byte *
7432 linux_sw_breakpoint_from_kind (int kind, int *size)
7433 {
7434 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7435
7436 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7437 }
7438
7439 /* Implementation of the target_ops method
7440 "breakpoint_kind_from_current_state". */
7441
7442 static int
7443 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7444 {
7445 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7446 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7447 else
7448 return linux_breakpoint_kind_from_pc (pcptr);
7449 }
7450
7451 /* Default implementation of linux_target_ops method "set_pc" for
7452 32-bit pc register which is literally named "pc". */
7453
7454 void
7455 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7456 {
7457 uint32_t newpc = pc;
7458
7459 supply_register_by_name (regcache, "pc", &newpc);
7460 }
7461
7462 /* Default implementation of linux_target_ops method "get_pc" for
7463 32-bit pc register which is literally named "pc". */
7464
7465 CORE_ADDR
7466 linux_get_pc_32bit (struct regcache *regcache)
7467 {
7468 uint32_t pc;
7469
7470 collect_register_by_name (regcache, "pc", &pc);
7471 if (debug_threads)
7472 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7473 return pc;
7474 }
7475
7476 /* Default implementation of linux_target_ops method "set_pc" for
7477 64-bit pc register which is literally named "pc". */
7478
7479 void
7480 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7481 {
7482 uint64_t newpc = pc;
7483
7484 supply_register_by_name (regcache, "pc", &newpc);
7485 }
7486
7487 /* Default implementation of linux_target_ops method "get_pc" for
7488 64-bit pc register which is literally named "pc". */
7489
7490 CORE_ADDR
7491 linux_get_pc_64bit (struct regcache *regcache)
7492 {
7493 uint64_t pc;
7494
7495 collect_register_by_name (regcache, "pc", &pc);
7496 if (debug_threads)
7497 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7498 return pc;
7499 }
7500
7501
7502 static struct target_ops linux_target_ops = {
7503 linux_create_inferior,
7504 linux_post_create_inferior,
7505 linux_attach,
7506 linux_kill,
7507 linux_detach,
7508 linux_mourn,
7509 linux_join,
7510 linux_thread_alive,
7511 linux_resume,
7512 linux_wait,
7513 linux_fetch_registers,
7514 linux_store_registers,
7515 linux_prepare_to_access_memory,
7516 linux_done_accessing_memory,
7517 linux_read_memory,
7518 linux_write_memory,
7519 linux_look_up_symbols,
7520 linux_request_interrupt,
7521 linux_read_auxv,
7522 linux_supports_z_point_type,
7523 linux_insert_point,
7524 linux_remove_point,
7525 linux_stopped_by_sw_breakpoint,
7526 linux_supports_stopped_by_sw_breakpoint,
7527 linux_stopped_by_hw_breakpoint,
7528 linux_supports_stopped_by_hw_breakpoint,
7529 linux_supports_hardware_single_step,
7530 linux_stopped_by_watchpoint,
7531 linux_stopped_data_address,
7532 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7533 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7534 && defined(PT_TEXT_END_ADDR)
7535 linux_read_offsets,
7536 #else
7537 NULL,
7538 #endif
7539 #ifdef USE_THREAD_DB
7540 thread_db_get_tls_address,
7541 #else
7542 NULL,
7543 #endif
7544 linux_qxfer_spu,
7545 hostio_last_error_from_errno,
7546 linux_qxfer_osdata,
7547 linux_xfer_siginfo,
7548 linux_supports_non_stop,
7549 linux_async,
7550 linux_start_non_stop,
7551 linux_supports_multi_process,
7552 linux_supports_fork_events,
7553 linux_supports_vfork_events,
7554 linux_supports_exec_events,
7555 linux_handle_new_gdb_connection,
7556 #ifdef USE_THREAD_DB
7557 thread_db_handle_monitor_command,
7558 #else
7559 NULL,
7560 #endif
7561 linux_common_core_of_thread,
7562 linux_read_loadmap,
7563 linux_process_qsupported,
7564 linux_supports_tracepoints,
7565 linux_read_pc,
7566 linux_write_pc,
7567 linux_thread_stopped,
7568 NULL,
7569 linux_pause_all,
7570 linux_unpause_all,
7571 linux_stabilize_threads,
7572 linux_install_fast_tracepoint_jump_pad,
7573 linux_emit_ops,
7574 linux_supports_disable_randomization,
7575 linux_get_min_fast_tracepoint_insn_len,
7576 linux_qxfer_libraries_svr4,
7577 linux_supports_agent,
7578 #ifdef HAVE_LINUX_BTRACE
7579 linux_supports_btrace,
7580 linux_enable_btrace,
7581 linux_low_disable_btrace,
7582 linux_low_read_btrace,
7583 linux_low_btrace_conf,
7584 #else
7585 NULL,
7586 NULL,
7587 NULL,
7588 NULL,
7589 NULL,
7590 #endif
7591 linux_supports_range_stepping,
7592 linux_proc_pid_to_exec_file,
7593 linux_mntns_open_cloexec,
7594 linux_mntns_unlink,
7595 linux_mntns_readlink,
7596 linux_breakpoint_kind_from_pc,
7597 linux_sw_breakpoint_from_kind,
7598 linux_proc_tid_get_name,
7599 linux_breakpoint_kind_from_current_state,
7600 linux_supports_software_single_step,
7601 linux_supports_catch_syscall,
7602 linux_get_ipa_tdesc_idx,
7603 };
7604
7605 #ifdef HAVE_LINUX_REGSETS
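/* Count the regsets in INFO, recording the total in num_regsets.  */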
7606 void
7607 initialize_regsets_info (struct regsets_info *info)
7608 {
7609 for (info->num_regsets = 0;
7610 info->regsets[info->num_regsets].size >= 0;
7611 info->num_regsets++)
7612 ;
7613 }
7614 #endif
7615
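/* One-time initialization of the GNU/Linux target: register the
target vector, install the SIGCHLD handler, and probe for ptrace
features.  */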
7616 void
7617 initialize_low (void)
7618 {
7619 struct sigaction sigchld_action;
7620
7621 memset (&sigchld_action, 0, sizeof (sigchld_action));
7622 set_target_ops (&linux_target_ops);
7623
7624 linux_ptrace_init_warnings ();
7625
7626 sigchld_action.sa_handler = sigchld_handler;
7627 sigemptyset (&sigchld_action.sa_mask);
7628 sigchld_action.sa_flags = SA_RESTART;
7629 sigaction (SIGCHLD, &sigchld_action, NULL);
7630
7631 initialize_low_arch ();
7632
7633 linux_check_ptrace_features ();
7634 }