Delete reinsert breakpoints from forked child
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN has defined these since at least the 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
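/* Editorial sketch (not part of the original source): on the targets
   above these offsets index the kernel's USER area, and a tracer reads
   them with PTRACE_PEEKUSER.  Guarded out, since it only compiles on
   targets where the constants are defined.  */
#if 0
static void
show_text_data_bounds (pid_t pid)
{
  errno = 0;
  long text = ptrace (PTRACE_PEEKUSER, pid, (void *) PT_TEXT_ADDR, 0);
  long text_end = ptrace (PTRACE_PEEKUSER, pid, (void *) PT_TEXT_END_ADDR, 0);
  long data = ptrace (PTRACE_PEEKUSER, pid, (void *) PT_DATA_ADDR, 0);

  if (errno == 0)
    fprintf (stderr, "text=%#lx..%#lx data=%#lx\n", text, text_end, data);
}
#endif
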
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
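/* Editorial sketch (not part of the original source): the typedefs
   above describe the record layout of /proc/<pid>/auxv -- a flat array
   of (a_type, a_un.a_val) pairs terminated by an AT_NULL entry.  A
   64-bit reader could walk it like this; error handling elided.  */
#if 0
static void
dump_auxv_64 (int pid)
{
  char path[64];
  Elf64_auxv_t entry;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/auxv", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return;
  while (fread (&entry, sizeof (entry), 1, f) == 1
	 && entry.a_type != AT_NULL)
    fprintf (stderr, "a_type=%llu a_val=%#llx\n",
	     (unsigned long long) entry.a_type,
	     (unsigned long long) entry.a_un.a_val);
  fclose (f);
}
#endif
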
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
227
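/* Editorial usage sketch (not part of the original source): when the
   wait loop collects a stop for a pid it does not know yet, it parks
   the status on the list; the fork/clone handler in
   handle_extended_wait below later claims it instead of calling
   waitpid again.  UNKNOWN_PID/NEW_PID/WSTAT are placeholder names.  */
#if 0
  /* In the generic wait path, for a stop from an unknown pid:  */
  add_to_pid_list (&stopped_pids, unknown_pid, wstat);

  /* Later, once PTRACE_GETEVENTMSG names that pid as a new child:  */
  int status;
  if (pull_pid_from_list (&stopped_pids, new_pid, &status))
    {
      /* STATUS holds the initial SIGSTOP report; no waitpid needed.  */
    }
#endif
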
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static void unsuspend_all_lwps (struct lwp_info *except);
256 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259 static struct lwp_info *add_lwp (ptid_t ptid);
260 static void linux_mourn (struct process_info *process);
261 static int linux_stopped_by_watchpoint (void);
262 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263 static int lwp_is_marked_dead (struct lwp_info *lwp);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268 static void complete_ongoing_step_over (void);
269 static int linux_low_ptrace_options (int attached);
270
271 /* When the event-loop is doing a step-over, this points at the thread
272 being stepped. */
273 ptid_t step_over_bkpt;
274
275 /* True if the low target can hardware single-step. */
276
277 static int
278 can_hardware_single_step (void)
279 {
280 if (the_low_target.supports_hardware_single_step != NULL)
281 return the_low_target.supports_hardware_single_step ();
282 else
283 return 0;
284 }
285
286 /* True if the low target can software single-step. Such targets
287 implement the GET_NEXT_PCS callback. */
288
289 static int
290 can_software_single_step (void)
291 {
292 return (the_low_target.get_next_pcs != NULL);
293 }
294
295 /* True if the low target supports memory breakpoints. If so, we'll
296 have a GET_PC implementation. */
297
298 static int
299 supports_breakpoints (void)
300 {
301 return (the_low_target.get_pc != NULL);
302 }
303
304 /* Returns true if this target can support fast tracepoints. This
305 does not mean that the in-process agent has been loaded in the
306 inferior. */
307
308 static int
309 supports_fast_tracepoints (void)
310 {
311 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
312 }
313
314 /* True if LWP is stopped in its stepping range. */
315
316 static int
317 lwp_in_step_range (struct lwp_info *lwp)
318 {
319 CORE_ADDR pc = lwp->stop_pc;
320
321 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
322 }
323
324 struct pending_signals
325 {
326 int signal;
327 siginfo_t info;
328 struct pending_signals *prev;
329 };
330
331 /* The read/write ends of the pipe registered as waitable file in the
332 event loop. */
333 static int linux_event_pipe[2] = { -1, -1 };
334
335 /* True if we're currently in async mode. */
336 #define target_is_async_p() (linux_event_pipe[0] != -1)
337
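/* Editorial sketch (not part of the original source): this pipe is the
   classic self-pipe trick -- making it readable wakes the event loop's
   select/poll.  async_file_mark (declared below, defined later in this
   file) plausibly amounts to writing a single byte:  */
#if 0
static void
event_pipe_mark (void)
{
  int ret;

  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret < 0 && errno == EINTR);
}
#endif
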
338 static void send_sigstop (struct lwp_info *lwp);
339 static void wait_for_sigstop (void);
340
341 /* Return 1 if HEADER describes a 64-bit ELF, 0 if it describes a 32-bit ELF, and -1 if it is not an ELF file at all; set *MACHINE to the ELF machine type (EM_NONE if not ELF). */
342
343 static int
344 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
345 {
346 if (header->e_ident[EI_MAG0] == ELFMAG0
347 && header->e_ident[EI_MAG1] == ELFMAG1
348 && header->e_ident[EI_MAG2] == ELFMAG2
349 && header->e_ident[EI_MAG3] == ELFMAG3)
350 {
351 *machine = header->e_machine;
352 return header->e_ident[EI_CLASS] == ELFCLASS64;
353
354 }
355 *machine = EM_NONE;
356 return -1;
357 }
358
359 /* Return non-zero if FILE is a 64-bit ELF file,
360 zero if the file is not a 64-bit ELF file,
361 and -1 if the file is not accessible or doesn't exist. */
362
363 static int
364 elf_64_file_p (const char *file, unsigned int *machine)
365 {
366 Elf64_Ehdr header;
367 int fd;
368
369 fd = open (file, O_RDONLY);
370 if (fd < 0)
371 return -1;
372
373 if (read (fd, &header, sizeof (header)) != sizeof (header))
374 {
375 close (fd);
376 return 0;
377 }
378 close (fd);
379
380 return elf_64_header_p (&header, machine);
381 }
382
383 /* Accepts an integer PID; returns true if the executable that PID
384 is running is a 64-bit ELF file. */
385
386 int
387 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
388 {
389 char file[PATH_MAX];
390
391 sprintf (file, "/proc/%d/exe", pid);
392 return elf_64_file_p (file, machine);
393 }
394
395 static void
396 delete_lwp (struct lwp_info *lwp)
397 {
398 struct thread_info *thr = get_lwp_thread (lwp);
399
400 if (debug_threads)
401 debug_printf ("deleting %ld\n", lwpid_of (thr));
402
403 remove_thread (thr);
404 free (lwp->arch_private);
405 free (lwp);
406 }
407
408 /* Add a process to the common process list, and set its private
409 data. */
410
411 static struct process_info *
412 linux_add_process (int pid, int attached)
413 {
414 struct process_info *proc;
415
416 proc = add_process (pid, attached);
417 proc->priv = XCNEW (struct process_info_private);
418
419 if (the_low_target.new_process != NULL)
420 proc->priv->arch_private = the_low_target.new_process ();
421
422 return proc;
423 }
424
425 static CORE_ADDR get_pc (struct lwp_info *lwp);
426
427 /* Call the target arch_setup function on the current thread. */
428
429 static void
430 linux_arch_setup (void)
431 {
432 the_low_target.arch_setup ();
433 }
434
435 /* Call the target arch_setup function on THREAD. */
436
437 static void
438 linux_arch_setup_thread (struct thread_info *thread)
439 {
440 struct thread_info *saved_thread;
441
442 saved_thread = current_thread;
443 current_thread = thread;
444
445 linux_arch_setup ();
446
447 current_thread = saved_thread;
448 }
449
450 /* Handle a GNU/Linux extended wait response. If we see a clone,
451 fork, or vfork event, we need to add the new LWP to our list
452 (and return 0 so as not to report the trap to higher layers).
453 If we see an exec event, we will modify ORIG_EVENT_LWP to point
454 to a new LWP representing the new program. */
455
456 static int
457 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
458 {
459 struct lwp_info *event_lwp = *orig_event_lwp;
460 int event = linux_ptrace_get_extended_event (wstat);
461 struct thread_info *event_thr = get_lwp_thread (event_lwp);
462 struct lwp_info *new_lwp;
463
464 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
465
466 /* All extended events we currently use are mid-syscall. Only
467 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
468 you have to be using PTRACE_SEIZE to get that. */
469 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
470
471 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
472 || (event == PTRACE_EVENT_CLONE))
473 {
474 ptid_t ptid;
475 unsigned long new_pid;
476 int ret, status;
477
478 /* Get the pid of the new lwp. */
479 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
480 &new_pid);
481
482 /* If we haven't already seen the new PID stop, wait for it now. */
483 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
484 {
485 /* The new child has a pending SIGSTOP. We can't affect it until it
486 hits the SIGSTOP, but we're already attached. */
487
488 ret = my_waitpid (new_pid, &status, __WALL);
489
490 if (ret == -1)
491 perror_with_name ("waiting for new child");
492 else if (ret != new_pid)
493 warning ("wait returned unexpected PID %d", ret);
494 else if (!WIFSTOPPED (status))
495 warning ("wait returned unexpected status 0x%x", status);
496 }
497
498 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
499 {
500 struct process_info *parent_proc;
501 struct process_info *child_proc;
502 struct lwp_info *child_lwp;
503 struct thread_info *child_thr;
504 struct target_desc *tdesc;
505
506 ptid = ptid_build (new_pid, new_pid, 0);
507
508 if (debug_threads)
509 {
510 debug_printf ("HEW: Got fork event from LWP %ld, "
511 "new child is %d\n",
512 ptid_get_lwp (ptid_of (event_thr)),
513 ptid_get_pid (ptid));
514 }
515
516 /* Add the new process to the tables and clone the breakpoint
517 lists of the parent. We need to do this even if the new process
518 will be detached, since we will need the process object and the
519 breakpoints to remove any breakpoints from memory when we
520 detach, and the client side will access registers. */
521 child_proc = linux_add_process (new_pid, 0);
522 gdb_assert (child_proc != NULL);
523 child_lwp = add_lwp (ptid);
524 gdb_assert (child_lwp != NULL);
525 child_lwp->stopped = 1;
526 child_lwp->must_set_ptrace_flags = 1;
527 child_lwp->status_pending_p = 0;
528 child_thr = get_lwp_thread (child_lwp);
529 child_thr->last_resume_kind = resume_stop;
530 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
531
532 /* If we're suspending all threads, leave this one suspended
533 too. If the fork/clone parent is stepping over a breakpoint,
534 all other threads have been suspended already. Leave the
535 child suspended too. */
536 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
537 || event_lwp->bp_reinsert != 0)
538 {
539 if (debug_threads)
540 debug_printf ("HEW: leaving child suspended\n");
541 child_lwp->suspended = 1;
542 }
543
544 parent_proc = get_thread_process (event_thr);
545 child_proc->attached = parent_proc->attached;
546 clone_all_breakpoints (&child_proc->breakpoints,
547 &child_proc->raw_breakpoints,
548 parent_proc->breakpoints);
549
550 tdesc = XNEW (struct target_desc);
551 copy_target_description (tdesc, parent_proc->tdesc);
552 child_proc->tdesc = tdesc;
553
554 /* Clone arch-specific process data. */
555 if (the_low_target.new_fork != NULL)
556 the_low_target.new_fork (parent_proc, child_proc);
557
558 /* Save fork info in the parent thread. */
559 if (event == PTRACE_EVENT_FORK)
560 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
561 else if (event == PTRACE_EVENT_VFORK)
562 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
563
564 event_lwp->waitstatus.value.related_pid = ptid;
565
566 /* The status_pending field contains bits denoting the
567 extended event, so when the pending event is handled,
568 the handler will look at lwp->waitstatus. */
569 event_lwp->status_pending_p = 1;
570 event_lwp->status_pending = wstat;
571
572 /* If the parent thread is doing step-over with reinsert
573 breakpoints, the reinsert breakpoints are still in the forked
574 child's process space and cloned to its breakpoint list
575 from the parent's. Remove them from the child process. */
576 if (event_lwp->bp_reinsert != 0
577 && can_software_single_step ()
578 && event == PTRACE_EVENT_FORK)
579 {
580 struct thread_info *saved_thread = current_thread;
581
582 /* The child process is forked and stopped, so it is safe
583 to access its memory without stopping all other threads
584 from other processes. */
585 current_thread = child_thr;
586 delete_reinsert_breakpoints ();
587 current_thread = saved_thread;
588
589 gdb_assert (has_reinsert_breakpoints (parent_proc));
590 gdb_assert (!has_reinsert_breakpoints (child_proc));
591 }
592
593 /* Report the event. */
594 return 0;
595 }
596
597 if (debug_threads)
598 debug_printf ("HEW: Got clone event "
599 "from LWP %ld, new child is LWP %ld\n",
600 lwpid_of (event_thr), new_pid);
601
602 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
603 new_lwp = add_lwp (ptid);
604
605 /* Either we're going to immediately resume the new thread
606 or leave it stopped. linux_resume_one_lwp is a nop if it
607 thinks the thread is currently running, so set this first
608 before calling linux_resume_one_lwp. */
609 new_lwp->stopped = 1;
610
611 /* If we're suspending all threads, leave this one suspended
612 too. If the fork/clone parent is stepping over a breakpoint,
613 all other threads have been suspended already. Leave the
614 child suspended too. */
615 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
616 || event_lwp->bp_reinsert != 0)
617 new_lwp->suspended = 1;
618
619 /* Normally we will get the pending SIGSTOP. But in some cases
620 we might get another signal delivered to the group first.
621 If we do get another signal, be sure not to lose it. */
622 if (WSTOPSIG (status) != SIGSTOP)
623 {
624 new_lwp->stop_expected = 1;
625 new_lwp->status_pending_p = 1;
626 new_lwp->status_pending = status;
627 }
628 else if (report_thread_events)
629 {
630 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
631 new_lwp->status_pending_p = 1;
632 new_lwp->status_pending = status;
633 }
634
635 /* Don't report the event. */
636 return 1;
637 }
638 else if (event == PTRACE_EVENT_VFORK_DONE)
639 {
640 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
641
642 /* Report the event. */
643 return 0;
644 }
645 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
646 {
647 struct process_info *proc;
648 VEC (int) *syscalls_to_catch;
649 ptid_t event_ptid;
650 pid_t event_pid;
651
652 if (debug_threads)
653 {
654 debug_printf ("HEW: Got exec event from LWP %ld\n",
655 lwpid_of (event_thr));
656 }
657
658 /* Get the event ptid. */
659 event_ptid = ptid_of (event_thr);
660 event_pid = ptid_get_pid (event_ptid);
661
662 /* Save the syscall list from the execing process. */
663 proc = get_thread_process (event_thr);
664 syscalls_to_catch = proc->syscalls_to_catch;
665 proc->syscalls_to_catch = NULL;
666
667 /* Delete the execing process and all its threads. */
668 linux_mourn (proc);
669 current_thread = NULL;
670
671 /* Create a new process/lwp/thread. */
672 proc = linux_add_process (event_pid, 0);
673 event_lwp = add_lwp (event_ptid);
674 event_thr = get_lwp_thread (event_lwp);
675 gdb_assert (current_thread == event_thr);
676 linux_arch_setup_thread (event_thr);
677
678 /* Set the event status. */
679 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
680 event_lwp->waitstatus.value.execd_pathname
681 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
682
683 /* Mark the exec status as pending. */
684 event_lwp->stopped = 1;
685 event_lwp->status_pending_p = 1;
686 event_lwp->status_pending = wstat;
687 event_thr->last_resume_kind = resume_continue;
688 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
689
690 /* Update syscall state in the new lwp, effectively mid-syscall too. */
691 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
692
693 /* Restore the list to catch. Don't rely on the client, which is free
694 to avoid sending a new list when the architecture doesn't change.
695 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
696 proc->syscalls_to_catch = syscalls_to_catch;
697
698 /* Report the event. */
699 *orig_event_lwp = event_lwp;
700 return 0;
701 }
702
703 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
704 }
705
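/* Editorial sketch (not part of the original source): the extended
   events decoded above are only delivered if the tracer asked for them
   via PTRACE_SETOPTIONS; the real option word is computed by
   linux_low_ptrace_options (declared above).  A minimal enabling call
   would look like:  */
#if 0
static void
enable_extended_events (pid_t pid)
{
  long options = (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
		  | PTRACE_O_TRACEVFORKDONE | PTRACE_O_TRACECLONE
		  | PTRACE_O_TRACEEXEC);

  ptrace (PTRACE_SETOPTIONS, pid, (void *) 0, (void *) options);
}
#endif
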
706 /* Return the PC as read from the regcache of LWP, without any
707 adjustment. */
708
709 static CORE_ADDR
710 get_pc (struct lwp_info *lwp)
711 {
712 struct thread_info *saved_thread;
713 struct regcache *regcache;
714 CORE_ADDR pc;
715
716 if (the_low_target.get_pc == NULL)
717 return 0;
718
719 saved_thread = current_thread;
720 current_thread = get_lwp_thread (lwp);
721
722 regcache = get_thread_regcache (current_thread, 1);
723 pc = (*the_low_target.get_pc) (regcache);
724
725 if (debug_threads)
726 debug_printf ("pc is 0x%lx\n", (long) pc);
727
728 current_thread = saved_thread;
729 return pc;
730 }
731
732 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
733 Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
734 return code. */
735
736 static void
737 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
738 {
739 struct thread_info *saved_thread;
740 struct regcache *regcache;
741
742 if (the_low_target.get_syscall_trapinfo == NULL)
743 {
744 /* If we cannot get the syscall trapinfo, report an unknown
745 system call number and -ENOSYS return value. */
746 *sysno = UNKNOWN_SYSCALL;
747 *sysret = -ENOSYS;
748 return;
749 }
750
751 saved_thread = current_thread;
752 current_thread = get_lwp_thread (lwp);
753
754 regcache = get_thread_regcache (current_thread, 1);
755 (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
756
757 if (debug_threads)
758 {
759 debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
760 *sysno, *sysret);
761 }
762
763 current_thread = saved_thread;
764 }
765
766 static int check_stopped_by_watchpoint (struct lwp_info *child);
767
768 /* Called when the LWP stopped for a signal/trap. If it stopped for a
769 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
770 and save the result in the LWP's stop_reason field. If it stopped
771 for a breakpoint, decrement the PC if necessary on the lwp's
772 architecture. Returns true if we now have the LWP's stop PC. */
773
774 static int
775 save_stop_reason (struct lwp_info *lwp)
776 {
777 CORE_ADDR pc;
778 CORE_ADDR sw_breakpoint_pc;
779 struct thread_info *saved_thread;
780 #if USE_SIGTRAP_SIGINFO
781 siginfo_t siginfo;
782 #endif
783
784 if (the_low_target.get_pc == NULL)
785 return 0;
786
787 pc = get_pc (lwp);
788 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
789
790 /* breakpoint_at reads from the current thread. */
791 saved_thread = current_thread;
792 current_thread = get_lwp_thread (lwp);
793
794 #if USE_SIGTRAP_SIGINFO
795 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
796 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
797 {
798 if (siginfo.si_signo == SIGTRAP)
799 {
800 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
801 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
802 {
803 /* The si_code is ambiguous on this arch -- check debug
804 registers. */
805 if (!check_stopped_by_watchpoint (lwp))
806 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
807 }
808 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
809 {
810 /* If we determine the LWP stopped for a SW breakpoint,
811 trust it. Particularly don't check watchpoint
812 registers, because at least on s390, we'd find
813 stopped-by-watchpoint as long as there's a watchpoint
814 set. */
815 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
816 }
817 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
818 {
819 /* This can indicate either a hardware breakpoint or
820 hardware watchpoint. Check debug registers. */
821 if (!check_stopped_by_watchpoint (lwp))
822 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
823 }
824 else if (siginfo.si_code == TRAP_TRACE)
825 {
826 /* We may have single stepped an instruction that
827 triggered a watchpoint. In that case, on some
828 architectures (such as x86), instead of TRAP_HWBKPT,
829 si_code indicates TRAP_TRACE, and we need to check
830 the debug registers separately. */
831 if (!check_stopped_by_watchpoint (lwp))
832 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
833 }
834 }
835 }
836 #else
837 /* We may have just stepped a breakpoint instruction. E.g., in
838 non-stop mode, GDB first tells the thread A to step a range, and
839 then the user inserts a breakpoint inside the range. In that
840 case we need to report the breakpoint PC. */
841 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
842 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
843 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
844
845 if (hardware_breakpoint_inserted_here (pc))
846 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
847
848 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
849 check_stopped_by_watchpoint (lwp);
850 #endif
851
852 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
853 {
854 if (debug_threads)
855 {
856 struct thread_info *thr = get_lwp_thread (lwp);
857
858 debug_printf ("CSBB: %s stopped by software breakpoint\n",
859 target_pid_to_str (ptid_of (thr)));
860 }
861
862 /* Back up the PC if necessary. */
863 if (pc != sw_breakpoint_pc)
864 {
865 struct regcache *regcache
866 = get_thread_regcache (current_thread, 1);
867 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
868 }
869
870 /* Update this so we record the correct stop PC below. */
871 pc = sw_breakpoint_pc;
872 }
873 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
874 {
875 if (debug_threads)
876 {
877 struct thread_info *thr = get_lwp_thread (lwp);
878
879 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
880 target_pid_to_str (ptid_of (thr)));
881 }
882 }
883 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
884 {
885 if (debug_threads)
886 {
887 struct thread_info *thr = get_lwp_thread (lwp);
888
889 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
890 target_pid_to_str (ptid_of (thr)));
891 }
892 }
893 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
894 {
895 if (debug_threads)
896 {
897 struct thread_info *thr = get_lwp_thread (lwp);
898
899 debug_printf ("CSBB: %s stopped by trace\n",
900 target_pid_to_str (ptid_of (thr)));
901 }
902 }
903
904 lwp->stop_pc = pc;
905 current_thread = saved_thread;
906 return 1;
907 }
908
909 static struct lwp_info *
910 add_lwp (ptid_t ptid)
911 {
912 struct lwp_info *lwp;
913
914 lwp = XCNEW (struct lwp_info);
915
916 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
917
918 if (the_low_target.new_thread != NULL)
919 the_low_target.new_thread (lwp);
920
921 lwp->thread = add_thread (ptid, lwp);
922
923 return lwp;
924 }
925
926 /* Start an inferior process and return its pid.
927 ALLARGS is a vector of program-name and args. */
928
929 static int
930 linux_create_inferior (char *program, char **allargs)
931 {
932 struct lwp_info *new_lwp;
933 int pid;
934 ptid_t ptid;
935 struct cleanup *restore_personality
936 = maybe_disable_address_space_randomization (disable_randomization);
937
938 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
939 pid = vfork ();
940 #else
941 pid = fork ();
942 #endif
943 if (pid < 0)
944 perror_with_name ("fork");
945
946 if (pid == 0)
947 {
948 close_most_fds ();
949 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
950
951 setpgid (0, 0);
952
953 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
954 stdout to stderr so that inferior i/o doesn't corrupt the connection.
955 Also, redirect stdin to /dev/null. */
956 if (remote_connection_is_stdio ())
957 {
958 close (0);
959 open ("/dev/null", O_RDONLY);
960 dup2 (2, 1);
961 if (write (2, "stdin/stdout redirected\n",
962 sizeof ("stdin/stdout redirected\n") - 1) < 0)
963 {
964 /* Errors ignored. */;
965 }
966 }
967
968 execv (program, allargs);
969 if (errno == ENOENT)
970 execvp (program, allargs);
971
972 fprintf (stderr, "Cannot exec %s: %s.\n", program,
973 strerror (errno));
974 fflush (stderr);
975 _exit (0177);
976 }
977
978 do_cleanups (restore_personality);
979
980 linux_add_process (pid, 0);
981
982 ptid = ptid_build (pid, pid, 0);
983 new_lwp = add_lwp (ptid);
984 new_lwp->must_set_ptrace_flags = 1;
985
986 return pid;
987 }
988
989 /* Implement the post_create_inferior target_ops method. */
990
991 static void
992 linux_post_create_inferior (void)
993 {
994 struct lwp_info *lwp = get_thread_lwp (current_thread);
995
996 linux_arch_setup ();
997
998 if (lwp->must_set_ptrace_flags)
999 {
1000 struct process_info *proc = current_process ();
1001 int options = linux_low_ptrace_options (proc->attached);
1002
1003 linux_enable_event_reporting (lwpid_of (current_thread), options);
1004 lwp->must_set_ptrace_flags = 0;
1005 }
1006 }
1007
1008 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1009 error. */
1010
1011 int
1012 linux_attach_lwp (ptid_t ptid)
1013 {
1014 struct lwp_info *new_lwp;
1015 int lwpid = ptid_get_lwp (ptid);
1016
1017 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1018 != 0)
1019 return errno;
1020
1021 new_lwp = add_lwp (ptid);
1022
1023 /* We need to wait for SIGSTOP before being able to make the next
1024 ptrace call on this LWP. */
1025 new_lwp->must_set_ptrace_flags = 1;
1026
1027 if (linux_proc_pid_is_stopped (lwpid))
1028 {
1029 if (debug_threads)
1030 debug_printf ("Attached to a stopped process\n");
1031
1032 /* The process is definitely stopped. It is in a job control
1033 stop, unless the kernel predates the TASK_STOPPED /
1034 TASK_TRACED distinction, in which case it might be in a
1035 ptrace stop. Make sure it is in a ptrace stop; from there we
1036 can kill it, signal it, et cetera.
1037
1038 First make sure there is a pending SIGSTOP. Since we are
1039 already attached, the process cannot transition from stopped
1040 to running without a PTRACE_CONT; so we know this signal will
1041 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1042 probably already in the queue (unless this kernel is old
1043 enough to use TASK_STOPPED for ptrace stops); but since
1044 SIGSTOP is not an RT signal, it can only be queued once. */
1045 kill_lwp (lwpid, SIGSTOP);
1046
1047 /* Finally, resume the stopped process. This will deliver the
1048 SIGSTOP (or a higher priority signal, just like normal
1049 PTRACE_ATTACH), which we'll catch later on. */
1050 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1051 }
1052
1053 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1054 brings it to a halt.
1055
1056 There are several cases to consider here:
1057
1058 1) gdbserver has already attached to the process and is being notified
1059 of a new thread that is being created.
1060 In this case we should ignore that SIGSTOP and resume the
1061 process. This is handled below by setting stop_expected = 1,
1062 and the fact that add_thread sets last_resume_kind ==
1063 resume_continue.
1064
1065 2) This is the first thread (the process thread), and we're attaching
1066 to it via attach_inferior.
1067 In this case we want the process thread to stop.
1068 This is handled by having linux_attach set last_resume_kind ==
1069 resume_stop after we return.
1070
1071 If the pid we are attaching to is also the tgid, we attach to and
1072 stop all the existing threads. Otherwise, we attach to pid and
1073 ignore any other threads in the same group as this pid.
1074
1075 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1076 existing threads.
1077 In this case we want the thread to stop.
1078 FIXME: This case is currently not properly handled.
1079 We should wait for the SIGSTOP but don't. Things work apparently
1080 because enough time passes between when we ptrace (ATTACH) and when
1081 gdb makes the next ptrace call on the thread.
1082
1083 On the other hand, if we are currently trying to stop all threads, we
1084 should treat the new thread as if we had sent it a SIGSTOP. This works
1085 because we are guaranteed that the add_lwp call above added us to the
1086 end of the list, and so the new thread has not yet reached
1087 wait_for_sigstop (but will). */
1088 new_lwp->stop_expected = 1;
1089
1090 return 0;
1091 }
1092
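/* Editorial sketch (not part of the original source): kill_lwp,
   declared above and defined later in this file, must direct a signal
   at one LWP rather than the whole process.  A common shape uses the
   tkill syscall with a fallback to kill(); treat this as an assumption
   about the implementation, not a copy of it.  */
#if 0
static int
kill_lwp_sketch (unsigned long lwpid, int signo)
{
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);
  if (ret == -1 && errno == ENOSYS)
    ret = kill (lwpid, signo);	/* Old kernel: process-wide kill.  */
  return ret;
}
#endif
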
1093 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1094 already attached. Returns true if a new LWP is found, false
1095 otherwise. */
1096
1097 static int
1098 attach_proc_task_lwp_callback (ptid_t ptid)
1099 {
1100 /* Is this a new thread? */
1101 if (find_thread_ptid (ptid) == NULL)
1102 {
1103 int lwpid = ptid_get_lwp (ptid);
1104 int err;
1105
1106 if (debug_threads)
1107 debug_printf ("Found new lwp %d\n", lwpid);
1108
1109 err = linux_attach_lwp (ptid);
1110
1111 /* Be quiet if we simply raced with the thread exiting. EPERM
1112 is returned if the thread's task still exists, and is marked
1113 as exited or zombie, as well as other conditions, so in that
1114 case, confirm the status in /proc/PID/status. */
1115 if (err == ESRCH
1116 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1117 {
1118 if (debug_threads)
1119 {
1120 debug_printf ("Cannot attach to lwp %d: "
1121 "thread is gone (%d: %s)\n",
1122 lwpid, err, strerror (err));
1123 }
1124 }
1125 else if (err != 0)
1126 {
1127 warning (_("Cannot attach to lwp %d: %s"),
1128 lwpid,
1129 linux_ptrace_attach_fail_reason_string (ptid, err));
1130 }
1131
1132 return 1;
1133 }
1134 return 0;
1135 }
1136
1137 static void async_file_mark (void);
1138
1139 /* Attach to PID. If PID is the tgid, attach to it and all
1140 of its threads. */
1141
1142 static int
1143 linux_attach (unsigned long pid)
1144 {
1145 struct process_info *proc;
1146 struct thread_info *initial_thread;
1147 ptid_t ptid = ptid_build (pid, pid, 0);
1148 int err;
1149
1150 /* Attach to PID. We will check for other threads
1151 soon. */
1152 err = linux_attach_lwp (ptid);
1153 if (err != 0)
1154 error ("Cannot attach to process %ld: %s",
1155 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1156
1157 proc = linux_add_process (pid, 1);
1158
1159 /* Don't ignore the initial SIGSTOP if we just attached to this
1160 process. It will be collected by wait shortly. */
1161 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1162 initial_thread->last_resume_kind = resume_stop;
1163
1164 /* We must attach to every LWP. If /proc is mounted, use that to
1165 find them now. On the one hand, the inferior may be using raw
1166 clone instead of using pthreads. On the other hand, even if it
1167 is using pthreads, GDB may not be connected yet (thread_db needs
1168 to do symbol lookups, through qSymbol). Also, thread_db walks
1169 structures in the inferior's address space to find the list of
1170 threads/LWPs, and those structures may well be corrupted. Note
1171 that once thread_db is loaded, we'll still use it to list threads
1172 and associate pthread info with each LWP. */
1173 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1174
1175 /* GDB will shortly read the xml target description for this
1176 process, to figure out the process' architecture. But the target
1177 description is only filled in when the first process/thread in
1178 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1179 that now, otherwise, if GDB is fast enough, it could read the
1180 target description _before_ that initial stop. */
1181 if (non_stop)
1182 {
1183 struct lwp_info *lwp;
1184 int wstat, lwpid;
1185 ptid_t pid_ptid = pid_to_ptid (pid);
1186
1187 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1188 &wstat, __WALL);
1189 gdb_assert (lwpid > 0);
1190
1191 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1192
1193 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1194 {
1195 lwp->status_pending_p = 1;
1196 lwp->status_pending = wstat;
1197 }
1198
1199 initial_thread->last_resume_kind = resume_continue;
1200
1201 async_file_mark ();
1202
1203 gdb_assert (proc->tdesc != NULL);
1204 }
1205
1206 return 0;
1207 }
1208
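/* Editorial sketch (not part of the original source):
   linux_proc_attach_tgid_threads (declared in nat/linux-procfs.h)
   finds LWPs by listing /proc/PID/task.  The core of such a walk, with
   the re-scan-until-stable loop and error handling elided:  */
#if 0
static void
for_each_lwp_of (int pid, int (*callback) (ptid_t))
{
  char dirname[64];
  DIR *dir;
  struct dirent *dp;

  snprintf (dirname, sizeof (dirname), "/proc/%d/task", pid);
  dir = opendir (dirname);
  if (dir == NULL)
    return;
  while ((dp = readdir (dir)) != NULL)
    if (isdigit ((unsigned char) dp->d_name[0]))
      callback (ptid_build (pid, atoi (dp->d_name), 0));
  closedir (dir);
}
#endif
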
1209 struct counter
1210 {
1211 int pid;
1212 int count;
1213 };
1214
1215 static int
1216 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1217 {
1218 struct counter *counter = (struct counter *) args;
1219
1220 if (ptid_get_pid (entry->id) == counter->pid)
1221 {
1222 if (++counter->count > 1)
1223 return 1;
1224 }
1225
1226 return 0;
1227 }
1228
1229 static int
1230 last_thread_of_process_p (int pid)
1231 {
1232 struct counter counter = { pid, 0 };
1233
1234 return (find_inferior (&all_threads,
1235 second_thread_of_pid_p, &counter) == NULL);
1236 }
1237
1238 /* Kill LWP. */
1239
1240 static void
1241 linux_kill_one_lwp (struct lwp_info *lwp)
1242 {
1243 struct thread_info *thr = get_lwp_thread (lwp);
1244 int pid = lwpid_of (thr);
1245
1246 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1247 there is no signal context, and ptrace(PTRACE_KILL) (or
1248 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1249 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1250 alternative is to kill with SIGKILL. We only need one SIGKILL
1251 per process, not one for each thread. But since we still support
1252 debugging programs using raw clone without CLONE_THREAD,
1253 we send one for each thread. For years, we used PTRACE_KILL
1254 only, so we're being a bit paranoid about some old kernels where
1255 PTRACE_KILL might work better (dubious if there are any such, but
1256 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1257 second, and so we're fine everywhere. */
1258
1259 errno = 0;
1260 kill_lwp (pid, SIGKILL);
1261 if (debug_threads)
1262 {
1263 int save_errno = errno;
1264
1265 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1266 target_pid_to_str (ptid_of (thr)),
1267 save_errno ? strerror (save_errno) : "OK");
1268 }
1269
1270 errno = 0;
1271 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1272 if (debug_threads)
1273 {
1274 int save_errno = errno;
1275
1276 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1277 target_pid_to_str (ptid_of (thr)),
1278 save_errno ? strerror (save_errno) : "OK");
1279 }
1280 }
1281
1282 /* Kill LWP and wait for it to die. */
1283
1284 static void
1285 kill_wait_lwp (struct lwp_info *lwp)
1286 {
1287 struct thread_info *thr = get_lwp_thread (lwp);
1288 int pid = ptid_get_pid (ptid_of (thr));
1289 int lwpid = ptid_get_lwp (ptid_of (thr));
1290 int wstat;
1291 int res;
1292
1293 if (debug_threads)
1294 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1295
1296 do
1297 {
1298 linux_kill_one_lwp (lwp);
1299
1300 /* Make sure it died. Notes:
1301
1302 - The loop is most likely unnecessary.
1303
1304 - We don't use linux_wait_for_event as that could delete lwps
1305 while we're iterating over them. We're not interested in
1306 any pending status at this point, only in making sure all
1307 wait status on the kernel side are collected until the
1308 process is reaped.
1309
1310 - We don't use __WALL here as the __WALL emulation relies on
1311 SIGCHLD, and killing a stopped process doesn't generate
1312 one, nor an exit status.
1313 */
1314 res = my_waitpid (lwpid, &wstat, 0);
1315 if (res == -1 && errno == ECHILD)
1316 res = my_waitpid (lwpid, &wstat, __WCLONE);
1317 } while (res > 0 && WIFSTOPPED (wstat));
1318
1319 /* Even if it was stopped, the child may have already disappeared.
1320 E.g., if it was killed by SIGKILL. */
1321 if (res < 0 && errno != ECHILD)
1322 perror_with_name ("kill_wait_lwp");
1323 }
1324
1325 /* Callback for `find_inferior'. Kills an lwp of a given process,
1326 except the leader. */
1327
1328 static int
1329 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1330 {
1331 struct thread_info *thread = (struct thread_info *) entry;
1332 struct lwp_info *lwp = get_thread_lwp (thread);
1333 int pid = * (int *) args;
1334
1335 if (ptid_get_pid (entry->id) != pid)
1336 return 0;
1337
1338 /* We avoid killing the first thread here, because of a Linux kernel (at
1339 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1340 the children get a chance to be reaped, it will remain a zombie
1341 forever. */
1342
1343 if (lwpid_of (thread) == pid)
1344 {
1345 if (debug_threads)
1346 debug_printf ("lkop: is last of process %s\n",
1347 target_pid_to_str (entry->id));
1348 return 0;
1349 }
1350
1351 kill_wait_lwp (lwp);
1352 return 0;
1353 }
1354
1355 static int
1356 linux_kill (int pid)
1357 {
1358 struct process_info *process;
1359 struct lwp_info *lwp;
1360
1361 process = find_process_pid (pid);
1362 if (process == NULL)
1363 return -1;
1364
1365 /* If we're killing a running inferior, make sure it is stopped
1366 first, as PTRACE_KILL will not work otherwise. */
1367 stop_all_lwps (0, NULL);
1368
1369 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1370
1371 /* See the comment in linux_kill_one_lwp. We did not kill the first
1372 thread in the list, so do so now. */
1373 lwp = find_lwp_pid (pid_to_ptid (pid));
1374
1375 if (lwp == NULL)
1376 {
1377 if (debug_threads)
1378 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1379 pid);
1380 }
1381 else
1382 kill_wait_lwp (lwp);
1383
1384 the_target->mourn (process);
1385
1386 /* Since we presently can only stop all lwps of all processes, we
1387 need to unstop lwps of other processes. */
1388 unstop_all_lwps (0, NULL);
1389 return 0;
1390 }
1391
1392 /* Get pending signal of THREAD, for detaching purposes. This is the
1393 signal the thread last stopped for, which we need to deliver to the
1394 thread when detaching; otherwise, it would be suppressed/lost. */
1395
1396 static int
1397 get_detach_signal (struct thread_info *thread)
1398 {
1399 enum gdb_signal signo = GDB_SIGNAL_0;
1400 int status;
1401 struct lwp_info *lp = get_thread_lwp (thread);
1402
1403 if (lp->status_pending_p)
1404 status = lp->status_pending;
1405 else
1406 {
1407 /* If the thread had been suspended by gdbserver, and it stopped
1408 cleanly, then it'll have stopped with SIGSTOP. But we don't
1409 want to deliver that SIGSTOP. */
1410 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1411 || thread->last_status.value.sig == GDB_SIGNAL_0)
1412 return 0;
1413
1414 /* Otherwise, we may need to deliver the signal we
1415 intercepted. */
1416 status = lp->last_status;
1417 }
1418
1419 if (!WIFSTOPPED (status))
1420 {
1421 if (debug_threads)
1422 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1423 target_pid_to_str (ptid_of (thread)));
1424 return 0;
1425 }
1426
1427 /* Extended wait statuses aren't real SIGTRAPs. */
1428 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1429 {
1430 if (debug_threads)
1431 debug_printf ("GPS: lwp %s had stopped with extended "
1432 "status: no pending signal\n",
1433 target_pid_to_str (ptid_of (thread)));
1434 return 0;
1435 }
1436
1437 signo = gdb_signal_from_host (WSTOPSIG (status));
1438
1439 if (program_signals_p && !program_signals[signo])
1440 {
1441 if (debug_threads)
1442 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1443 target_pid_to_str (ptid_of (thread)),
1444 gdb_signal_to_string (signo));
1445 return 0;
1446 }
1447 else if (!program_signals_p
1448 /* If we have no way to know which signals GDB does not
1449 want to have passed to the program, assume
1450 SIGTRAP/SIGINT, which is GDB's default. */
1451 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1452 {
1453 if (debug_threads)
1454 debug_printf ("GPS: lwp %s had signal %s, "
1455 "but we don't know if we should pass it. "
1456 "Default to not.\n",
1457 target_pid_to_str (ptid_of (thread)),
1458 gdb_signal_to_string (signo));
1459 return 0;
1460 }
1461 else
1462 {
1463 if (debug_threads)
1464 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1465 target_pid_to_str (ptid_of (thread)),
1466 gdb_signal_to_string (signo));
1467
1468 return WSTOPSIG (status);
1469 }
1470 }
1471
1472 static int
1473 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1474 {
1475 struct thread_info *thread = (struct thread_info *) entry;
1476 struct lwp_info *lwp = get_thread_lwp (thread);
1477 int pid = * (int *) args;
1478 int sig;
1479
1480 if (ptid_get_pid (entry->id) != pid)
1481 return 0;
1482
1483 /* If there is a pending SIGSTOP, get rid of it. */
1484 if (lwp->stop_expected)
1485 {
1486 if (debug_threads)
1487 debug_printf ("Sending SIGCONT to %s\n",
1488 target_pid_to_str (ptid_of (thread)));
1489
1490 kill_lwp (lwpid_of (thread), SIGCONT);
1491 lwp->stop_expected = 0;
1492 }
1493
1494 /* Flush any pending changes to the process's registers. */
1495 regcache_invalidate_thread (thread);
1496
1497 /* Pass on any pending signal for this thread. */
1498 sig = get_detach_signal (thread);
1499
1500 /* Finally, let it resume. */
1501 if (the_low_target.prepare_to_resume != NULL)
1502 the_low_target.prepare_to_resume (lwp);
1503 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1504 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1505 error (_("Can't detach %s: %s"),
1506 target_pid_to_str (ptid_of (thread)),
1507 strerror (errno));
1508
1509 delete_lwp (lwp);
1510 return 0;
1511 }
1512
1513 static int
1514 linux_detach (int pid)
1515 {
1516 struct process_info *process;
1517
1518 process = find_process_pid (pid);
1519 if (process == NULL)
1520 return -1;
1521
1522 /* If a step-over is already in progress, let it finish first;
1523 otherwise, nesting a stabilize_threads operation on top gets
1524 really messy. */
1525 complete_ongoing_step_over ();
1526
1527 /* Stop all threads before detaching. First, ptrace requires that
1528 the thread be stopped to detach successfully. Second, thread_db
1529 may need to uninstall thread event breakpoints from memory, which
1530 only works with a stopped process anyway. */
1531 stop_all_lwps (0, NULL);
1532
1533 #ifdef USE_THREAD_DB
1534 thread_db_detach (process);
1535 #endif
1536
1537 /* Stabilize threads (move out of jump pads). */
1538 stabilize_threads ();
1539
1540 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1541
1542 the_target->mourn (process);
1543
1544 /* Since we presently can only stop all lwps of all processes, we
1545 need to unstop lwps of other processes. */
1546 unstop_all_lwps (0, NULL);
1547 return 0;
1548 }
1549
1550 /* Remove all LWPs that belong to process PROC from the lwp list. */
1551
1552 static int
1553 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1554 {
1555 struct thread_info *thread = (struct thread_info *) entry;
1556 struct lwp_info *lwp = get_thread_lwp (thread);
1557 struct process_info *process = (struct process_info *) proc;
1558
1559 if (pid_of (thread) == pid_of (process))
1560 delete_lwp (lwp);
1561
1562 return 0;
1563 }
1564
1565 static void
1566 linux_mourn (struct process_info *process)
1567 {
1568 struct process_info_private *priv;
1569
1570 #ifdef USE_THREAD_DB
1571 thread_db_mourn (process);
1572 #endif
1573
1574 find_inferior (&all_threads, delete_lwp_callback, process);
1575
1576 /* Free all private data. */
1577 priv = process->priv;
1578 free (priv->arch_private);
1579 free (priv);
1580 process->priv = NULL;
1581
1582 remove_process (process);
1583 }
1584
1585 static void
1586 linux_join (int pid)
1587 {
1588 int status, ret;
1589
1590 do {
1591 ret = my_waitpid (pid, &status, 0);
1592 if (WIFEXITED (status) || WIFSIGNALED (status))
1593 break;
1594 } while (ret != -1 || errno != ECHILD);
1595 }
1596
1597 /* Return nonzero if the given thread is still alive. */
1598 static int
1599 linux_thread_alive (ptid_t ptid)
1600 {
1601 struct lwp_info *lwp = find_lwp_pid (ptid);
1602
1603 /* We assume we always know if a thread exits. If a whole process
1604 exited but we still haven't been able to report it to GDB, we'll
1605 hold on to the last lwp of the dead process. */
1606 if (lwp != NULL)
1607 return !lwp_is_marked_dead (lwp);
1608 else
1609 return 0;
1610 }
1611
1612 /* Return 1 if this lwp still has an interesting status pending. If
1613 not (e.g., it had stopped for a breakpoint that is gone), return
1614 0. */
1615
1616 static int
1617 thread_still_has_status_pending_p (struct thread_info *thread)
1618 {
1619 struct lwp_info *lp = get_thread_lwp (thread);
1620
1621 if (!lp->status_pending_p)
1622 return 0;
1623
1624 if (thread->last_resume_kind != resume_stop
1625 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1626 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1627 {
1628 struct thread_info *saved_thread;
1629 CORE_ADDR pc;
1630 int discard = 0;
1631
1632 gdb_assert (lp->last_status != 0);
1633
1634 pc = get_pc (lp);
1635
1636 saved_thread = current_thread;
1637 current_thread = thread;
1638
1639 if (pc != lp->stop_pc)
1640 {
1641 if (debug_threads)
1642 debug_printf ("PC of %ld changed\n",
1643 lwpid_of (thread));
1644 discard = 1;
1645 }
1646
1647 #if !USE_SIGTRAP_SIGINFO
1648 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1649 && !(*the_low_target.breakpoint_at) (pc))
1650 {
1651 if (debug_threads)
1652 debug_printf ("previous SW breakpoint of %ld gone\n",
1653 lwpid_of (thread));
1654 discard = 1;
1655 }
1656 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1657 && !hardware_breakpoint_inserted_here (pc))
1658 {
1659 if (debug_threads)
1660 debug_printf ("previous HW breakpoint of %ld gone\n",
1661 lwpid_of (thread));
1662 discard = 1;
1663 }
1664 #endif
1665
1666 current_thread = saved_thread;
1667
1668 if (discard)
1669 {
1670 if (debug_threads)
1671 debug_printf ("discarding pending breakpoint status\n");
1672 lp->status_pending_p = 0;
1673 return 0;
1674 }
1675 }
1676
1677 return 1;
1678 }
1679
1680 /* Returns true if LWP is resumed from the client's perspective. */
1681
1682 static int
1683 lwp_resumed (struct lwp_info *lwp)
1684 {
1685 struct thread_info *thread = get_lwp_thread (lwp);
1686
1687 if (thread->last_resume_kind != resume_stop)
1688 return 1;
1689
1690 /* Did gdb send us a `vCont;t', but we haven't reported the
1691 corresponding stop to gdb yet? If so, the thread is still
1692 resumed/running from gdb's perspective. */
1693 if (thread->last_resume_kind == resume_stop
1694 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1695 return 1;
1696
1697 return 0;
1698 }
1699
1700 /* Return 1 if this lwp has an interesting status pending. */
1701 static int
1702 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1703 {
1704 struct thread_info *thread = (struct thread_info *) entry;
1705 struct lwp_info *lp = get_thread_lwp (thread);
1706 ptid_t ptid = * (ptid_t *) arg;
1707
1708 /* Check if we're only interested in events from a specific process
1709 or a specific LWP. */
1710 if (!ptid_match (ptid_of (thread), ptid))
1711 return 0;
1712
1713 if (!lwp_resumed (lp))
1714 return 0;
1715
1716 if (lp->status_pending_p
1717 && !thread_still_has_status_pending_p (thread))
1718 {
1719 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1720 return 0;
1721 }
1722
1723 return lp->status_pending_p;
1724 }
1725
1726 static int
1727 same_lwp (struct inferior_list_entry *entry, void *data)
1728 {
1729 ptid_t ptid = *(ptid_t *) data;
1730 int lwp;
1731
1732 if (ptid_get_lwp (ptid) != 0)
1733 lwp = ptid_get_lwp (ptid);
1734 else
1735 lwp = ptid_get_pid (ptid);
1736
1737 if (ptid_get_lwp (entry->id) == lwp)
1738 return 1;
1739
1740 return 0;
1741 }
1742
1743 struct lwp_info *
1744 find_lwp_pid (ptid_t ptid)
1745 {
1746 struct inferior_list_entry *thread
1747 = find_inferior (&all_threads, same_lwp, &ptid);
1748
1749 if (thread == NULL)
1750 return NULL;
1751
1752 return get_thread_lwp ((struct thread_info *) thread);
1753 }
1754
1755 /* Return the number of known LWPs in the tgid given by PID. */
1756
1757 static int
1758 num_lwps (int pid)
1759 {
1760 struct inferior_list_entry *inf, *tmp;
1761 int count = 0;
1762
1763 ALL_INFERIORS (&all_threads, inf, tmp)
1764 {
1765 if (ptid_get_pid (inf->id) == pid)
1766 count++;
1767 }
1768
1769 return count;
1770 }
1771
1772 /* The arguments passed to iterate_over_lwps. */
1773
1774 struct iterate_over_lwps_args
1775 {
1776 /* The FILTER argument passed to iterate_over_lwps. */
1777 ptid_t filter;
1778
1779 /* The CALLBACK argument passed to iterate_over_lwps. */
1780 iterate_over_lwps_ftype *callback;
1781
1782 /* The DATA argument passed to iterate_over_lwps. */
1783 void *data;
1784 };
1785
1786 /* Callback for find_inferior used by iterate_over_lwps to filter
1787 calls to the callback supplied to that function. Returning a
1788 nonzero value causes find_inferior to stop iterating and return
1789 the current inferior_list_entry. Returning zero indicates that
1790 find_inferior should continue iterating. */
1791
1792 static int
1793 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1794 {
1795 struct iterate_over_lwps_args *args
1796 = (struct iterate_over_lwps_args *) args_p;
1797
1798 if (ptid_match (entry->id, args->filter))
1799 {
1800 struct thread_info *thr = (struct thread_info *) entry;
1801 struct lwp_info *lwp = get_thread_lwp (thr);
1802
1803 return (*args->callback) (lwp, args->data);
1804 }
1805
1806 return 0;
1807 }
1808
1809 /* See nat/linux-nat.h. */
1810
1811 struct lwp_info *
1812 iterate_over_lwps (ptid_t filter,
1813 iterate_over_lwps_ftype callback,
1814 void *data)
1815 {
1816 struct iterate_over_lwps_args args = {filter, callback, data};
1817 struct inferior_list_entry *entry;
1818
1819 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1820 if (entry == NULL)
1821 return NULL;
1822
1823 return get_thread_lwp ((struct thread_info *) entry);
1824 }
1825
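/* Editorial usage sketch (not part of the original source): a caller
   passes a callback; returning nonzero from it stops the iteration and
   makes iterate_over_lwps return that LWP.  E.g., finding the first
   stopped LWP of process PID:  */
#if 0
static int
is_stopped_lwp (struct lwp_info *lwp, void *data)
{
  return lwp->stopped;
}

/* ... struct lwp_info *lwp
	 = iterate_over_lwps (pid_to_ptid (pid), is_stopped_lwp, NULL); */
#endif
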
1826 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1827 their exits until all other threads in the group have exited. */
1828
1829 static void
1830 check_zombie_leaders (void)
1831 {
1832 struct process_info *proc, *tmp;
1833
1834 ALL_PROCESSES (proc, tmp)
1835 {
1836 pid_t leader_pid = pid_of (proc);
1837 struct lwp_info *leader_lp;
1838
1839 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1840
1841 if (debug_threads)
1842 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1843 "num_lwps=%d, zombie=%d\n",
1844 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1845 linux_proc_pid_is_zombie (leader_pid));
1846
1847 if (leader_lp != NULL && !leader_lp->stopped
1848 /* Check if there are other threads in the group, as we may
1849 have raced with the inferior simply exiting. */
1850 && !last_thread_of_process_p (leader_pid)
1851 && linux_proc_pid_is_zombie (leader_pid))
1852 {
1853 /* A leader zombie can mean one of two things:
1854
1855 - It exited, and there's an exit status pending
1856 available, or only the leader exited (not the whole
1857 program). In the latter case, we can't waitpid the
1858 leader's exit status until all other threads are gone.
1859
1860 - There are 3 or more threads in the group, and a thread
1861 other than the leader exec'd. On an exec, the Linux
1862 kernel destroys all other threads (except the execing
1863 one) in the thread group, and resets the execing thread's
1864 tid to the tgid. No exit notification is sent for the
1865 execing thread -- from the ptracer's perspective, it
1866 appears as though the execing thread just vanishes.
1867 Until we reap all other threads except the leader and the
1868 execing thread, the leader will be zombie, and the
1869 execing thread will be in `D (disc sleep)'. As soon as
1870 all other threads are reaped, the execing thread changes
1871 its tid to the tgid, and the previous (zombie) leader
1872 vanishes, giving way to the "new" leader. We could try
1873 distinguishing the exit and exec cases, by waiting once
1874 more, and seeing if something comes out, but it doesn't
1875 sound useful. The previous leader _does_ go away, and
1876 we'll re-add the new one once we see the exec event
1877 (which is just the same as what would happen if the
1878 previous leader did exit voluntarily before some other
1879 thread execs). */
1880
1881 if (debug_threads)
1882 fprintf (stderr,
1883 "CZL: Thread group leader %d zombie "
1884 "(it exited, or another thread execd).\n",
1885 leader_pid);
1886
1887 delete_lwp (leader_lp);
1888 }
1889 }
1890 }
1891
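/* For illustration only: a minimal sketch of the /proc-based zombie
   test that linux_proc_pid_is_zombie performs (the helper name below
   is hypothetical; the real implementation lives in
   nat/linux-procfs.c).  The "State:" line of /proc/PID/status carries
   a single state letter, and 'Z' means zombie.  Assumes <stdio.h>
   and <string.h>.  */

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[128];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	/* Skip the whitespace after the tag; the state letter
	   follows.  */
	while (*p == ' ' || *p == '\t')
	  p++;
	zombie = (*p == 'Z');
	break;
      }

  fclose (f);
  return zombie;
}
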
1892 /* Callback for `find_inferior'. Used to find the first LWP that is
1893 not stopped. ARG is a PTID filter. */
1894
1895 static int
1896 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1897 {
1898 struct thread_info *thr = (struct thread_info *) entry;
1899 struct lwp_info *lwp;
1900 ptid_t filter = *(ptid_t *) arg;
1901
1902 if (!ptid_match (ptid_of (thr), filter))
1903 return 0;
1904
1905 lwp = get_thread_lwp (thr);
1906 if (!lwp->stopped)
1907 return 1;
1908
1909 return 0;
1910 }
1911
1912 /* Increment LWP's suspend count. */
1913
1914 static void
1915 lwp_suspended_inc (struct lwp_info *lwp)
1916 {
1917 lwp->suspended++;
1918
1919 if (debug_threads && lwp->suspended > 4)
1920 {
1921 struct thread_info *thread = get_lwp_thread (lwp);
1922
1923 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1924 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1925 }
1926 }
1927
1928 /* Decrement LWP's suspend count. */
1929
1930 static void
1931 lwp_suspended_decr (struct lwp_info *lwp)
1932 {
1933 lwp->suspended--;
1934
1935 if (lwp->suspended < 0)
1936 {
1937 struct thread_info *thread = get_lwp_thread (lwp);
1938
1939 internal_error (__FILE__, __LINE__,
1940 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1941 lwp->suspended);
1942 }
1943 }
1944
1945 /* This function should only be called if the LWP got a SIGTRAP.
1946
1947 Handle any tracepoint steps or hits. Return 1 if a tracepoint
1948 event was handled, 0 otherwise. */
1949
1950 static int
1951 handle_tracepoints (struct lwp_info *lwp)
1952 {
1953 struct thread_info *tinfo = get_lwp_thread (lwp);
1954 int tpoint_related_event = 0;
1955
1956 gdb_assert (lwp->suspended == 0);
1957
1958 /* If this tracepoint hit causes a tracing stop, we'll immediately
1959 uninsert tracepoints. To do this, we temporarily pause all
1960 threads, unpatch away, and then unpause threads. We need to make
1961 sure the unpausing doesn't resume LWP too. */
1962 lwp_suspended_inc (lwp);
1963
1964 /* And we need to be sure that any all-threads-stopping doesn't try
1965 to move threads out of the jump pads, as it could deadlock the
1966 inferior (LWP could be in the jump pad, maybe even holding the
1967 lock). */
1968
1969 /* Do any necessary step collect actions. */
1970 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1971
1972 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1973
1974 /* See if we just hit a tracepoint and do its main collect
1975 actions. */
1976 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1977
1978 lwp_suspended_decr (lwp);
1979
1980 gdb_assert (lwp->suspended == 0);
1981 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1982
1983 if (tpoint_related_event)
1984 {
1985 if (debug_threads)
1986 debug_printf ("got a tracepoint event\n");
1987 return 1;
1988 }
1989
1990 return 0;
1991 }
1992
1993 /* Convenience wrapper. Returns nonzero if LWP is presently
1994 collecting a fast tracepoint. */
1995
1996 static int
1997 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1998 struct fast_tpoint_collect_status *status)
1999 {
2000 CORE_ADDR thread_area;
2001 struct thread_info *thread = get_lwp_thread (lwp);
2002
2003 if (the_low_target.get_thread_area == NULL)
2004 return 0;
2005
2006 /* Get the thread area address. This is used to recognize which
2007 thread is which when tracing with the in-process agent library.
2008 We don't read anything from the address, and treat it as opaque;
2009 it's the address itself that we assume is unique per-thread. */
2010 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2011 return 0;
2012
2013 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2014 }
2015
2016 /* The reason we resume in the caller is that we want to be able
2017 to pass lwp->status_pending as WSTAT, and we need to clear
2018 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2019 refuses to resume. */
2020
2021 static int
2022 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2023 {
2024 struct thread_info *saved_thread;
2025
2026 saved_thread = current_thread;
2027 current_thread = get_lwp_thread (lwp);
2028
2029 if ((wstat == NULL
2030 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2031 && supports_fast_tracepoints ()
2032 && agent_loaded_p ())
2033 {
2034 struct fast_tpoint_collect_status status;
2035 int r;
2036
2037 if (debug_threads)
2038 debug_printf ("Checking whether LWP %ld needs to move out of the "
2039 "jump pad.\n",
2040 lwpid_of (current_thread));
2041
2042 r = linux_fast_tracepoint_collecting (lwp, &status);
2043
2044 if (wstat == NULL
2045 || (WSTOPSIG (*wstat) != SIGILL
2046 && WSTOPSIG (*wstat) != SIGFPE
2047 && WSTOPSIG (*wstat) != SIGSEGV
2048 && WSTOPSIG (*wstat) != SIGBUS))
2049 {
2050 lwp->collecting_fast_tracepoint = r;
2051
2052 if (r != 0)
2053 {
2054 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2055 {
2056 /* Haven't executed the original instruction yet.
2057 Set breakpoint there, and wait till it's hit,
2058 then single-step until exiting the jump pad. */
2059 lwp->exit_jump_pad_bkpt
2060 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2061 }
2062
2063 if (debug_threads)
2064 debug_printf ("Checking whether LWP %ld needs to move out of "
2065 "the jump pad...it does\n",
2066 lwpid_of (current_thread));
2067 current_thread = saved_thread;
2068
2069 return 1;
2070 }
2071 }
2072 else
2073 {
2074 /* If we get a synchronous signal while collecting, *and*
2075 while executing the (relocated) original instruction,
2076 reset the PC to point at the tpoint address, before
2077 reporting to GDB. Otherwise, it's an IPA lib bug: just
2078 report the signal to GDB, and pray for the best. */
2079
2080 lwp->collecting_fast_tracepoint = 0;
2081
2082 if (r != 0
2083 && (status.adjusted_insn_addr <= lwp->stop_pc
2084 && lwp->stop_pc < status.adjusted_insn_addr_end))
2085 {
2086 siginfo_t info;
2087 struct regcache *regcache;
2088
2089 /* The si_addr on a few signals references the address
2090 of the faulting instruction. Adjust that as
2091 well. */
2092 if ((WSTOPSIG (*wstat) == SIGILL
2093 || WSTOPSIG (*wstat) == SIGFPE
2094 || WSTOPSIG (*wstat) == SIGBUS
2095 || WSTOPSIG (*wstat) == SIGSEGV)
2096 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2097 (PTRACE_TYPE_ARG3) 0, &info) == 0
2098 /* Final check just to make sure we don't clobber
2099 the siginfo of non-kernel-sent signals. */
2100 && (uintptr_t) info.si_addr == lwp->stop_pc)
2101 {
2102 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2103 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2104 (PTRACE_TYPE_ARG3) 0, &info);
2105 }
2106
2107 regcache = get_thread_regcache (current_thread, 1);
2108 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2109 lwp->stop_pc = status.tpoint_addr;
2110
2111 /* Cancel any fast tracepoint lock this thread was
2112 holding. */
2113 force_unlock_trace_buffer ();
2114 }
2115
2116 if (lwp->exit_jump_pad_bkpt != NULL)
2117 {
2118 if (debug_threads)
2119 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2120 "stopping all threads momentarily.\n");
2121
2122 stop_all_lwps (1, lwp);
2123
2124 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2125 lwp->exit_jump_pad_bkpt = NULL;
2126
2127 unstop_all_lwps (1, lwp);
2128
2129 gdb_assert (lwp->suspended >= 0);
2130 }
2131 }
2132 }
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of the "
2136 "jump pad...no\n",
2137 lwpid_of (current_thread));
2138
2139 current_thread = saved_thread;
2140 return 0;
2141 }
2142
2143 /* Enqueue one signal in the "signals to report later when out of the
2144 jump pad" list. */
2145
2146 static void
2147 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2148 {
2149 struct pending_signals *p_sig;
2150 struct thread_info *thread = get_lwp_thread (lwp);
2151
2152 if (debug_threads)
2153 debug_printf ("Deferring signal %d for LWP %ld.\n",
2154 WSTOPSIG (*wstat), lwpid_of (thread));
2155
2156 if (debug_threads)
2157 {
2158 struct pending_signals *sig;
2159
2160 for (sig = lwp->pending_signals_to_report;
2161 sig != NULL;
2162 sig = sig->prev)
2163 debug_printf (" Already queued %d\n",
2164 sig->signal);
2165
2166 debug_printf (" (no more currently queued signals)\n");
2167 }
2168
2169 /* Don't enqueue non-RT signals if they are already in the deferred
2170 queue. (SIGSTOP being the easiest signal to see ending up here
2171 twice.) */
2172 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2173 {
2174 struct pending_signals *sig;
2175
2176 for (sig = lwp->pending_signals_to_report;
2177 sig != NULL;
2178 sig = sig->prev)
2179 {
2180 if (sig->signal == WSTOPSIG (*wstat))
2181 {
2182 if (debug_threads)
2183 debug_printf ("Not requeuing already queued non-RT signal %d"
2184 " for LWP %ld\n",
2185 sig->signal,
2186 lwpid_of (thread));
2187 return;
2188 }
2189 }
2190 }
2191
2192 p_sig = XCNEW (struct pending_signals);
2193 p_sig->prev = lwp->pending_signals_to_report;
2194 p_sig->signal = WSTOPSIG (*wstat);
2195
2196 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2197 &p_sig->info);
2198
2199 lwp->pending_signals_to_report = p_sig;
2200 }
2201
2202 /* Dequeue one signal from the "signals to report later when out of
2203 the jump pad" list. */
2204
2205 static int
2206 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2207 {
2208 struct thread_info *thread = get_lwp_thread (lwp);
2209
2210 if (lwp->pending_signals_to_report != NULL)
2211 {
2212 struct pending_signals **p_sig;
2213
2214 p_sig = &lwp->pending_signals_to_report;
2215 while ((*p_sig)->prev != NULL)
2216 p_sig = &(*p_sig)->prev;
2217
2218 *wstat = W_STOPCODE ((*p_sig)->signal);
2219 if ((*p_sig)->info.si_signo != 0)
2220 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2221 &(*p_sig)->info);
2222 free (*p_sig);
2223 *p_sig = NULL;
2224
2225 if (debug_threads)
2226 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2227 WSTOPSIG (*wstat), lwpid_of (thread));
2228
2229 if (debug_threads)
2230 {
2231 struct pending_signals *sig;
2232
2233 for (sig = lwp->pending_signals_to_report;
2234 sig != NULL;
2235 sig = sig->prev)
2236 debug_printf (" Still queued %d\n",
2237 sig->signal);
2238
2239 debug_printf (" (no more queued signals)\n");
2240 }
2241
2242 return 1;
2243 }
2244
2245 return 0;
2246 }
2247
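/* A standalone sketch of the queue discipline used by the two
   functions above, with hypothetical names (assumes <stdlib.h>;
   error handling omitted): enqueuing pushes at the head through the
   PREV link, while dequeuing walks to the entry with no PREV, i.e.
   the oldest one, so deferred signals are re-reported in their
   original arrival order (FIFO).  The real code additionally saves
   and restores each signal's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO, and uses W_STOPCODE to synthesize a wait status
   that looks like a fresh stop with that signal.  */

struct example_sig
{
  struct example_sig *prev;
  int signal;
};

static void
example_enqueue (struct example_sig **head, int sig)
{
  struct example_sig *p = (struct example_sig *) calloc (1, sizeof *p);

  p->signal = sig;
  p->prev = *head;
  *head = p;
}

/* Caller must ensure the queue is non-empty, as the real code
   does.  */

static int
example_dequeue (struct example_sig **head)
{
  struct example_sig **p = head;
  int sig;

  while ((*p)->prev != NULL)
    p = &(*p)->prev;

  sig = (*p)->signal;
  free (*p);
  *p = NULL;
  return sig;
}
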
2248 /* Fetch the possibly triggered data watchpoint info and store it in
2249 CHILD.
2250
2251 On some archs, like x86, that use debug registers to set
2252 watchpoints, the way to know which watched address trapped is
2253 to check the register that is used to select which address to
2254 watch. The problem is that between setting the watchpoint and
2255 reading back which data address trapped, the user may change the
2256 set of watchpoints, and, as a consequence, GDB changes the debug
2257 registers in the inferior. To avoid reading back a stale
2258 stopped-data-address when that happens, we cache in CHILD the fact
2259 that a watchpoint trapped, and the corresponding data address, as
2260 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2261 registers meanwhile, we have the cached data we can rely on. */
2262
2263 static int
2264 check_stopped_by_watchpoint (struct lwp_info *child)
2265 {
2266 if (the_low_target.stopped_by_watchpoint != NULL)
2267 {
2268 struct thread_info *saved_thread;
2269
2270 saved_thread = current_thread;
2271 current_thread = get_lwp_thread (child);
2272
2273 if (the_low_target.stopped_by_watchpoint ())
2274 {
2275 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2276
2277 if (the_low_target.stopped_data_address != NULL)
2278 child->stopped_data_address
2279 = the_low_target.stopped_data_address ();
2280 else
2281 child->stopped_data_address = 0;
2282 }
2283
2284 current_thread = saved_thread;
2285 }
2286
2287 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2288 }
2289
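/* For illustration: on x86, the stopped_by_watchpoint and
   stopped_data_address hooks used above boil down to reading the
   debug status register DR6 via PTRACE_PEEKUSER.  Bits 0-3 of DR6
   record which of DR0..DR3 fired; the trapped address is then read
   from the matching slot.  A hedged sketch with a hypothetical
   helper (assumes <sys/ptrace.h>, <sys/user.h>, <stddef.h> and
   <errno.h>; the real code also consults DR7 so as not to confuse
   instruction breakpoints with watchpoints):  */

static int
example_x86_stopped_data_address (int pid, unsigned long *addr_p)
{
  unsigned long dr6;
  int i;

  errno = 0;
  dr6 = ptrace (PTRACE_PEEKUSER, pid,
		offsetof (struct user, u_debugreg[6]), 0);
  if (errno != 0)
    return 0;

  for (i = 0; i < 4; i++)
    if (dr6 & (1 << i))
      {
	/* Debug register I triggered; it holds the watched
	   address.  */
	*addr_p = ptrace (PTRACE_PEEKUSER, pid,
			  offsetof (struct user, u_debugreg[i]), 0);
	return 1;
      }

  return 0;
}
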
2290 /* Return the ptrace options that we want to try to enable. */
2291
2292 static int
2293 linux_low_ptrace_options (int attached)
2294 {
2295 int options = 0;
2296
2297 if (!attached)
2298 options |= PTRACE_O_EXITKILL;
2299
2300 if (report_fork_events)
2301 options |= PTRACE_O_TRACEFORK;
2302
2303 if (report_vfork_events)
2304 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2305
2306 if (report_exec_events)
2307 options |= PTRACE_O_TRACEEXEC;
2308
2309 options |= PTRACE_O_TRACESYSGOOD;
2310
2311 return options;
2312 }
2313
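/* Illustrative sketch (not a new API; see linux_enable_event_reporting
   in nat/linux-ptrace.c for the real entry point): the options
   computed above are applied while the LWP is ptrace-stopped, along
   the lines of:

     ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	     (PTRACE_TYPE_ARG4) (uintptr_t) options);

   With PTRACE_O_TRACEFORK set, for example, a fork then shows up as a
   SIGTRAP stop whose upper status bits carry the event:

     WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
     && (status >> 8) == (SIGTRAP | (PTRACE_EVENT_FORK << 8))  */
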
2314 /* Do low-level handling of the event, and check if we should go on
2315 and pass it to caller code. Return the affected lwp if we are, or
2316 NULL otherwise. */
2317
2318 static struct lwp_info *
2319 linux_low_filter_event (int lwpid, int wstat)
2320 {
2321 struct lwp_info *child;
2322 struct thread_info *thread;
2323 int have_stop_pc = 0;
2324
2325 child = find_lwp_pid (pid_to_ptid (lwpid));
2326
2327 /* Check for stop events reported by a process we didn't already
2328 know about - anything not already in our LWP list.
2329
2330 If we're expecting to receive stopped processes after
2331 fork, vfork, and clone events, then we'll just add the
2332 new one to our list and go back to waiting for the event
2333 to be reported - the stopped process might be returned
2334 from waitpid before or after the event is.
2335
2336 But note the case of a non-leader thread exec'ing after the
2337 leader has exited and gone from our lists (because
2338 check_zombie_leaders deleted it). The non-leader thread
2339 changes its tid to the tgid. */
2340
2341 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2342 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2343 {
2344 ptid_t child_ptid;
2345
2346 /* A multi-thread exec after we had seen the leader exiting. */
2347 if (debug_threads)
2348 {
2349 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2350 "after exec.\n", lwpid);
2351 }
2352
2353 child_ptid = ptid_build (lwpid, lwpid, 0);
2354 child = add_lwp (child_ptid);
2355 child->stopped = 1;
2356 current_thread = child->thread;
2357 }
2358
2359 /* If we didn't find a process, one of two things presumably happened:
2360 - A process we started and then detached from has exited. Ignore it.
2361 - A process we are controlling has forked and the new child's stop
2362 was reported to us by the kernel. Save its PID. */
2363 if (child == NULL && WIFSTOPPED (wstat))
2364 {
2365 add_to_pid_list (&stopped_pids, lwpid, wstat);
2366 return NULL;
2367 }
2368 else if (child == NULL)
2369 return NULL;
2370
2371 thread = get_lwp_thread (child);
2372
2373 child->stopped = 1;
2374
2375 child->last_status = wstat;
2376
2377 /* Check if the thread has exited. */
2378 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2379 {
2380 if (debug_threads)
2381 debug_printf ("LLFE: %d exited.\n", lwpid);
2382
2383 if (finish_step_over (child))
2384 {
2385 /* Unsuspend all other LWPs, and set them back running again. */
2386 unsuspend_all_lwps (child);
2387 }
2388
2389 /* If there is at least one more LWP, then the exit signal was
2390 not the end of the debugged application and should be
2391 ignored, unless GDB wants to hear about thread exits. */
2392 if (report_thread_events
2393 || last_thread_of_process_p (pid_of (thread)))
2394 {
2395 /* Since events are serialized to GDB core, we can't
2396 report this one right now. Leave the status pending for
2397 the next time we're able to report it. */
2398 mark_lwp_dead (child, wstat);
2399 return child;
2400 }
2401 else
2402 {
2403 delete_lwp (child);
2404 return NULL;
2405 }
2406 }
2407
2408 gdb_assert (WIFSTOPPED (wstat));
2409
2410 if (WIFSTOPPED (wstat))
2411 {
2412 struct process_info *proc;
2413
2414 /* Architecture-specific setup after inferior is running. */
2415 proc = find_process_pid (pid_of (thread));
2416 if (proc->tdesc == NULL)
2417 {
2418 if (proc->attached)
2419 {
2420 /* This needs to happen after we have attached to the
2421 inferior and it is stopped for the first time, but
2422 before we access any inferior registers. */
2423 linux_arch_setup_thread (thread);
2424 }
2425 else
2426 {
2427 /* The process is started, but GDBserver will do
2428 architecture-specific setup after the program stops at
2429 the first instruction. */
2430 child->status_pending_p = 1;
2431 child->status_pending = wstat;
2432 return child;
2433 }
2434 }
2435 }
2436
2437 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2438 {
2439 struct process_info *proc = find_process_pid (pid_of (thread));
2440 int options = linux_low_ptrace_options (proc->attached);
2441
2442 linux_enable_event_reporting (lwpid, options);
2443 child->must_set_ptrace_flags = 0;
2444 }
2445
2446 /* Always update syscall_state, even if it will be filtered later. */
2447 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2448 {
2449 child->syscall_state
2450 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2451 ? TARGET_WAITKIND_SYSCALL_RETURN
2452 : TARGET_WAITKIND_SYSCALL_ENTRY);
2453 }
2454 else
2455 {
2456 /* Almost all other ptrace-stops are known to be outside of system
2457 calls, with further exceptions in handle_extended_wait. */
2458 child->syscall_state = TARGET_WAITKIND_IGNORE;
2459 }
2460
2461 /* Be careful to not overwrite stop_pc until save_stop_reason is
2462 called. */
2463 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2464 && linux_is_extended_waitstatus (wstat))
2465 {
2466 child->stop_pc = get_pc (child);
2467 if (handle_extended_wait (&child, wstat))
2468 {
2469 /* The event has been handled, so just return without
2470 reporting it. */
2471 return NULL;
2472 }
2473 }
2474
2475 if (linux_wstatus_maybe_breakpoint (wstat))
2476 {
2477 if (save_stop_reason (child))
2478 have_stop_pc = 1;
2479 }
2480
2481 if (!have_stop_pc)
2482 child->stop_pc = get_pc (child);
2483
2484 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2485 && child->stop_expected)
2486 {
2487 if (debug_threads)
2488 debug_printf ("Expected stop.\n");
2489 child->stop_expected = 0;
2490
2491 if (thread->last_resume_kind == resume_stop)
2492 {
2493 /* We want to report the stop to the core. Treat the
2494 SIGSTOP as a normal event. */
2495 if (debug_threads)
2496 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2497 target_pid_to_str (ptid_of (thread)));
2498 }
2499 else if (stopping_threads != NOT_STOPPING_THREADS)
2500 {
2501 /* Stopping threads. We don't want this SIGSTOP to end up
2502 pending. */
2503 if (debug_threads)
2504 debug_printf ("LLW: SIGSTOP caught for %s "
2505 "while stopping threads.\n",
2506 target_pid_to_str (ptid_of (thread)));
2507 return NULL;
2508 }
2509 else
2510 {
2511 /* This is a delayed SIGSTOP. Filter out the event. */
2512 if (debug_threads)
2513 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2514 child->stepping ? "step" : "continue",
2515 target_pid_to_str (ptid_of (thread)));
2516
2517 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2518 return NULL;
2519 }
2520 }
2521
2522 child->status_pending_p = 1;
2523 child->status_pending = wstat;
2524 return child;
2525 }
2526
2527 /* Return true if THREAD is doing hardware single step. */
2528
2529 static int
2530 maybe_hw_step (struct thread_info *thread)
2531 {
2532 if (can_hardware_single_step ())
2533 return 1;
2534 else
2535 {
2536 struct process_info *proc = get_thread_process (thread);
2537
2538 /* GDBserver must insert a reinsert breakpoint for software
2539 single step. */
2540 gdb_assert (has_reinsert_breakpoints (proc));
2541 return 0;
2542 }
2543 }
2544
2545 /* Resume LWPs that are currently stopped without any pending status
2546 to report, but are resumed from the core's perspective. */
2547
2548 static void
2549 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2550 {
2551 struct thread_info *thread = (struct thread_info *) entry;
2552 struct lwp_info *lp = get_thread_lwp (thread);
2553
2554 if (lp->stopped
2555 && !lp->suspended
2556 && !lp->status_pending_p
2557 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2558 {
2559 int step = thread->last_resume_kind == resume_step;
2560
2561 if (debug_threads)
2562 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2563 target_pid_to_str (ptid_of (thread)),
2564 paddress (lp->stop_pc),
2565 step);
2566
2567 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2568 }
2569 }
2570
2571 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2572 match FILTER_PTID (leaving others pending). The PTIDs can be:
2573 minus_one_ptid, to specify any child; a pid PTID, specifying all
2574 lwps of a thread group; or a PTID representing a single lwp. Store
2575 the stop status through the status pointer WSTATP. OPTIONS is
2576 passed to the waitpid call. Return 0 if no event was found and
2577 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2578 were found. Return the PID of the stopped child otherwise. */
2579
2580 static int
2581 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2582 int *wstatp, int options)
2583 {
2584 struct thread_info *event_thread;
2585 struct lwp_info *event_child, *requested_child;
2586 sigset_t block_mask, prev_mask;
2587
2588 retry:
2589 /* N.B. event_thread points to the thread_info struct that contains
2590 event_child. Keep them in sync. */
2591 event_thread = NULL;
2592 event_child = NULL;
2593 requested_child = NULL;
2594
2595 /* Check for a lwp with a pending status. */
2596
2597 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2598 {
2599 event_thread = (struct thread_info *)
2600 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2601 if (event_thread != NULL)
2602 event_child = get_thread_lwp (event_thread);
2603 if (debug_threads && event_thread)
2604 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2605 }
2606 else if (!ptid_equal (filter_ptid, null_ptid))
2607 {
2608 requested_child = find_lwp_pid (filter_ptid);
2609
2610 if (stopping_threads == NOT_STOPPING_THREADS
2611 && requested_child->status_pending_p
2612 && requested_child->collecting_fast_tracepoint)
2613 {
2614 enqueue_one_deferred_signal (requested_child,
2615 &requested_child->status_pending);
2616 requested_child->status_pending_p = 0;
2617 requested_child->status_pending = 0;
2618 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2619 }
2620
2621 if (requested_child->suspended
2622 && requested_child->status_pending_p)
2623 {
2624 internal_error (__FILE__, __LINE__,
2625 "requesting an event out of a"
2626 " suspended child?");
2627 }
2628
2629 if (requested_child->status_pending_p)
2630 {
2631 event_child = requested_child;
2632 event_thread = get_lwp_thread (event_child);
2633 }
2634 }
2635
2636 if (event_child != NULL)
2637 {
2638 if (debug_threads)
2639 debug_printf ("Got an event from pending child %ld (%04x)\n",
2640 lwpid_of (event_thread), event_child->status_pending);
2641 *wstatp = event_child->status_pending;
2642 event_child->status_pending_p = 0;
2643 event_child->status_pending = 0;
2644 current_thread = event_thread;
2645 return lwpid_of (event_thread);
2646 }
2647
2648 /* But if we don't find a pending event, we'll have to wait.
2649
2650 We only enter this loop if no process has a pending wait status.
2651 Thus any action taken in response to a wait status inside this
2652 loop is responding as soon as we detect the status, not after any
2653 pending events. */
2654
2655 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2656 all signals while here. */
2657 sigfillset (&block_mask);
2658 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2659
2660 /* Always pull all events out of the kernel. We'll randomly select
2661 an event LWP out of all that have events, to prevent
2662 starvation. */
2663 while (event_child == NULL)
2664 {
2665 pid_t ret = 0;
2666
2667 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2668 quirks:
2669
2670 - If the thread group leader exits while other threads in the
2671 thread group still exist, waitpid(TGID, ...) hangs. That
2672 waitpid won't return an exit status until the other threads
2673 in the group are reaped.
2674
2675 - When a non-leader thread execs, that thread just vanishes
2676 without reporting an exit (so we'd hang if we waited for it
2677 explicitly in that case). The exec event is reported to
2678 the TGID pid. */
2679 errno = 0;
2680 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2681
2682 if (debug_threads)
2683 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2684 ret, errno ? strerror (errno) : "ERRNO-OK");
2685
2686 if (ret > 0)
2687 {
2688 if (debug_threads)
2689 {
2690 debug_printf ("LLW: waitpid %ld received %s\n",
2691 (long) ret, status_to_str (*wstatp));
2692 }
2693
2694 /* Filter all events. IOW, leave all events pending. We'll
2695 randomly select an event LWP out of all that have events
2696 below. */
2697 linux_low_filter_event (ret, *wstatp);
2698 /* Retry until nothing comes out of waitpid. A single
2699 SIGCHLD can indicate more than one child stopped. */
2700 continue;
2701 }
2702
2703 /* Now that we've pulled all events out of the kernel, resume
2704 LWPs that don't have an interesting event to report. */
2705 if (stopping_threads == NOT_STOPPING_THREADS)
2706 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2707
2708 /* ... and find an LWP with a status to report to the core, if
2709 any. */
2710 event_thread = (struct thread_info *)
2711 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2712 if (event_thread != NULL)
2713 {
2714 event_child = get_thread_lwp (event_thread);
2715 *wstatp = event_child->status_pending;
2716 event_child->status_pending_p = 0;
2717 event_child->status_pending = 0;
2718 break;
2719 }
2720
2721 /* Check for zombie thread group leaders. Those can't be reaped
2722 until all other threads in the thread group are. */
2723 check_zombie_leaders ();
2724
2725 /* If there are no resumed children left in the set of LWPs we
2726 want to wait for, bail. We can't just block in
2727 waitpid/sigsuspend, because lwps might have been left stopped
2728 in trace-stop state, and we'd be stuck forever waiting for
2729 their status to change (which would only happen if we resumed
2730 them). Even if WNOHANG is set, this return code is preferred
2731 over 0 (below), as it is more detailed. */
2732 if ((find_inferior (&all_threads,
2733 not_stopped_callback,
2734 &wait_ptid) == NULL))
2735 {
2736 if (debug_threads)
2737 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2738 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2739 return -1;
2740 }
2741
2742 /* No interesting event to report to the caller. */
2743 if ((options & WNOHANG))
2744 {
2745 if (debug_threads)
2746 debug_printf ("WNOHANG set, no event found\n");
2747
2748 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2749 return 0;
2750 }
2751
2752 /* Block until we get an event reported with SIGCHLD. */
2753 if (debug_threads)
2754 debug_printf ("sigsuspend'ing\n");
2755
2756 sigsuspend (&prev_mask);
2757 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2758 goto retry;
2759 }
2760
2761 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2762
2763 current_thread = event_thread;
2764
2765 return lwpid_of (event_thread);
2766 }
2767
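/* The blocking logic above follows the classic race-free SIGCHLD wait
   pattern, sketched standalone below with a hypothetical function
   (assumes <signal.h> and <sys/wait.h>).  The key point is that
   signals are blocked *before* the WNOHANG poll, so a child that
   changes state between the poll and the sigsuspend cannot be missed:
   its SIGCHLD stays pending and sigsuspend returns immediately.  */

static int
example_wait_no_race (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  int ret;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while ((ret = waitpid (-1, wstatp, WNOHANG | __WALL)) == 0)
    {
      /* No event yet: atomically unblock signals and sleep until one
	 (e.g. SIGCHLD) arrives.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);

  /* > 0: a child reported an event; -1: no unwaited-for children.  */
  return ret;
}
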
2768 /* Wait for an event from child(ren) PTID. PTIDs can be:
2769 minus_one_ptid, to specify any child; a pid PTID, specifying all
2770 lwps of a thread group; or a PTID representing a single lwp. Store
2771 the stop status through the status pointer WSTATP. OPTIONS is
2772 passed to the waitpid call. Return 0 if no event was found and
2773 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2774 were found. Return the PID of the stopped child otherwise. */
2775
2776 static int
2777 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2778 {
2779 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2780 }
2781
2782 /* Count the LWPs that have had events. */
2783
2784 static int
2785 count_events_callback (struct inferior_list_entry *entry, void *data)
2786 {
2787 struct thread_info *thread = (struct thread_info *) entry;
2788 struct lwp_info *lp = get_thread_lwp (thread);
2789 int *count = (int *) data;
2790
2791 gdb_assert (count != NULL);
2792
2793 /* Count only resumed LWPs that have an event pending. */
2794 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2795 && lp->status_pending_p)
2796 (*count)++;
2797
2798 return 0;
2799 }
2800
2801 /* Select the LWP (if any) that is currently being single-stepped. */
2802
2803 static int
2804 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2805 {
2806 struct thread_info *thread = (struct thread_info *) entry;
2807 struct lwp_info *lp = get_thread_lwp (thread);
2808
2809 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2810 && thread->last_resume_kind == resume_step
2811 && lp->status_pending_p)
2812 return 1;
2813 else
2814 return 0;
2815 }
2816
2817 /* Select the Nth LWP that has had an event. */
2818
2819 static int
2820 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2821 {
2822 struct thread_info *thread = (struct thread_info *) entry;
2823 struct lwp_info *lp = get_thread_lwp (thread);
2824 int *selector = (int *) data;
2825
2826 gdb_assert (selector != NULL);
2827
2828 /* Select only resumed LWPs that have an event pending. */
2829 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2830 && lp->status_pending_p)
2831 if ((*selector)-- == 0)
2832 return 1;
2833
2834 return 0;
2835 }
2836
2837 /* Select one LWP out of those that have events pending. */
2838
2839 static void
2840 select_event_lwp (struct lwp_info **orig_lp)
2841 {
2842 int num_events = 0;
2843 int random_selector;
2844 struct thread_info *event_thread = NULL;
2845
2846 /* In all-stop, give preference to the LWP that is being
2847 single-stepped. There will be at most one, and it's the LWP that
2848 the core is most interested in. If we didn't do this, then we'd
2849 have to handle pending step SIGTRAPs somehow in case the core
2850 later continues the previously-stepped thread, otherwise we'd
2851 report the pending SIGTRAP, and the core, not having stepped the
2852 thread, wouldn't understand what the trap was for, and therefore
2853 would report it to the user as a random signal. */
2854 if (!non_stop)
2855 {
2856 event_thread
2857 = (struct thread_info *) find_inferior (&all_threads,
2858 select_singlestep_lwp_callback,
2859 NULL);
2860 if (event_thread != NULL)
2861 {
2862 if (debug_threads)
2863 debug_printf ("SEL: Select single-step %s\n",
2864 target_pid_to_str (ptid_of (event_thread)));
2865 }
2866 }
2867 if (event_thread == NULL)
2868 {
2869 /* No single-stepping LWP. Select one at random, out of those
2870 which have had events. */
2871
2872 /* First see how many events we have. */
2873 find_inferior (&all_threads, count_events_callback, &num_events);
2874 gdb_assert (num_events > 0);
2875
2876 /* Now randomly pick a LWP out of those that have had
2877 events. */
2878 random_selector = (int)
2879 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
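      /* For illustration: with num_events == 3 and RAND_MAX ==
	 2147483647, a raw rand () value of 1500000000 yields
	 (int) (3 * 1500000000.0 / 2147483648.0) == 2, selecting the
	 third pending LWP.  Dividing by RAND_MAX + 1.0 keeps the
	 result strictly below NUM_EVENTS, and avoids the modulo bias
	 that rand () % num_events would introduce.  */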
2880
2881 if (debug_threads && num_events > 1)
2882 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2883 num_events, random_selector);
2884
2885 event_thread
2886 = (struct thread_info *) find_inferior (&all_threads,
2887 select_event_lwp_callback,
2888 &random_selector);
2889 }
2890
2891 if (event_thread != NULL)
2892 {
2893 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2894
2895 /* Switch the event LWP. */
2896 *orig_lp = event_lp;
2897 }
2898 }
2899
2900 /* Decrement the suspend count of an LWP. */
2901
2902 static int
2903 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2904 {
2905 struct thread_info *thread = (struct thread_info *) entry;
2906 struct lwp_info *lwp = get_thread_lwp (thread);
2907
2908 /* Ignore EXCEPT. */
2909 if (lwp == except)
2910 return 0;
2911
2912 lwp_suspended_decr (lwp);
2913 return 0;
2914 }
2915
2916 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2917 non-NULL. */
2918
2919 static void
2920 unsuspend_all_lwps (struct lwp_info *except)
2921 {
2922 find_inferior (&all_threads, unsuspend_one_lwp, except);
2923 }
2924
2925 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2926 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2927 void *data);
2928 static int lwp_running (struct inferior_list_entry *entry, void *data);
2929 static ptid_t linux_wait_1 (ptid_t ptid,
2930 struct target_waitstatus *ourstatus,
2931 int target_options);
2932
2933 /* Stabilize threads (move out of jump pads).
2934
2935 If a thread is midway collecting a fast tracepoint, we need to
2936 finish the collection and move it out of the jump pad before
2937 reporting the signal.
2938
2939 This avoids recursion while collecting (when a signal arrives
2940 midway, and the signal handler itself collects), which would trash
2941 the trace buffer. In case the user set a breakpoint in a signal
2942 handler, this avoids the backtrace showing the jump pad, etc.
2943 Most importantly, there are certain things we can't do safely if
2944 threads are stopped in a jump pad (or in one of its callees). For
2945 example:
2946
2947 - starting a new trace run. A thread still collecting from the
2948 previous run could trash the trace buffer when resumed. The trace
2949 buffer control structures would have been reset but the thread had
2950 no way to tell. The thread could even be midway through memcpy'ing
2951 to the buffer, which would mean that when resumed, it would clobber
2952 the trace buffer that had been set up for a new run.
2953
2954 - we can't rewrite/reuse the jump pads for new tracepoints
2955 safely. Say you do tstart while a thread is stopped midway through
2956 collecting. When the thread is later resumed, it finishes the
2957 collection, and returns to the jump pad, to execute the original
2958 instruction that was under the tracepoint jump at the time the
2959 older run had been started. If the jump pad had been rewritten
2960 since for something else in the new run, the thread would now
2961 execute the wrong / random instructions. */
2962
2963 static void
2964 linux_stabilize_threads (void)
2965 {
2966 struct thread_info *saved_thread;
2967 struct thread_info *thread_stuck;
2968
2969 thread_stuck
2970 = (struct thread_info *) find_inferior (&all_threads,
2971 stuck_in_jump_pad_callback,
2972 NULL);
2973 if (thread_stuck != NULL)
2974 {
2975 if (debug_threads)
2976 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2977 lwpid_of (thread_stuck));
2978 return;
2979 }
2980
2981 saved_thread = current_thread;
2982
2983 stabilizing_threads = 1;
2984
2985 /* Kick 'em all. */
2986 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2987
2988 /* Loop until all are stopped out of the jump pads. */
2989 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2990 {
2991 struct target_waitstatus ourstatus;
2992 struct lwp_info *lwp;
2993 int wstat;
2994
2995 /* Note that we go through the full wait event loop. While
2996 moving threads out of the jump pad, we need to be able to step
2997 over internal breakpoints and such. */
2998 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2999
3000 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3001 {
3002 lwp = get_thread_lwp (current_thread);
3003
3004 /* Lock it. */
3005 lwp_suspended_inc (lwp);
3006
3007 if (ourstatus.value.sig != GDB_SIGNAL_0
3008 || current_thread->last_resume_kind == resume_stop)
3009 {
3010 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3011 enqueue_one_deferred_signal (lwp, &wstat);
3012 }
3013 }
3014 }
3015
3016 unsuspend_all_lwps (NULL);
3017
3018 stabilizing_threads = 0;
3019
3020 current_thread = saved_thread;
3021
3022 if (debug_threads)
3023 {
3024 thread_stuck
3025 = (struct thread_info *) find_inferior (&all_threads,
3026 stuck_in_jump_pad_callback,
3027 NULL);
3028 if (thread_stuck != NULL)
3029 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3030 lwpid_of (thread_stuck));
3031 }
3032 }
3033
3034 /* Convenience function that is called when the kernel reports an
3035 event that is not passed out to GDB. */
3036
3037 static ptid_t
3038 ignore_event (struct target_waitstatus *ourstatus)
3039 {
3040 /* If we got an event, there may still be others, as a single
3041 SIGCHLD can indicate more than one child stopped. This forces
3042 another target_wait call. */
3043 async_file_mark ();
3044
3045 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3046 return null_ptid;
3047 }
3048
3049 /* Convenience function that is called when the kernel reports an exit
3050 event. This decides whether to report the event to GDB as a
3051 process exit event, a thread exit event, or to suppress the
3052 event. */
3053
3054 static ptid_t
3055 filter_exit_event (struct lwp_info *event_child,
3056 struct target_waitstatus *ourstatus)
3057 {
3058 struct thread_info *thread = get_lwp_thread (event_child);
3059 ptid_t ptid = ptid_of (thread);
3060
3061 if (!last_thread_of_process_p (pid_of (thread)))
3062 {
3063 if (report_thread_events)
3064 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3065 else
3066 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3067
3068 delete_lwp (event_child);
3069 }
3070 return ptid;
3071 }
3072
3073 /* Returns 1 if GDB is interested in any event_child syscalls. */
3074
3075 static int
3076 gdb_catching_syscalls_p (struct lwp_info *event_child)
3077 {
3078 struct thread_info *thread = get_lwp_thread (event_child);
3079 struct process_info *proc = get_thread_process (thread);
3080
3081 return !VEC_empty (int, proc->syscalls_to_catch);
3082 }
3083
3084 /* Returns 1 if GDB is interested in the event_child syscall.
3085 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3086
3087 static int
3088 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3089 {
3090 int i, iter;
3091 int sysno, sysret;
3092 struct thread_info *thread = get_lwp_thread (event_child);
3093 struct process_info *proc = get_thread_process (thread);
3094
3095 if (VEC_empty (int, proc->syscalls_to_catch))
3096 return 0;
3097
3098 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3099 return 1;
3100
3101 get_syscall_trapinfo (event_child, &sysno, &sysret);
3102 for (i = 0;
3103 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3104 i++)
3105 if (iter == sysno)
3106 return 1;
3107
3108 return 0;
3109 }
3110
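/* Illustrative sketch of what the syscall number/result extraction
   done by get_syscall_trapinfo boils down to on x86-64, with a
   hypothetical helper (assumes <sys/ptrace.h> and <sys/user.h>): at
   a PTRACE_SYSCALL stop the syscall number survives in orig_rax, and
   at the syscall-exit stop the return value is in rax.  */

static void
example_x86_64_syscall_trapinfo (int pid, int *sysno, int *sysret)
{
  struct user_regs_struct regs;

  ptrace (PTRACE_GETREGS, pid, (PTRACE_TYPE_ARG3) 0, &regs);
  *sysno = (int) regs.orig_rax;
  *sysret = (int) regs.rax;
}
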
3111 /* Wait for an event from the inferior, and fill in OURSTATUS. */
3112
3113 static ptid_t
3114 linux_wait_1 (ptid_t ptid,
3115 struct target_waitstatus *ourstatus, int target_options)
3116 {
3117 int w;
3118 struct lwp_info *event_child;
3119 int options;
3120 int pid;
3121 int step_over_finished;
3122 int bp_explains_trap;
3123 int maybe_internal_trap;
3124 int report_to_gdb;
3125 int trace_event;
3126 int in_step_range;
3127 int any_resumed;
3128
3129 if (debug_threads)
3130 {
3131 debug_enter ();
3132 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3133 }
3134
3135 /* Translate generic target options into linux options. */
3136 options = __WALL;
3137 if (target_options & TARGET_WNOHANG)
3138 options |= WNOHANG;
3139
3140 bp_explains_trap = 0;
3141 trace_event = 0;
3142 in_step_range = 0;
3143 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3144
3145 /* Find a resumed LWP, if any. */
3146 if (find_inferior (&all_threads,
3147 status_pending_p_callback,
3148 &minus_one_ptid) != NULL)
3149 any_resumed = 1;
3150 else if ((find_inferior (&all_threads,
3151 not_stopped_callback,
3152 &minus_one_ptid) != NULL))
3153 any_resumed = 1;
3154 else
3155 any_resumed = 0;
3156
3157 if (ptid_equal (step_over_bkpt, null_ptid))
3158 pid = linux_wait_for_event (ptid, &w, options);
3159 else
3160 {
3161 if (debug_threads)
3162 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3163 target_pid_to_str (step_over_bkpt));
3164 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3165 }
3166
3167 if (pid == 0 || (pid == -1 && !any_resumed))
3168 {
3169 gdb_assert (target_options & TARGET_WNOHANG);
3170
3171 if (debug_threads)
3172 {
3173 debug_printf ("linux_wait_1 ret = null_ptid, "
3174 "TARGET_WAITKIND_IGNORE\n");
3175 debug_exit ();
3176 }
3177
3178 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3179 return null_ptid;
3180 }
3181 else if (pid == -1)
3182 {
3183 if (debug_threads)
3184 {
3185 debug_printf ("linux_wait_1 ret = null_ptid, "
3186 "TARGET_WAITKIND_NO_RESUMED\n");
3187 debug_exit ();
3188 }
3189
3190 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3191 return null_ptid;
3192 }
3193
3194 event_child = get_thread_lwp (current_thread);
3195
3196 /* linux_wait_for_event only returns an exit status for the last
3197 child of a process. Report it. */
3198 if (WIFEXITED (w) || WIFSIGNALED (w))
3199 {
3200 if (WIFEXITED (w))
3201 {
3202 ourstatus->kind = TARGET_WAITKIND_EXITED;
3203 ourstatus->value.integer = WEXITSTATUS (w);
3204
3205 if (debug_threads)
3206 {
3207 debug_printf ("linux_wait_1 ret = %s, exited with "
3208 "retcode %d\n",
3209 target_pid_to_str (ptid_of (current_thread)),
3210 WEXITSTATUS (w));
3211 debug_exit ();
3212 }
3213 }
3214 else
3215 {
3216 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3217 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3218
3219 if (debug_threads)
3220 {
3221 debug_printf ("linux_wait_1 ret = %s, terminated with "
3222 "signal %d\n",
3223 target_pid_to_str (ptid_of (current_thread)),
3224 WTERMSIG (w));
3225 debug_exit ();
3226 }
3227 }
3228
3229 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3230 return filter_exit_event (event_child, ourstatus);
3231
3232 return ptid_of (current_thread);
3233 }
3234
3235 /* If a step-over executes a breakpoint instruction, then in the
3236 case of a hardware single step it means a gdb/gdbserver breakpoint
3237 had been planted on top of a permanent breakpoint, while in the
3238 case of a software single step it may just mean that gdbserver hit
3239 the reinsert breakpoint. The PC has been adjusted by
3240 save_stop_reason to point at the breakpoint address.
3241 So in the hardware single step case, advance the PC manually past
3242 the breakpoint, and in the software single step case, advance only
3243 if it's not the reinsert_breakpoint we are hitting.
3244 This prevents a program from trapping on a permanent breakpoint
3245 forever. */
3246 if (!ptid_equal (step_over_bkpt, null_ptid)
3247 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3248 && (event_child->stepping
3249 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3250 {
3251 int increment_pc = 0;
3252 int breakpoint_kind = 0;
3253 CORE_ADDR stop_pc = event_child->stop_pc;
3254
3255 breakpoint_kind =
3256 the_target->breakpoint_kind_from_current_state (&stop_pc);
3257 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3258
3259 if (debug_threads)
3260 {
3261 debug_printf ("step-over for %s executed software breakpoint\n",
3262 target_pid_to_str (ptid_of (current_thread)));
3263 }
3264
3265 if (increment_pc != 0)
3266 {
3267 struct regcache *regcache
3268 = get_thread_regcache (current_thread, 1);
3269
3270 event_child->stop_pc += increment_pc;
3271 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3272
3273 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3274 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3275 }
3276 }
3277
3278 /* If this event was not handled before, and is not a SIGTRAP, we
3279 report it. SIGILL and SIGSEGV are also treated as traps in case
3280 a breakpoint is inserted at the current PC. If this target does
3281 not support internal breakpoints at all, we also report the
3282 SIGTRAP without further processing; it's of no concern to us. */
3283 maybe_internal_trap
3284 = (supports_breakpoints ()
3285 && (WSTOPSIG (w) == SIGTRAP
3286 || ((WSTOPSIG (w) == SIGILL
3287 || WSTOPSIG (w) == SIGSEGV)
3288 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3289
3290 if (maybe_internal_trap)
3291 {
3292 /* Handle anything that requires bookkeeping before deciding to
3293 report the event or continue waiting. */
3294
3295 /* First check if we can explain the SIGTRAP with an internal
3296 breakpoint, or if we should possibly report the event to GDB.
3297 Do this before anything that may remove or insert a
3298 breakpoint. */
3299 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3300
3301 /* We have a SIGTRAP, possibly a step-over dance has just
3302 finished. If so, tweak the state machine accordingly,
3303 reinsert breakpoints and delete any reinsert (software
3304 single-step) breakpoints. */
3305 step_over_finished = finish_step_over (event_child);
3306
3307 /* Now invoke the callbacks of any internal breakpoints there. */
3308 check_breakpoints (event_child->stop_pc);
3309
3310 /* Handle tracepoint data collecting. This may overflow the
3311 trace buffer, and cause a tracing stop, removing
3312 breakpoints. */
3313 trace_event = handle_tracepoints (event_child);
3314
3315 if (bp_explains_trap)
3316 {
3317 if (debug_threads)
3318 debug_printf ("Hit a gdbserver breakpoint.\n");
3319 }
3320 }
3321 else
3322 {
3323 /* We have some other signal, possibly a step-over dance was in
3324 progress, and it should be cancelled too. */
3325 step_over_finished = finish_step_over (event_child);
3326 }
3327
3328 /* We have all the data we need. Either report the event to GDB, or
3329 resume threads and keep waiting for more. */
3330
3331 /* If we're collecting a fast tracepoint, finish the collection and
3332 move out of the jump pad before delivering a signal. See
3333 linux_stabilize_threads. */
3334
3335 if (WIFSTOPPED (w)
3336 && WSTOPSIG (w) != SIGTRAP
3337 && supports_fast_tracepoints ()
3338 && agent_loaded_p ())
3339 {
3340 if (debug_threads)
3341 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3342 "to defer or adjust it.\n",
3343 WSTOPSIG (w), lwpid_of (current_thread));
3344
3345 /* Allow debugging the jump pad itself. */
3346 if (current_thread->last_resume_kind != resume_step
3347 && maybe_move_out_of_jump_pad (event_child, &w))
3348 {
3349 enqueue_one_deferred_signal (event_child, &w);
3350
3351 if (debug_threads)
3352 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3353 WSTOPSIG (w), lwpid_of (current_thread));
3354
3355 linux_resume_one_lwp (event_child, 0, 0, NULL);
3356
3357 return ignore_event (ourstatus);
3358 }
3359 }
3360
3361 if (event_child->collecting_fast_tracepoint)
3362 {
3363 if (debug_threads)
3364 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3365 "Check if we're already there.\n",
3366 lwpid_of (current_thread),
3367 event_child->collecting_fast_tracepoint);
3368
3369 trace_event = 1;
3370
3371 event_child->collecting_fast_tracepoint
3372 = linux_fast_tracepoint_collecting (event_child, NULL);
3373
3374 if (event_child->collecting_fast_tracepoint != 1)
3375 {
3376 /* No longer need this breakpoint. */
3377 if (event_child->exit_jump_pad_bkpt != NULL)
3378 {
3379 if (debug_threads)
3380 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3381 "stopping all threads momentarily.\n");
3382
3383 /* Other running threads could hit this breakpoint.
3384 We don't handle moribund locations like GDB does,
3385 instead we always pause all threads when removing
3386 breakpoints, so that any step-over or
3387 decr_pc_after_break adjustment is always taken
3388 care of while the breakpoint is still
3389 inserted. */
3390 stop_all_lwps (1, event_child);
3391
3392 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3393 event_child->exit_jump_pad_bkpt = NULL;
3394
3395 unstop_all_lwps (1, event_child);
3396
3397 gdb_assert (event_child->suspended >= 0);
3398 }
3399 }
3400
3401 if (event_child->collecting_fast_tracepoint == 0)
3402 {
3403 if (debug_threads)
3404 debug_printf ("fast tracepoint finished "
3405 "collecting successfully.\n");
3406
3407 /* We may have a deferred signal to report. */
3408 if (dequeue_one_deferred_signal (event_child, &w))
3409 {
3410 if (debug_threads)
3411 debug_printf ("dequeued one signal.\n");
3412 }
3413 else
3414 {
3415 if (debug_threads)
3416 debug_printf ("no deferred signals.\n");
3417
3418 if (stabilizing_threads)
3419 {
3420 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3421 ourstatus->value.sig = GDB_SIGNAL_0;
3422
3423 if (debug_threads)
3424 {
3425 debug_printf ("linux_wait_1 ret = %s, stopped "
3426 "while stabilizing threads\n",
3427 target_pid_to_str (ptid_of (current_thread)));
3428 debug_exit ();
3429 }
3430
3431 return ptid_of (current_thread);
3432 }
3433 }
3434 }
3435 }
3436
3437 /* Check whether GDB would be interested in this event. */
3438
3439 /* Check if GDB is interested in this syscall. */
3440 if (WIFSTOPPED (w)
3441 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3442 && !gdb_catch_this_syscall_p (event_child))
3443 {
3444 if (debug_threads)
3445 {
3446 debug_printf ("Ignored syscall for LWP %ld.\n",
3447 lwpid_of (current_thread));
3448 }
3449
3450 linux_resume_one_lwp (event_child, event_child->stepping,
3451 0, NULL);
3452 return ignore_event (ourstatus);
3453 }
3454
3455 /* If GDB is not interested in this signal, don't stop other
3456 threads, and don't report it to GDB. Just resume the inferior
3457 right away. We do this for threading-related signals as well as
3458 any that GDB specifically requested we ignore. But never ignore
3459 SIGSTOP if we sent it ourselves, and do not ignore signals when
3460 stepping - they may require special handling to skip the signal
3461 handler. Also never ignore signals that could be caused by a
3462 breakpoint. */
3463 if (WIFSTOPPED (w)
3464 && current_thread->last_resume_kind != resume_step
3465 && (
3466 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3467 (current_process ()->priv->thread_db != NULL
3468 && (WSTOPSIG (w) == __SIGRTMIN
3469 || WSTOPSIG (w) == __SIGRTMIN + 1))
3470 ||
3471 #endif
3472 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3473 && !(WSTOPSIG (w) == SIGSTOP
3474 && current_thread->last_resume_kind == resume_stop)
3475 && !linux_wstatus_maybe_breakpoint (w))))
3476 {
3477 siginfo_t info, *info_p;
3478
3479 if (debug_threads)
3480 debug_printf ("Ignored signal %d for LWP %ld.\n",
3481 WSTOPSIG (w), lwpid_of (current_thread));
3482
3483 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3484 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3485 info_p = &info;
3486 else
3487 info_p = NULL;
3488
3489 if (step_over_finished)
3490 {
3491 /* We cancelled this thread's step-over above. We still
3492 need to unsuspend all other LWPs, and set them back
3493 running again while the signal handler runs. */
3494 unsuspend_all_lwps (event_child);
3495
3496 /* Enqueue the pending signal info so that proceed_all_lwps
3497 doesn't lose it. */
3498 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3499
3500 proceed_all_lwps ();
3501 }
3502 else
3503 {
3504 linux_resume_one_lwp (event_child, event_child->stepping,
3505 WSTOPSIG (w), info_p);
3506 }
3507 return ignore_event (ourstatus);
3508 }
3509
3510 /* Note that all addresses are always "out of the step range" when
3511 there's no range to begin with. */
3512 in_step_range = lwp_in_step_range (event_child);
3513
3514 /* If GDB wanted this thread to single step, and the thread is out
3515 of the step range, we always want to report the SIGTRAP, and let
3516 GDB handle it. Watchpoints should always be reported. So should
3517 signals we can't explain. A SIGTRAP we can't explain could be a
3518 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3519 do, we'd be able to handle GDB breakpoints on top of internal
3520 breakpoints, by handling the internal breakpoint and still
3521 reporting the event to GDB. If we don't, we're out of luck, GDB
3522 won't see the breakpoint hit. If we see a single-step event but
3523 the thread should be continuing, don't pass the trap to gdb.
3524 That indicates that we had previously finished a single-step but
3525 left the single-step pending -- see
3526 complete_ongoing_step_over. */
3527 report_to_gdb = (!maybe_internal_trap
3528 || (current_thread->last_resume_kind == resume_step
3529 && !in_step_range)
3530 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3531 || (!in_step_range
3532 && !bp_explains_trap
3533 && !trace_event
3534 && !step_over_finished
3535 && !(current_thread->last_resume_kind == resume_continue
3536 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3537 || (gdb_breakpoint_here (event_child->stop_pc)
3538 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3539 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3540 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3541
3542 run_breakpoint_commands (event_child->stop_pc);
3543
3544 /* We found no reason GDB would want us to stop. We either hit one
3545 of our own breakpoints, or finished an internal step GDB
3546 shouldn't know about. */
3547 if (!report_to_gdb)
3548 {
3549 if (debug_threads)
3550 {
3551 if (bp_explains_trap)
3552 debug_printf ("Hit a gdbserver breakpoint.\n");
3553 if (step_over_finished)
3554 debug_printf ("Step-over finished.\n");
3555 if (trace_event)
3556 debug_printf ("Tracepoint event.\n");
3557 if (lwp_in_step_range (event_child))
3558 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3559 paddress (event_child->stop_pc),
3560 paddress (event_child->step_range_start),
3561 paddress (event_child->step_range_end));
3562 }
3563
3564 /* We're not reporting this breakpoint to GDB, so apply the
3565 decr_pc_after_break adjustment to the inferior's regcache
3566 ourselves. */
3567
3568 if (the_low_target.set_pc != NULL)
3569 {
3570 struct regcache *regcache
3571 = get_thread_regcache (current_thread, 1);
3572 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3573 }
3574
3575 /* We may have finished stepping over a breakpoint. If so,
3576 we've stopped and suspended all LWPs momentarily except the
3577 stepping one. This is where we resume them all again. We're
3578 going to keep waiting, so use proceed, which handles stepping
3579 over the next breakpoint. */
3580 if (debug_threads)
3581 debug_printf ("proceeding all threads.\n");
3582
3583 if (step_over_finished)
3584 unsuspend_all_lwps (event_child);
3585
3586 proceed_all_lwps ();
3587 return ignore_event (ourstatus);
3588 }
3589
3590 if (debug_threads)
3591 {
3592 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3593 {
3594 char *str;
3595
3596 str = target_waitstatus_to_string (&event_child->waitstatus);
3597 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3598 lwpid_of (get_lwp_thread (event_child)), str);
3599 xfree (str);
3600 }
3601 if (current_thread->last_resume_kind == resume_step)
3602 {
3603 if (event_child->step_range_start == event_child->step_range_end)
3604 debug_printf ("GDB wanted to single-step, reporting event.\n");
3605 else if (!lwp_in_step_range (event_child))
3606 debug_printf ("Out of step range, reporting event.\n");
3607 }
3608 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3609 debug_printf ("Stopped by watchpoint.\n");
3610 else if (gdb_breakpoint_here (event_child->stop_pc))
3611 debug_printf ("Stopped by GDB breakpoint.\n");
3612 if (debug_threads)
3613 debug_printf ("Hit a non-gdbserver trap event.\n");
3614 }
3615
3616 /* Alright, we're going to report a stop. */
3617
3618 if (!stabilizing_threads)
3619 {
3620 /* In all-stop, stop all threads. */
3621 if (!non_stop)
3622 stop_all_lwps (0, NULL);
3623
3624 /* If we're not waiting for a specific LWP, choose an event LWP
3625 from among those that have had events. Giving equal priority
3626 to all LWPs that have had events helps prevent
3627 starvation. */
3628 if (ptid_equal (ptid, minus_one_ptid))
3629 {
3630 event_child->status_pending_p = 1;
3631 event_child->status_pending = w;
3632
3633 select_event_lwp (&event_child);
3634
3635 /* current_thread and event_child must stay in sync. */
3636 current_thread = get_lwp_thread (event_child);
3637
3638 event_child->status_pending_p = 0;
3639 w = event_child->status_pending;
3640 }
3641
3642 if (step_over_finished)
3643 {
3644 if (!non_stop)
3645 {
3646 /* If we were doing a step-over, all other threads but
3647 the stepping one had been paused in start_step_over,
3648 with their suspend counts incremented. We don't want
3649 to do a full unstop/unpause, because we're in
3650 all-stop mode (so we want threads stopped), but we
3651 still need to unsuspend the other threads, to
3652 decrement their `suspended' count back. */
3653 unsuspend_all_lwps (event_child);
3654 }
3655 else
3656 {
3657 /* If we just finished a step-over, then all threads had
3658 been momentarily paused. In all-stop, that's fine,
3659 we want threads stopped by now anyway. In non-stop,
3660 we need to re-resume threads that GDB wanted to be
3661 running. */
3662 unstop_all_lwps (1, event_child);
3663 }
3664 }
3665
3666 /* Stabilize threads (move out of jump pads). */
3667 if (!non_stop)
3668 stabilize_threads ();
3669 }
3670 else
3671 {
3672 /* If we just finished a step-over, then all threads had been
3673 momentarily paused. In all-stop, that's fine, we want
3674 threads stopped by now anyway. In non-stop, we need to
3675 re-resume threads that GDB wanted to be running. */
3676 if (step_over_finished)
3677 unstop_all_lwps (1, event_child);
3678 }
3679
3680 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3681 {
3682 /* If the reported event is an exit, fork, vfork or exec, let
3683 GDB know. */
3684 *ourstatus = event_child->waitstatus;
3685 /* Clear the event lwp's waitstatus since we handled it already. */
3686 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3687 }
3688 else
3689 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3690
3691 /* Now that we've selected our final event LWP, un-adjust its PC if
3692 it was a software breakpoint, and the client doesn't know we can
3693 adjust the breakpoint ourselves. */
3694 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3695 && !swbreak_feature)
3696 {
3697 int decr_pc = the_low_target.decr_pc_after_break;
3698
3699 if (decr_pc != 0)
3700 {
3701 struct regcache *regcache
3702 = get_thread_regcache (current_thread, 1);
3703 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3704 }
3705 }
3706
3707 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3708 {
3709 int sysret;
3710
3711 get_syscall_trapinfo (event_child,
3712 &ourstatus->value.syscall_number, &sysret);
3713 ourstatus->kind = event_child->syscall_state;
3714 }
3715 else if (current_thread->last_resume_kind == resume_stop
3716 && WSTOPSIG (w) == SIGSTOP)
3717 {
3718 /* GDB requested this thread to stop with vCont;t, and it stopped
3719 cleanly, so report it as SIG0. The use of SIGSTOP is an
3720 implementation detail. */
3721 ourstatus->value.sig = GDB_SIGNAL_0;
3722 }
3723 else if (current_thread->last_resume_kind == resume_stop
3724 && WSTOPSIG (w) != SIGSTOP)
3725 {
3726 /* A thread that GDB requested to stop with vCont;t, but which
3727 stopped for some other reason. */
3728 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3729 }
3730 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3731 {
3732 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3733 }
3734
3735 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3736
3737 if (debug_threads)
3738 {
3739 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3740 target_pid_to_str (ptid_of (current_thread)),
3741 ourstatus->kind, ourstatus->value.sig);
3742 debug_exit ();
3743 }
3744
3745 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3746 return filter_exit_event (event_child, ourstatus);
3747
3748 return ptid_of (current_thread);
3749 }
3750
3751 /* Get rid of any pending event in the pipe. */
3752 static void
3753 async_file_flush (void)
3754 {
3755 int ret;
3756 char buf;
3757
3758 do
3759 ret = read (linux_event_pipe[0], &buf, 1);
3760 while (ret >= 0 || (ret == -1 && errno == EINTR));
3761 }
3762
3763 /* Put something in the pipe, so the event loop wakes up. */
3764 static void
3765 async_file_mark (void)
3766 {
3767 int ret;
3768
3769 async_file_flush ();
3770
3771 do
3772 ret = write (linux_event_pipe[1], "+", 1);
3773 while (ret == 0 || (ret == -1 && errno == EINTR));
3774
3775 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3776 be awakened anyway. */
3777 }
3778
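/* Implementation of the wait target op.  Wrap linux_wait_1, looping
   until an event worth reporting arrives (unless TARGET_WNOHANG was
   requested), and poke the event pipe again afterwards so the event
   loop rechecks for further pending stops.  */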
3779 static ptid_t
3780 linux_wait (ptid_t ptid,
3781 struct target_waitstatus *ourstatus, int target_options)
3782 {
3783 ptid_t event_ptid;
3784
3785 /* Flush the async file first. */
3786 if (target_is_async_p ())
3787 async_file_flush ();
3788
3789 do
3790 {
3791 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3792 }
3793 while ((target_options & TARGET_WNOHANG) == 0
3794 && ptid_equal (event_ptid, null_ptid)
3795 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3796
3797 /* If at least one stop was reported, there may be more. A single
3798 SIGCHLD can signal more than one child stop. */
3799 if (target_is_async_p ()
3800 && (target_options & TARGET_WNOHANG) != 0
3801 && !ptid_equal (event_ptid, null_ptid))
3802 async_file_mark ();
3803
3804 return event_ptid;
3805 }
3806
3807 /* Send a signal to an LWP. */
3808
3809 static int
3810 kill_lwp (unsigned long lwpid, int signo)
3811 {
3812 int ret;
3813
3814 errno = 0;
3815 ret = syscall (__NR_tkill, lwpid, signo);
3816 if (errno == ENOSYS)
3817 {
3818 /* If tkill fails, then we are not using nptl threads, a
3819 configuration we no longer support. */
3820 perror_with_name (("tkill"));
3821 }
3822 return ret;
3823 }
3824
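/* Stop LWP, by queueing a SIGSTOP for it via send_sigstop.  */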
3825 void
3826 linux_stop_lwp (struct lwp_info *lwp)
3827 {
3828 send_sigstop (lwp);
3829 }
3830
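/* Send a SIGSTOP to LWP, unless a stop is already pending for it.  */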
3831 static void
3832 send_sigstop (struct lwp_info *lwp)
3833 {
3834 int pid;
3835
3836 pid = lwpid_of (get_lwp_thread (lwp));
3837
3838 /* If we already have a pending stop signal for this process, don't
3839 send another. */
3840 if (lwp->stop_expected)
3841 {
3842 if (debug_threads)
3843 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3844
3845 return;
3846 }
3847
3848 if (debug_threads)
3849 debug_printf ("Sending sigstop to lwp %d\n", pid);
3850
3851 lwp->stop_expected = 1;
3852 kill_lwp (pid, SIGSTOP);
3853 }
3854
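/* find_inferior callback.  Send a SIGSTOP to the LWP behind ENTRY,
   unless that LWP is EXCEPT or is already stopped.  */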
3855 static int
3856 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3857 {
3858 struct thread_info *thread = (struct thread_info *) entry;
3859 struct lwp_info *lwp = get_thread_lwp (thread);
3860
3861 /* Ignore EXCEPT. */
3862 if (lwp == except)
3863 return 0;
3864
3865 if (lwp->stopped)
3866 return 0;
3867
3868 send_sigstop (lwp);
3869 return 0;
3870 }
3871
3872 /* Increment the suspend count of an LWP, and stop it, if not stopped
3873 yet. */
3874 static int
3875 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3876 void *except)
3877 {
3878 struct thread_info *thread = (struct thread_info *) entry;
3879 struct lwp_info *lwp = get_thread_lwp (thread);
3880
3881 /* Ignore EXCEPT. */
3882 if (lwp == except)
3883 return 0;
3884
3885 lwp_suspended_inc (lwp);
3886
3887 return send_sigstop_callback (entry, except);
3888 }
3889
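/* Record that LWP exited or was killed, with wait status WSTAT, so
   that the event can be reported to GDB later, and mark the LWP as
   stopped so no further stop attempts are made on it.  */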
3890 static void
3891 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3892 {
3893 /* Store the exit status for later. */
3894 lwp->status_pending_p = 1;
3895 lwp->status_pending = wstat;
3896
3897 /* Store in waitstatus as well, as there's nothing else to process
3898 for this event. */
3899 if (WIFEXITED (wstat))
3900 {
3901 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3902 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3903 }
3904 else if (WIFSIGNALED (wstat))
3905 {
3906 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3907 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3908 }
3909
3910 /* Prevent trying to stop it. */
3911 lwp->stopped = 1;
3912
3913 /* No further stops are expected from a dead lwp. */
3914 lwp->stop_expected = 0;
3915 }
3916
3917 /* Return true if LWP has exited already, and has a pending exit event
3918 to report to GDB. */
3919
3920 static int
3921 lwp_is_marked_dead (struct lwp_info *lwp)
3922 {
3923 return (lwp->status_pending_p
3924 && (WIFEXITED (lwp->status_pending)
3925 || WIFSIGNALED (lwp->status_pending)));
3926 }
3927
3928 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3929
3930 static void
3931 wait_for_sigstop (void)
3932 {
3933 struct thread_info *saved_thread;
3934 ptid_t saved_tid;
3935 int wstat;
3936 int ret;
3937
3938 saved_thread = current_thread;
3939 if (saved_thread != NULL)
3940 saved_tid = saved_thread->entry.id;
3941 else
3942 saved_tid = null_ptid; /* avoid bogus unused warning */
3943
3944 if (debug_threads)
3945 debug_printf ("wait_for_sigstop: pulling events\n");
3946
3947 /* Passing NULL_PTID as filter indicates we want all events to be
3948 left pending. Eventually this returns when there are no
3949 unwaited-for children left. */
3950 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3951 &wstat, __WALL);
3952 gdb_assert (ret == -1);
3953
3954 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3955 current_thread = saved_thread;
3956 else
3957 {
3958 if (debug_threads)
3959 debug_printf ("Previously current thread died.\n");
3960
3961 /* We can't change the current inferior behind GDB's back,
3962 otherwise a subsequent command may apply to the wrong
3963 process. */
3964 current_thread = NULL;
3965 }
3966 }
3967
3968 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3969 move it out, because we need to report the stop event to GDB. For
3970 example, if the user puts a breakpoint in the jump pad, it's
3971 because she wants to debug it. */
3972
3973 static int
3974 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3975 {
3976 struct thread_info *thread = (struct thread_info *) entry;
3977 struct lwp_info *lwp = get_thread_lwp (thread);
3978
3979 if (lwp->suspended != 0)
3980 {
3981 internal_error (__FILE__, __LINE__,
3982 "LWP %ld is suspended, suspended=%d\n",
3983 lwpid_of (thread), lwp->suspended);
3984 }
3985 gdb_assert (lwp->stopped);
3986
3987 /* Allow debugging the jump pad, gdb_collect, etc.. */
3988 return (supports_fast_tracepoints ()
3989 && agent_loaded_p ()
3990 && (gdb_breakpoint_here (lwp->stop_pc)
3991 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3992 || thread->last_resume_kind == resume_step)
3993 && linux_fast_tracepoint_collecting (lwp, NULL));
3994 }
3995
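/* Callback for for_each_inferior.  If the LWP behind ENTRY stopped
   inside a fast tracepoint jump pad for an uninteresting reason,
   defer any pending signal and set the LWP running again so it can
   finish collecting and move out of the pad; otherwise, leave it
   stopped and count it as suspended.  */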
3996 static void
3997 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3998 {
3999 struct thread_info *thread = (struct thread_info *) entry;
4000 struct thread_info *saved_thread;
4001 struct lwp_info *lwp = get_thread_lwp (thread);
4002 int *wstat;
4003
4004 if (lwp->suspended != 0)
4005 {
4006 internal_error (__FILE__, __LINE__,
4007 "LWP %ld is suspended, suspended=%d\n",
4008 lwpid_of (thread), lwp->suspended);
4009 }
4010 gdb_assert (lwp->stopped);
4011
4012 /* For gdb_breakpoint_here. */
4013 saved_thread = current_thread;
4014 current_thread = thread;
4015
4016 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4017
4018 /* Allow debugging the jump pad, gdb_collect, etc. */
4019 if (!gdb_breakpoint_here (lwp->stop_pc)
4020 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4021 && thread->last_resume_kind != resume_step
4022 && maybe_move_out_of_jump_pad (lwp, wstat))
4023 {
4024 if (debug_threads)
4025 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4026 lwpid_of (thread));
4027
4028 if (wstat)
4029 {
4030 lwp->status_pending_p = 0;
4031 enqueue_one_deferred_signal (lwp, wstat);
4032
4033 if (debug_threads)
4034 debug_printf ("Signal %d for LWP %ld deferred "
4035 "(in jump pad)\n",
4036 WSTOPSIG (*wstat), lwpid_of (thread));
4037 }
4038
4039 linux_resume_one_lwp (lwp, 0, 0, NULL);
4040 }
4041 else
4042 lwp_suspended_inc (lwp);
4043
4044 current_thread = saved_thread;
4045 }
4046
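/* find_inferior callback.  Return true if the LWP behind ENTRY is
   running, i.e., neither stopped nor marked dead.  */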
4047 static int
4048 lwp_running (struct inferior_list_entry *entry, void *data)
4049 {
4050 struct thread_info *thread = (struct thread_info *) entry;
4051 struct lwp_info *lwp = get_thread_lwp (thread);
4052
4053 if (lwp_is_marked_dead (lwp))
4054 return 0;
4055 if (lwp->stopped)
4056 return 0;
4057 return 1;
4058 }
4059
4060 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4061 If SUSPEND, then also increase the suspend count of every LWP,
4062 except EXCEPT. */
4063
4064 static void
4065 stop_all_lwps (int suspend, struct lwp_info *except)
4066 {
4067 /* Should not be called recursively. */
4068 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4069
4070 if (debug_threads)
4071 {
4072 debug_enter ();
4073 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4074 suspend ? "stop-and-suspend" : "stop",
4075 except != NULL
4076 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4077 : "none");
4078 }
4079
4080 stopping_threads = (suspend
4081 ? STOPPING_AND_SUSPENDING_THREADS
4082 : STOPPING_THREADS);
4083
4084 if (suspend)
4085 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4086 else
4087 find_inferior (&all_threads, send_sigstop_callback, except);
4088 wait_for_sigstop ();
4089 stopping_threads = NOT_STOPPING_THREADS;
4090
4091 if (debug_threads)
4092 {
4093 debug_printf ("stop_all_lwps done, setting stopping_threads "
4094 "back to !stopping\n");
4095 debug_exit ();
4096 }
4097 }
4098
4099 /* Enqueue one signal in the chain of signals which need to be
4100 delivered to this process on next resume. */
4101
4102 static void
4103 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4104 {
4105 struct pending_signals *p_sig = XNEW (struct pending_signals);
4106
4107 p_sig->prev = lwp->pending_signals;
4108 p_sig->signal = signal;
4109 if (info == NULL)
4110 memset (&p_sig->info, 0, sizeof (siginfo_t));
4111 else
4112 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4113 lwp->pending_signals = p_sig;
4114 }
4115
4116 /* Install breakpoints for software single stepping. */
4117
4118 static void
4119 install_software_single_step_breakpoints (struct lwp_info *lwp)
4120 {
4121 int i;
4122 CORE_ADDR pc;
4123 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4124 VEC (CORE_ADDR) *next_pcs = NULL;
4125 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4126
4127 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4128
4129 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4130 set_reinsert_breakpoint (pc);
4131
4132 do_cleanups (old_chain);
4133 }
4134
4135 /* Single-step LWP, via hardware or software single-step. Return 1
4136 if hardware single-stepping is used, 0 if software single-stepping
4137 is used or if single-stepping is not supported at all. */
4138
4139 static int
4140 single_step (struct lwp_info* lwp)
4141 {
4142 int step = 0;
4143
4144 if (can_hardware_single_step ())
4145 {
4146 step = 1;
4147 }
4148 else if (can_software_single_step ())
4149 {
4150 install_software_single_step_breakpoints (lwp);
4151 step = 0;
4152 }
4153 else
4154 {
4155 if (debug_threads)
4156 debug_printf ("stepping is not implemented on this target");
4157 }
4158
4159 return step;
4160 }
4161
4162 /* Return true if a signal can be delivered to LWP, i.e., if we are
4163 not trying to finish a fast tracepoint collect. Since a signal can
4164 be delivered during a step-over, the program may enter the signal
4165 handler and trap again after returning from it. We can live with
4166 such spurious double traps. */
4167
4168 static int
4169 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4170 {
4171 return !lwp->collecting_fast_tracepoint;
4172 }
4173
4174 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4175 SIGNAL is nonzero, give it that signal. */
4176
4177 static void
4178 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4179 int step, int signal, siginfo_t *info)
4180 {
4181 struct thread_info *thread = get_lwp_thread (lwp);
4182 struct thread_info *saved_thread;
4183 int fast_tp_collecting;
4184 int ptrace_request;
4185 struct process_info *proc = get_thread_process (thread);
4186
4187 /* Note that the target description may not be initialized yet
4188 (proc->tdesc == NULL), because the program hasn't stopped at its
4189 first instruction; this happens while GDBserver is skipping the
4190 extra traps from the wrapper program (see option --wrapper). Code
4191 in this function that requires register access must therefore be
4192 guarded by a check such as proc->tdesc != NULL. */
4193
4194 if (lwp->stopped == 0)
4195 return;
4196
4197 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4198
4199 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4200
4201 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4202
4203 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4204 user used the "jump" command, or "set $pc = foo"). */
4205 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4206 {
4207 /* Collecting 'while-stepping' actions doesn't make sense
4208 anymore. */
4209 release_while_stepping_state_list (thread);
4210 }
4211
4212 /* If we have pending signals or status, and a new signal, enqueue the
4213 signal. Also enqueue the signal if it can't be delivered to the
4214 inferior right now. */
4215 if (signal != 0
4216 && (lwp->status_pending_p
4217 || lwp->pending_signals != NULL
4218 || !lwp_signal_can_be_delivered (lwp)))
4219 {
4220 enqueue_pending_signal (lwp, signal, info);
4221
4222 /* Postpone any pending signal. It was enqueued above. */
4223 signal = 0;
4224 }
4225
4226 if (lwp->status_pending_p)
4227 {
4228 if (debug_threads)
4229 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4230 " has pending status\n",
4231 lwpid_of (thread), step ? "step" : "continue",
4232 lwp->stop_expected ? "expected" : "not expected");
4233 return;
4234 }
4235
4236 saved_thread = current_thread;
4237 current_thread = thread;
4238
4239 /* This bit needs some thinking about. If we get a signal that
4240 we must report while a single-step reinsert is still pending,
4241 we often end up resuming the thread. It might be better to
4242 (ew) allow a stack of pending events; then we could be sure that
4243 the reinsert happened right away and not lose any signals.
4244
4245 Making this stack would also shrink the window in which breakpoints are
4246 uninserted (see comment in linux_wait_for_lwp) but not enough for
4247 complete correctness, so it won't solve that problem. It may be
4248 worthwhile just to solve this one, however. */
4249 if (lwp->bp_reinsert != 0)
4250 {
4251 if (debug_threads)
4252 debug_printf (" pending reinsert at 0x%s\n",
4253 paddress (lwp->bp_reinsert));
4254
4255 if (can_hardware_single_step ())
4256 {
4257 if (fast_tp_collecting == 0)
4258 {
4259 if (step == 0)
4260 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4261 if (lwp->suspended)
4262 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4263 lwp->suspended);
4264 }
4265 }
4266
4267 step = maybe_hw_step (thread);
4268 }
4269 else
4270 {
4271 /* If the thread isn't doing step-over, there shouldn't be any
4272 reinsert breakpoints. */
4273 gdb_assert (!has_reinsert_breakpoints (proc));
4274 }
4275
4276 if (fast_tp_collecting == 1)
4277 {
4278 if (debug_threads)
4279 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4280 " (exit-jump-pad-bkpt)\n",
4281 lwpid_of (thread));
4282 }
4283 else if (fast_tp_collecting == 2)
4284 {
4285 if (debug_threads)
4286 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4287 " single-stepping\n",
4288 lwpid_of (thread));
4289
4290 if (can_hardware_single_step ())
4291 step = 1;
4292 else
4293 {
4294 internal_error (__FILE__, __LINE__,
4295 "moving out of jump pad single-stepping"
4296 " not implemented on this target");
4297 }
4298 }
4299
4300 /* If we have while-stepping actions in this thread, set it stepping.
4301 If we have a signal to deliver, it may or may not be set to
4302 SIG_IGN, we don't know. Assume so, and allow collecting
4303 while-stepping into a signal handler. A possible smart thing to
4304 do would be to set an internal breakpoint at the signal return
4305 address, continue, and carry on catching this while-stepping
4306 action only when that breakpoint is hit. A future
4307 enhancement. */
4308 if (thread->while_stepping != NULL)
4309 {
4310 if (debug_threads)
4311 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4312 lwpid_of (thread));
4313
4314 step = single_step (lwp);
4315 }
4316
4317 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4318 {
4319 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4320
4321 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4322
4323 if (debug_threads)
4324 {
4325 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4326 (long) lwp->stop_pc);
4327 }
4328 }
4329
4330 /* If we have pending signals, consume one if it can be delivered to
4331 the inferior. */
4332 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4333 {
4334 struct pending_signals **p_sig;
4335
4336 p_sig = &lwp->pending_signals;
4337 while ((*p_sig)->prev != NULL)
4338 p_sig = &(*p_sig)->prev;
4339
4340 signal = (*p_sig)->signal;
4341 if ((*p_sig)->info.si_signo != 0)
4342 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4343 &(*p_sig)->info);
4344
4345 free (*p_sig);
4346 *p_sig = NULL;
4347 }
4348
4349 if (debug_threads)
4350 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4351 lwpid_of (thread), step ? "step" : "continue", signal,
4352 lwp->stop_expected ? "expected" : "not expected");
4353
4354 if (the_low_target.prepare_to_resume != NULL)
4355 the_low_target.prepare_to_resume (lwp);
4356
4357 regcache_invalidate_thread (thread);
4358 errno = 0;
4359 lwp->stepping = step;
4360 if (step)
4361 ptrace_request = PTRACE_SINGLESTEP;
4362 else if (gdb_catching_syscalls_p (lwp))
4363 ptrace_request = PTRACE_SYSCALL;
4364 else
4365 ptrace_request = PTRACE_CONT;
4366 ptrace (ptrace_request,
4367 lwpid_of (thread),
4368 (PTRACE_TYPE_ARG3) 0,
4369 /* Coerce to a uintptr_t first to avoid potential gcc warning
4370 of coercing an 8 byte integer to a 4 byte pointer. */
4371 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4372
4373 current_thread = saved_thread;
4374 if (errno)
4375 perror_with_name ("resuming thread");
4376
4377 /* Successfully resumed. Clear state that no longer makes sense,
4378 and mark the LWP as running. Must not do this before resuming
4379 otherwise if that fails other code will be confused. E.g., we'd
4380 later try to stop the LWP and hang forever waiting for a stop
4381 status. Note that we must not throw after this is cleared,
4382 otherwise handle_zombie_lwp_error would get confused. */
4383 lwp->stopped = 0;
4384 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4385 }
4386
4387 /* Called when we try to resume a stopped LWP and that errors out. If
4388 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4389 or about to become), discard the error, clear any pending status
4390 the LWP may have, and return true (we'll collect the exit status
4391 soon enough). Otherwise, return false. */
4392
4393 static int
4394 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4395 {
4396 struct thread_info *thread = get_lwp_thread (lp);
4397
4398 /* If we get an error after resuming the LWP successfully, we'd
4399 confuse !T state for the LWP being gone. */
4400 gdb_assert (lp->stopped);
4401
4402 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4403 because even if ptrace failed with ESRCH, the tracee may be "not
4404 yet fully dead", but already refusing ptrace requests. In that
4405 case the tracee has 'R (Running)' state for a little bit
4406 (observed in Linux 3.18). See also the note on ESRCH in the
4407 ptrace(2) man page. Instead, check whether the LWP has any state
4408 other than ptrace-stopped. */
4409
4410 /* Don't assume anything if /proc/PID/status can't be read. */
4411 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4412 {
4413 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4414 lp->status_pending_p = 0;
4415 return 1;
4416 }
4417 return 0;
4418 }
4419
4420 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4421 disappears while we try to resume it. */
4422
4423 static void
4424 linux_resume_one_lwp (struct lwp_info *lwp,
4425 int step, int signal, siginfo_t *info)
4426 {
4427 TRY
4428 {
4429 linux_resume_one_lwp_throw (lwp, step, signal, info);
4430 }
4431 CATCH (ex, RETURN_MASK_ERROR)
4432 {
4433 if (!check_ptrace_stopped_lwp_gone (lwp))
4434 throw_exception (ex);
4435 }
4436 END_CATCH
4437 }
4438
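/* A resume request array and its element count, passed as a unit to
   linux_set_resume_request through find_inferior.  */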
4439 struct thread_resume_array
4440 {
4441 struct thread_resume *resume;
4442 size_t n;
4443 };
4444
4445 /* This function is called once per thread via find_inferior.
4446 ARG is a pointer to a thread_resume_array struct.
4447 We look up the thread specified by ENTRY in ARG, and mark the thread
4448 with a pointer to the appropriate resume request.
4449
4450 This algorithm is O(threads * resume elements), but the number of
4451 resume elements is small (and will remain small at least until GDB
4452 supports thread suspension). */
4453
4454 static int
4455 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4456 {
4457 struct thread_info *thread = (struct thread_info *) entry;
4458 struct lwp_info *lwp = get_thread_lwp (thread);
4459 int ndx;
4460 struct thread_resume_array *r;
4461
4462 r = (struct thread_resume_array *) arg;
4463
4464 for (ndx = 0; ndx < r->n; ndx++)
4465 {
4466 ptid_t ptid = r->resume[ndx].thread;
4467 if (ptid_equal (ptid, minus_one_ptid)
4468 || ptid_equal (ptid, entry->id)
4469 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4470 of PID'. */
4471 || (ptid_get_pid (ptid) == pid_of (thread)
4472 && (ptid_is_pid (ptid)
4473 || ptid_get_lwp (ptid) == -1)))
4474 {
4475 if (r->resume[ndx].kind == resume_stop
4476 && thread->last_resume_kind == resume_stop)
4477 {
4478 if (debug_threads)
4479 debug_printf ("already %s LWP %ld at GDB's request\n",
4480 (thread->last_status.kind
4481 == TARGET_WAITKIND_STOPPED)
4482 ? "stopped"
4483 : "stopping",
4484 lwpid_of (thread));
4485
4486 continue;
4487 }
4488
4489 lwp->resume = &r->resume[ndx];
4490 thread->last_resume_kind = lwp->resume->kind;
4491
4492 lwp->step_range_start = lwp->resume->step_range_start;
4493 lwp->step_range_end = lwp->resume->step_range_end;
4494
4495 /* If we had a deferred signal to report, dequeue one now.
4496 This can happen if LWP gets more than one signal while
4497 trying to get out of a jump pad. */
4498 if (lwp->stopped
4499 && !lwp->status_pending_p
4500 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4501 {
4502 lwp->status_pending_p = 1;
4503
4504 if (debug_threads)
4505 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4506 "leaving status pending.\n",
4507 WSTOPSIG (lwp->status_pending),
4508 lwpid_of (thread));
4509 }
4510
4511 return 0;
4512 }
4513 }
4514
4515 /* No resume action for this thread. */
4516 lwp->resume = NULL;
4517
4518 return 0;
4519 }
4520
4521 /* find_inferior callback for linux_resume.
4522 Set *FLAG_P if this lwp has an interesting status pending. */
4523
4524 static int
4525 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4526 {
4527 struct thread_info *thread = (struct thread_info *) entry;
4528 struct lwp_info *lwp = get_thread_lwp (thread);
4529
4530 /* LWPs which will not be resumed are not interesting, because
4531 we might not wait for them next time through linux_wait. */
4532 if (lwp->resume == NULL)
4533 return 0;
4534
4535 if (thread_still_has_status_pending_p (thread))
4536 * (int *) flag_p = 1;
4537
4538 return 0;
4539 }
4540
4541 /* Return 1 if this lwp that GDB wants running is stopped at an
4542 internal breakpoint that we need to step over. It assumes that any
4543 required STOP_PC adjustment has already been propagated to the
4544 inferior's regcache. */
4545
4546 static int
4547 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4548 {
4549 struct thread_info *thread = (struct thread_info *) entry;
4550 struct lwp_info *lwp = get_thread_lwp (thread);
4551 struct thread_info *saved_thread;
4552 CORE_ADDR pc;
4553 struct process_info *proc = get_thread_process (thread);
4554
4555 /* GDBserver is still skipping the extra traps from the wrapper
4556 program; no step-over is needed. */
4557 if (proc->tdesc == NULL)
4558 return 0;
4559
4560 /* LWPs which will not be resumed are not interesting, because we
4561 might not wait for them next time through linux_wait. */
4562
4563 if (!lwp->stopped)
4564 {
4565 if (debug_threads)
4566 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4567 lwpid_of (thread));
4568 return 0;
4569 }
4570
4571 if (thread->last_resume_kind == resume_stop)
4572 {
4573 if (debug_threads)
4574 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4575 " stopped\n",
4576 lwpid_of (thread));
4577 return 0;
4578 }
4579
4580 gdb_assert (lwp->suspended >= 0);
4581
4582 if (lwp->suspended)
4583 {
4584 if (debug_threads)
4585 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4586 lwpid_of (thread));
4587 return 0;
4588 }
4589
4590 if (lwp->status_pending_p)
4591 {
4592 if (debug_threads)
4593 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4594 " status.\n",
4595 lwpid_of (thread));
4596 return 0;
4597 }
4598
4599 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4600 or we have. */
4601 pc = get_pc (lwp);
4602
4603 /* If the PC has changed since we stopped, then don't do anything,
4604 and let the breakpoint/tracepoint be hit. This happens if, for
4605 instance, GDB handled the decr_pc_after_break subtraction itself,
4606 GDB is OOL stepping this thread, the user has issued a "jump"
4607 command, or the user has poked the thread's registers. */
4608 if (pc != lwp->stop_pc)
4609 {
4610 if (debug_threads)
4611 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4612 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4613 lwpid_of (thread),
4614 paddress (lwp->stop_pc), paddress (pc));
4615 return 0;
4616 }
4617
4618 /* On software single-step targets, resume the inferior and deliver
4619 the pending signal rather than stepping over. */
4620 if (can_software_single_step ()
4621 && lwp->pending_signals != NULL
4622 && lwp_signal_can_be_delivered (lwp))
4623 {
4624 if (debug_threads)
4625 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4626 " signals.\n",
4627 lwpid_of (thread));
4628
4629 return 0;
4630 }
4631
4632 saved_thread = current_thread;
4633 current_thread = thread;
4634
4635 /* We can only step over breakpoints we know about. */
4636 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4637 {
4638 /* Don't step over a breakpoint that GDB expects to hit, though.
4639 If the condition is evaluated on the target's side and it
4640 evaluates to false, step over this breakpoint as well. */
4641 if (gdb_breakpoint_here (pc)
4642 && gdb_condition_true_at_breakpoint (pc)
4643 && gdb_no_commands_at_breakpoint (pc))
4644 {
4645 if (debug_threads)
4646 debug_printf ("Need step over [LWP %ld]? yes, but found"
4647 " GDB breakpoint at 0x%s; skipping step over\n",
4648 lwpid_of (thread), paddress (pc));
4649
4650 current_thread = saved_thread;
4651 return 0;
4652 }
4653 else
4654 {
4655 if (debug_threads)
4656 debug_printf ("Need step over [LWP %ld]? yes, "
4657 "found breakpoint at 0x%s\n",
4658 lwpid_of (thread), paddress (pc));
4659
4660 /* We've found an lwp that needs stepping over --- return 1 so
4661 that find_inferior stops looking. */
4662 current_thread = saved_thread;
4663
4664 return 1;
4665 }
4666 }
4667
4668 current_thread = saved_thread;
4669
4670 if (debug_threads)
4671 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4672 " at 0x%s\n",
4673 lwpid_of (thread), paddress (pc));
4674
4675 return 0;
4676 }
4677
4678 /* Start a step-over operation on LWP. When LWP is stopped at a
4679 breakpoint, we need to move the breakpoint out of the way to make
4680 progress. If we let other threads run while we do that, they may
4681 pass by the breakpoint location and miss hitting it. To avoid
4682 that, a step-over momentarily stops all threads while LWP is
4683 single-stepped (by either hardware or software) with the
4684 breakpoint temporarily uninserted from the inferior. When the
4685 single-step finishes, we reinsert the breakpoint and let all
4686 threads that are supposed to be running run again. */
4687
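/* In outline, the flow implemented below and in finish_step_over is
   roughly:

     stop_all_lwps (1, lwp);          stop and suspend all other LWPs
     uninsert_breakpoints_at (pc);    lift the breakpoint at PC
     step = single_step (lwp);        hw step, or sw-step breakpoints
     linux_resume_one_lwp (lwp, step, 0, NULL);
     ... wait for the step to finish ...
     reinsert_breakpoints_at (pc);    done in finish_step_over
     unsuspend/unstop the other LWPs  done by the callers

   with step_over_bkpt recording which LWP owes us the step event.  */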
4688 static int
4689 start_step_over (struct lwp_info *lwp)
4690 {
4691 struct thread_info *thread = get_lwp_thread (lwp);
4692 struct thread_info *saved_thread;
4693 CORE_ADDR pc;
4694 int step;
4695
4696 if (debug_threads)
4697 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4698 lwpid_of (thread));
4699
4700 stop_all_lwps (1, lwp);
4701
4702 if (lwp->suspended != 0)
4703 {
4704 internal_error (__FILE__, __LINE__,
4705 "LWP %ld suspended=%d\n", lwpid_of (thread),
4706 lwp->suspended);
4707 }
4708
4709 if (debug_threads)
4710 debug_printf ("Done stopping all threads for step-over.\n");
4711
4712 /* Note that we should always reach here with an already adjusted PC,
4713 either by GDB (if we're resuming due to GDB's request), or by our
4714 caller, if we just finished handling an internal breakpoint GDB
4715 shouldn't care about. */
4716 pc = get_pc (lwp);
4717
4718 saved_thread = current_thread;
4719 current_thread = thread;
4720
4721 lwp->bp_reinsert = pc;
4722 uninsert_breakpoints_at (pc);
4723 uninsert_fast_tracepoint_jumps_at (pc);
4724
4725 step = single_step (lwp);
4726
4727 current_thread = saved_thread;
4728
4729 linux_resume_one_lwp (lwp, step, 0, NULL);
4730
4731 /* Require next event from this LWP. */
4732 step_over_bkpt = thread->entry.id;
4733 return 1;
4734 }
4735
4736 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4737 start_step_over, if still there, and, on targets without hardware
4738 single-step, delete any reinsert breakpoints we had set. */
4739
4740 static int
4741 finish_step_over (struct lwp_info *lwp)
4742 {
4743 if (lwp->bp_reinsert != 0)
4744 {
4745 struct thread_info *saved_thread = current_thread;
4746
4747 if (debug_threads)
4748 debug_printf ("Finished step over.\n");
4749
4750 current_thread = get_lwp_thread (lwp);
4751
4752 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4753 may be no breakpoint to reinsert there by now. */
4754 reinsert_breakpoints_at (lwp->bp_reinsert);
4755 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4756
4757 lwp->bp_reinsert = 0;
4758
4759 /* Delete any software-single-step reinsert breakpoints. No
4760 longer needed. We don't have to worry about other threads
4761 hitting this trap, and later not being able to explain it,
4762 because we were stepping over a breakpoint, and we hold all
4763 threads but LWP stopped while doing that. */
4764 if (!can_hardware_single_step ())
4765 {
4766 gdb_assert (has_reinsert_breakpoints (current_process ()));
4767 delete_reinsert_breakpoints ();
4768 }
4769
4770 step_over_bkpt = null_ptid;
4771 current_thread = saved_thread;
4772 return 1;
4773 }
4774 else
4775 return 0;
4776 }
4777
4778 /* If there's a step over in progress, wait until all threads stop
4779 (that is, until the stepping thread finishes its step), and
4780 unsuspend all lwps. The stepping thread ends with its status
4781 pending, which is processed later when we get back to processing
4782 events. */
4783
4784 static void
4785 complete_ongoing_step_over (void)
4786 {
4787 if (!ptid_equal (step_over_bkpt, null_ptid))
4788 {
4789 struct lwp_info *lwp;
4790 int wstat;
4791 int ret;
4792
4793 if (debug_threads)
4794 debug_printf ("detach: step over in progress, finish it first\n");
4795
4796 /* Passing NULL_PTID as filter indicates we want all events to
4797 be left pending. Eventually this returns when there are no
4798 unwaited-for children left. */
4799 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4800 &wstat, __WALL);
4801 gdb_assert (ret == -1);
4802
4803 lwp = find_lwp_pid (step_over_bkpt);
4804 if (lwp != NULL)
4805 finish_step_over (lwp);
4806 step_over_bkpt = null_ptid;
4807 unsuspend_all_lwps (lwp);
4808 }
4809 }
4810
4811 /* This function is called once per thread. We check the thread's resume
4812 request, which will tell us whether to resume, step, or leave the thread
4813 stopped; and what signal, if any, it should be sent.
4814
4815 For threads which we aren't explicitly told otherwise, we preserve
4816 the stepping flag; this is used for stepping over gdbserver-placed
4817 breakpoints.
4818
4819 If pending_flags was set in any thread, we queue any needed
4820 signals, since we won't actually resume. We already have a pending
4821 event to report, so we don't need to preserve any step requests;
4822 they should be re-issued if necessary. */
4823
4824 static int
4825 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4826 {
4827 struct thread_info *thread = (struct thread_info *) entry;
4828 struct lwp_info *lwp = get_thread_lwp (thread);
4829 int step;
4830 int leave_all_stopped = * (int *) arg;
4831 int leave_pending;
4832
4833 if (lwp->resume == NULL)
4834 return 0;
4835
4836 if (lwp->resume->kind == resume_stop)
4837 {
4838 if (debug_threads)
4839 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4840
4841 if (!lwp->stopped)
4842 {
4843 if (debug_threads)
4844 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4845
4846 /* Stop the thread, and wait for the event asynchronously,
4847 through the event loop. */
4848 send_sigstop (lwp);
4849 }
4850 else
4851 {
4852 if (debug_threads)
4853 debug_printf ("already stopped LWP %ld\n",
4854 lwpid_of (thread));
4855
4856 /* The LWP may have been stopped in an internal event that
4857 was not meant to be notified back to GDB (e.g., gdbserver
4858 breakpoint), so we should be reporting a stop event in
4859 this case too. */
4860
4861 /* If the thread already has a pending SIGSTOP, this is a
4862 no-op. Otherwise, something later will presumably resume
4863 the thread and this will cause it to cancel any pending
4864 operation, due to last_resume_kind == resume_stop. If
4865 the thread already has a pending status to report, we
4866 will still report it the next time we wait - see
4867 status_pending_p_callback. */
4868
4869 /* If we already have a pending signal to report, then
4870 there's no need to queue a SIGSTOP, as this means we're
4871 midway through moving the LWP out of the jumppad, and we
4872 will report the pending signal as soon as that is
4873 finished. */
4874 if (lwp->pending_signals_to_report == NULL)
4875 send_sigstop (lwp);
4876 }
4877
4878 /* For stop requests, we're done. */
4879 lwp->resume = NULL;
4880 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4881 return 0;
4882 }
4883
4884 /* If this thread which is about to be resumed has a pending status,
4885 then don't resume it - we can just report the pending status.
4886 Likewise if it is suspended, because e.g., another thread is
4887 stepping past a breakpoint. Make sure to queue any signals that
4888 would otherwise be sent. In all-stop mode, we do this decision
4889 based on if *any* thread has a pending status. If there's a
4890 thread that needs the step-over-breakpoint dance, then don't
4891 resume any other thread but that particular one. */
4892 leave_pending = (lwp->suspended
4893 || lwp->status_pending_p
4894 || leave_all_stopped);
4895
4896 if (!leave_pending)
4897 {
4898 if (debug_threads)
4899 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4900
4901 step = (lwp->resume->kind == resume_step);
4902 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4903 }
4904 else
4905 {
4906 if (debug_threads)
4907 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4908
4909 /* If we have a new signal, enqueue the signal. */
4910 if (lwp->resume->sig != 0)
4911 {
4912 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4913
4914 p_sig->prev = lwp->pending_signals;
4915 p_sig->signal = lwp->resume->sig;
4916
4917 /* If this is the same signal we were previously stopped by,
4918 make sure to queue its siginfo. We can ignore the return
4919 value of ptrace; if it fails, we'll skip
4920 PTRACE_SETSIGINFO. */
4921 if (WIFSTOPPED (lwp->last_status)
4922 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4923 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4924 &p_sig->info);
4925
4926 lwp->pending_signals = p_sig;
4927 }
4928 }
4929
4930 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4931 lwp->resume = NULL;
4932 return 0;
4933 }
4934
4935 static void
4936 linux_resume (struct thread_resume *resume_info, size_t n)
4937 {
4938 struct thread_resume_array array = { resume_info, n };
4939 struct thread_info *need_step_over = NULL;
4940 int any_pending;
4941 int leave_all_stopped;
4942
4943 if (debug_threads)
4944 {
4945 debug_enter ();
4946 debug_printf ("linux_resume:\n");
4947 }
4948
4949 find_inferior (&all_threads, linux_set_resume_request, &array);
4950
4951 /* If there is a thread which would otherwise be resumed, which has
4952 a pending status, then don't resume any threads - we can just
4953 report the pending status. Make sure to queue any signals that
4954 would otherwise be sent. In non-stop mode, we'll apply this
4955 logic to each thread individually. We consume all pending events
4956 before considering to start a step-over (in all-stop). */
4957 any_pending = 0;
4958 if (!non_stop)
4959 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4960
4961 /* If there is a thread which would otherwise be resumed, which is
4962 stopped at a breakpoint that needs stepping over, then don't
4963 resume any threads - have it step over the breakpoint with all
4964 other threads stopped, then resume all threads again. Make sure
4965 to queue any signals that would otherwise be delivered or
4966 queued. */
4967 if (!any_pending && supports_breakpoints ())
4968 need_step_over
4969 = (struct thread_info *) find_inferior (&all_threads,
4970 need_step_over_p, NULL);
4971
4972 leave_all_stopped = (need_step_over != NULL || any_pending);
4973
4974 if (debug_threads)
4975 {
4976 if (need_step_over != NULL)
4977 debug_printf ("Not resuming all, need step over\n");
4978 else if (any_pending)
4979 debug_printf ("Not resuming, all-stop and found "
4980 "an LWP with pending status\n");
4981 else
4982 debug_printf ("Resuming, no pending status or step over needed\n");
4983 }
4984
4985 /* Even if we're leaving threads stopped, queue all signals we'd
4986 otherwise deliver. */
4987 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4988
4989 if (need_step_over)
4990 start_step_over (get_thread_lwp (need_step_over));
4991
4992 if (debug_threads)
4993 {
4994 debug_printf ("linux_resume done\n");
4995 debug_exit ();
4996 }
4997
4998 /* We may have events that were pending that can/should be sent to
4999 the client now. Trigger a linux_wait call. */
5000 if (target_is_async_p ())
5001 async_file_mark ();
5002 }
5003
5004 /* This function is called once per thread. We check the thread's
5005 last resume request, which will tell us whether to resume, step, or
5006 leave the thread stopped. Any signal the client requested to be
5007 delivered has already been enqueued at this point.
5008
5009 If any thread that GDB wants running is stopped at an internal
5010 breakpoint that needs stepping over, we start a step-over operation
5011 on that particular thread, and leave all others stopped. */
5012
5013 static int
5014 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5015 {
5016 struct thread_info *thread = (struct thread_info *) entry;
5017 struct lwp_info *lwp = get_thread_lwp (thread);
5018 int step;
5019
5020 if (lwp == except)
5021 return 0;
5022
5023 if (debug_threads)
5024 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5025
5026 if (!lwp->stopped)
5027 {
5028 if (debug_threads)
5029 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5030 return 0;
5031 }
5032
5033 if (thread->last_resume_kind == resume_stop
5034 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5035 {
5036 if (debug_threads)
5037 debug_printf (" client wants LWP to remain %ld stopped\n",
5038 lwpid_of (thread));
5039 return 0;
5040 }
5041
5042 if (lwp->status_pending_p)
5043 {
5044 if (debug_threads)
5045 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5046 lwpid_of (thread));
5047 return 0;
5048 }
5049
5050 gdb_assert (lwp->suspended >= 0);
5051
5052 if (lwp->suspended)
5053 {
5054 if (debug_threads)
5055 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5056 return 0;
5057 }
5058
5059 if (thread->last_resume_kind == resume_stop
5060 && lwp->pending_signals_to_report == NULL
5061 && lwp->collecting_fast_tracepoint == 0)
5062 {
5063 /* We haven't reported this LWP as stopped yet (otherwise, the
5064 last_status.kind check above would catch it, and we wouldn't
5065 reach here). This LWP may have been momentarily paused by a
5066 stop_all_lwps call while handling for example, another LWP's
5067 step-over. In that case, the pending expected SIGSTOP signal
5068 that was queued at vCont;t handling time will have already
5069 been consumed by wait_for_sigstop, and so we need to requeue
5070 another one here. Note that if the LWP already has a SIGSTOP
5071 pending, this is a no-op. */
5072
5073 if (debug_threads)
5074 debug_printf ("Client wants LWP %ld to stop. "
5075 "Making sure it has a SIGSTOP pending\n",
5076 lwpid_of (thread));
5077
5078 send_sigstop (lwp);
5079 }
5080
5081 if (thread->last_resume_kind == resume_step)
5082 {
5083 if (debug_threads)
5084 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5085 lwpid_of (thread));
5086 step = 1;
5087 }
5088 else if (lwp->bp_reinsert != 0)
5089 {
5090 if (debug_threads)
5091 debug_printf (" stepping LWP %ld, reinsert set\n",
5092 lwpid_of (thread));
5093
5094 step = maybe_hw_step (thread);
5095 }
5096 else
5097 step = 0;
5098
5099 linux_resume_one_lwp (lwp, step, 0, NULL);
5100 return 0;
5101 }
5102
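/* find_inferior callback.  Decrement the suspend count of the LWP
   behind ENTRY, unless it is EXCEPT, and then let proceed_one_lwp
   decide whether to set it running again.  */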
5103 static int
5104 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5105 {
5106 struct thread_info *thread = (struct thread_info *) entry;
5107 struct lwp_info *lwp = get_thread_lwp (thread);
5108
5109 if (lwp == except)
5110 return 0;
5111
5112 lwp_suspended_decr (lwp);
5113
5114 return proceed_one_lwp (entry, except);
5115 }
5116
5117 /* When we finish a step-over, set threads running again. If there's
5118 another thread that may need a step-over, now's the time to start
5119 it. Eventually, we'll move all threads past their breakpoints. */
5120
5121 static void
5122 proceed_all_lwps (void)
5123 {
5124 struct thread_info *need_step_over;
5125
5126 /* If there is a thread which would otherwise be resumed, which is
5127 stopped at a breakpoint that needs stepping over, then don't
5128 resume any threads - have it step over the breakpoint with all
5129 other threads stopped, then resume all threads again. */
5130
5131 if (supports_breakpoints ())
5132 {
5133 need_step_over
5134 = (struct thread_info *) find_inferior (&all_threads,
5135 need_step_over_p, NULL);
5136
5137 if (need_step_over != NULL)
5138 {
5139 if (debug_threads)
5140 debug_printf ("proceed_all_lwps: found "
5141 "thread %ld needing a step-over\n",
5142 lwpid_of (need_step_over));
5143
5144 start_step_over (get_thread_lwp (need_step_over));
5145 return;
5146 }
5147 }
5148
5149 if (debug_threads)
5150 debug_printf ("Proceeding, no step-over needed\n");
5151
5152 find_inferior (&all_threads, proceed_one_lwp, NULL);
5153 }
5154
5155 /* Stopped LWPs that the client wanted to be running, that don't have
5156 pending statuses, are set to run again, except for EXCEPT, if not
5157 NULL. This undoes a stop_all_lwps call. */
5158
5159 static void
5160 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5161 {
5162 if (debug_threads)
5163 {
5164 debug_enter ();
5165 if (except)
5166 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5167 lwpid_of (get_lwp_thread (except)));
5168 else
5169 debug_printf ("unstopping all lwps\n");
5170 }
5171
5172 if (unsuspend)
5173 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5174 else
5175 find_inferior (&all_threads, proceed_one_lwp, except);
5176
5177 if (debug_threads)
5178 {
5179 debug_printf ("unstop_all_lwps done\n");
5180 debug_exit ();
5181 }
5182 }
5183
5184
5185 #ifdef HAVE_LINUX_REGSETS
5186
5187 #define use_linux_regsets 1
5188
5189 /* Returns true if REGSET has been disabled. */
5190
5191 static int
5192 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5193 {
5194 return (info->disabled_regsets != NULL
5195 && info->disabled_regsets[regset - info->regsets]);
5196 }
5197
5198 /* Disable REGSET. */
5199
5200 static void
5201 disable_regset (struct regsets_info *info, struct regset_info *regset)
5202 {
5203 int dr_offset;
5204
5205 dr_offset = regset - info->regsets;
5206 if (info->disabled_regsets == NULL)
5207 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5208 info->disabled_regsets[dr_offset] = 1;
5209 }
5210
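/* Fetch registers from the inferior into REGCACHE, one regset of
   REGSETS_INFO at a time.  Return 0 if a general-register regset was
   among those transferred, 1 otherwise, in which case the caller
   falls back on the PTRACE_PEEKUSER path for everything.  */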
5211 static int
5212 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5213 struct regcache *regcache)
5214 {
5215 struct regset_info *regset;
5216 int saw_general_regs = 0;
5217 int pid;
5218 struct iovec iov;
5219
5220 pid = lwpid_of (current_thread);
5221 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5222 {
5223 void *buf, *data;
5224 int nt_type, res;
5225
5226 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5227 continue;
5228
5229 buf = xmalloc (regset->size);
5230
5231 nt_type = regset->nt_type;
5232 if (nt_type)
5233 {
5234 iov.iov_base = buf;
5235 iov.iov_len = regset->size;
5236 data = (void *) &iov;
5237 }
5238 else
5239 data = buf;
5240
5241 #ifndef __sparc__
5242 res = ptrace (regset->get_request, pid,
5243 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5244 #else
5245 res = ptrace (regset->get_request, pid, data, nt_type);
5246 #endif
5247 if (res < 0)
5248 {
5249 if (errno == EIO)
5250 {
5251 /* If we get EIO on a regset, do not try it again for
5252 this process mode. */
5253 disable_regset (regsets_info, regset);
5254 }
5255 else if (errno == ENODATA)
5256 {
5257 /* ENODATA may be returned if the regset is currently
5258 not "active". This can happen in normal operation,
5259 so suppress the warning in this case. */
5260 }
5261 else
5262 {
5263 char s[256];
5264 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5265 pid);
5266 perror (s);
5267 }
5268 }
5269 else
5270 {
5271 if (regset->type == GENERAL_REGS)
5272 saw_general_regs = 1;
5273 regset->store_function (regcache, buf);
5274 }
5275 free (buf);
5276 }
5277 if (saw_general_regs)
5278 return 0;
5279 else
5280 return 1;
5281 }
5282
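/* Write registers from REGCACHE back into the inferior, one regset
   of REGSETS_INFO at a time, reading each regset first so that any
   kernel-side fields not tracked in the regcache are preserved.
   Return 0 if a general-register regset was among those written,
   1 otherwise.  */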
5283 static int
5284 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5285 struct regcache *regcache)
5286 {
5287 struct regset_info *regset;
5288 int saw_general_regs = 0;
5289 int pid;
5290 struct iovec iov;
5291
5292 pid = lwpid_of (current_thread);
5293 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5294 {
5295 void *buf, *data;
5296 int nt_type, res;
5297
5298 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5299 || regset->fill_function == NULL)
5300 continue;
5301
5302 buf = xmalloc (regset->size);
5303
5304 /* First fill the buffer with the current register set contents,
5305 in case there are any items in the kernel's regset that are
5306 not in gdbserver's regcache. */
5307
5308 nt_type = regset->nt_type;
5309 if (nt_type)
5310 {
5311 iov.iov_base = buf;
5312 iov.iov_len = regset->size;
5313 data = (void *) &iov;
5314 }
5315 else
5316 data = buf;
5317
5318 #ifndef __sparc__
5319 res = ptrace (regset->get_request, pid,
5320 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5321 #else
5322 res = ptrace (regset->get_request, pid, data, nt_type);
5323 #endif
5324
5325 if (res == 0)
5326 {
5327 /* Then overlay our cached registers on that. */
5328 regset->fill_function (regcache, buf);
5329
5330 /* Only now do we write the register set. */
5331 #ifndef __sparc__
5332 res = ptrace (regset->set_request, pid,
5333 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5334 #else
5335 res = ptrace (regset->set_request, pid, data, nt_type);
5336 #endif
5337 }
5338
5339 if (res < 0)
5340 {
5341 if (errno == EIO)
5342 {
5343 /* If we get EIO on a regset, do not try it again for
5344 this process mode. */
5345 disable_regset (regsets_info, regset);
5346 }
5347 else if (errno == ESRCH)
5348 {
5349 /* At this point, ESRCH should mean the process is
5350 already gone, in which case we simply ignore attempts
5351 to change its registers. See also the related
5352 comment in linux_resume_one_lwp. */
5353 free (buf);
5354 return 0;
5355 }
5356 else
5357 {
5358 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5359 }
5360 }
5361 else if (regset->type == GENERAL_REGS)
5362 saw_general_regs = 1;
5363 free (buf);
5364 }
5365 if (saw_general_regs)
5366 return 0;
5367 else
5368 return 1;
5369 }
5370
5371 #else /* !HAVE_LINUX_REGSETS */
5372
5373 #define use_linux_regsets 0
5374 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5375 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5376
5377 #endif
5378
5379 /* Return 1 if register REGNO is supported by one of the regset ptrace
5380 calls or 0 if it has to be transferred individually. */
5381
5382 static int
5383 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5384 {
5385 unsigned char mask = 1 << (regno % 8);
5386 size_t index = regno / 8;
5387
5388 return (use_linux_regsets
5389 && (regs_info->regset_bitmap == NULL
5390 || (regs_info->regset_bitmap[index] & mask) != 0));
5391 }
5392
5393 #ifdef HAVE_LINUX_USRREGS
5394
5395 static int
5396 register_addr (const struct usrregs_info *usrregs, int regnum)
5397 {
5398 int addr;
5399
5400 if (regnum < 0 || regnum >= usrregs->num_regs)
5401 error ("Invalid register number %d.", regnum);
5402
5403 addr = usrregs->regmap[regnum];
5404
5405 return addr;
5406 }
5407
5408 /* Fetch one register. */
5409 static void
5410 fetch_register (const struct usrregs_info *usrregs,
5411 struct regcache *regcache, int regno)
5412 {
5413 CORE_ADDR regaddr;
5414 int i, size;
5415 char *buf;
5416 int pid;
5417
5418 if (regno >= usrregs->num_regs)
5419 return;
5420 if ((*the_low_target.cannot_fetch_register) (regno))
5421 return;
5422
5423 regaddr = register_addr (usrregs, regno);
5424 if (regaddr == -1)
5425 return;
5426
5427 size = ((register_size (regcache->tdesc, regno)
5428 + sizeof (PTRACE_XFER_TYPE) - 1)
5429 & -sizeof (PTRACE_XFER_TYPE));
5430 buf = (char *) alloca (size);
5431
5432 pid = lwpid_of (current_thread);
5433 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5434 {
5435 errno = 0;
5436 *(PTRACE_XFER_TYPE *) (buf + i) =
5437 ptrace (PTRACE_PEEKUSER, pid,
5438 /* Coerce to a uintptr_t first to avoid potential gcc warning
5439 of coercing an 8 byte integer to a 4 byte pointer. */
5440 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5441 regaddr += sizeof (PTRACE_XFER_TYPE);
5442 if (errno != 0)
5443 error ("reading register %d: %s", regno, strerror (errno));
5444 }
5445
5446 if (the_low_target.supply_ptrace_register)
5447 the_low_target.supply_ptrace_register (regcache, regno, buf);
5448 else
5449 supply_register (regcache, regno, buf);
5450 }
5451
5452 /* Store one register. */
5453 static void
5454 store_register (const struct usrregs_info *usrregs,
5455 struct regcache *regcache, int regno)
5456 {
5457 CORE_ADDR regaddr;
5458 int i, size;
5459 char *buf;
5460 int pid;
5461
5462 if (regno >= usrregs->num_regs)
5463 return;
5464 if ((*the_low_target.cannot_store_register) (regno))
5465 return;
5466
5467 regaddr = register_addr (usrregs, regno);
5468 if (regaddr == -1)
5469 return;
5470
5471 size = ((register_size (regcache->tdesc, regno)
5472 + sizeof (PTRACE_XFER_TYPE) - 1)
5473 & -sizeof (PTRACE_XFER_TYPE));
5474 buf = (char *) alloca (size);
5475 memset (buf, 0, size);
5476
5477 if (the_low_target.collect_ptrace_register)
5478 the_low_target.collect_ptrace_register (regcache, regno, buf);
5479 else
5480 collect_register (regcache, regno, buf);
5481
5482 pid = lwpid_of (current_thread);
5483 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5484 {
5485 errno = 0;
5486 ptrace (PTRACE_POKEUSER, pid,
5487 /* Coerce to a uintptr_t first to avoid potential gcc warning
5488 about coercing an 8 byte integer to a 4 byte pointer. */
5489 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5490 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5491 if (errno != 0)
5492 {
5493 /* At this point, ESRCH should mean the process is
5494 already gone, in which case we simply ignore attempts
5495 to change its registers. See also the related
5496 comment in linux_resume_one_lwp. */
5497 if (errno == ESRCH)
5498 return;
5499
5500 if ((*the_low_target.cannot_store_register) (regno) == 0)
5501 error ("writing register %d: %s", regno, strerror (errno));
5502 }
5503 regaddr += sizeof (PTRACE_XFER_TYPE);
5504 }
5505 }
5506
5507 /* Fetch all registers, or just one, from the child process.
5508 If REGNO is -1, do this for all registers, skipping any that are
5509 assumed to have been retrieved by regsets_fetch_inferior_registers,
5510 unless ALL is non-zero.
5511 Otherwise, REGNO specifies which register (so we can save time). */
5512 static void
5513 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5514 struct regcache *regcache, int regno, int all)
5515 {
5516 struct usrregs_info *usr = regs_info->usrregs;
5517
5518 if (regno == -1)
5519 {
5520 for (regno = 0; regno < usr->num_regs; regno++)
5521 if (all || !linux_register_in_regsets (regs_info, regno))
5522 fetch_register (usr, regcache, regno);
5523 }
5524 else
5525 fetch_register (usr, regcache, regno);
5526 }
5527
5528 /* Store our register values back into the inferior.
5529 If REGNO is -1, do this for all registers, skipping any that are
5530 assumed to have been saved by regsets_store_inferior_registers,
5531 unless ALL is non-zero.
5532 Otherwise, REGNO specifies which register (so we can save time). */
5533 static void
5534 usr_store_inferior_registers (const struct regs_info *regs_info,
5535 struct regcache *regcache, int regno, int all)
5536 {
5537 struct usrregs_info *usr = regs_info->usrregs;
5538
5539 if (regno == -1)
5540 {
5541 for (regno = 0; regno < usr->num_regs; regno++)
5542 if (all || !linux_register_in_regsets (regs_info, regno))
5543 store_register (usr, regcache, regno);
5544 }
5545 else
5546 store_register (usr, regcache, regno);
5547 }
5548
5549 #else /* !HAVE_LINUX_USRREGS */
5550
5551 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5552 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5553
5554 #endif
5555
5556
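/* Fetch registers using whichever mechanisms the target provides: the
   low target's fetch_register hook first (if any), then regsets, then
   the USER-area fallback for anything the regsets did not cover.  */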
5557 static void
5558 linux_fetch_registers (struct regcache *regcache, int regno)
5559 {
5560 int use_regsets;
5561 int all = 0;
5562 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5563
5564 if (regno == -1)
5565 {
5566 if (the_low_target.fetch_register != NULL
5567 && regs_info->usrregs != NULL)
5568 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5569 (*the_low_target.fetch_register) (regcache, regno);
5570
5571 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5572 if (regs_info->usrregs != NULL)
5573 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5574 }
5575 else
5576 {
5577 if (the_low_target.fetch_register != NULL
5578 && (*the_low_target.fetch_register) (regcache, regno))
5579 return;
5580
5581 use_regsets = linux_register_in_regsets (regs_info, regno);
5582 if (use_regsets)
5583 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5584 regcache);
5585 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5586 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5587 }
5588 }
5589
5590 static void
5591 linux_store_registers (struct regcache *regcache, int regno)
5592 {
5593 int use_regsets;
5594 int all = 0;
5595 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5596
5597 if (regno == -1)
5598 {
5599 all = regsets_store_inferior_registers (regs_info->regsets_info,
5600 regcache);
5601 if (regs_info->usrregs != NULL)
5602 usr_store_inferior_registers (regs_info, regcache, regno, all);
5603 }
5604 else
5605 {
5606 use_regsets = linux_register_in_regsets (regs_info, regno);
5607 if (use_regsets)
5608 all = regsets_store_inferior_registers (regs_info->regsets_info,
5609 regcache);
5610 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5611 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5612 }
5613 }
5614
5615
5616 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5617 to debugger memory starting at MYADDR. */
5618
5619 static int
5620 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5621 {
5622 int pid = lwpid_of (current_thread);
5623 register PTRACE_XFER_TYPE *buffer;
5624 register CORE_ADDR addr;
5625 register int count;
5626 char filename[64];
5627 register int i;
5628 int ret;
5629 int fd;
5630
5631 /* Try using /proc. Don't bother for one word. */
5632 if (len >= 3 * sizeof (long))
5633 {
5634 int bytes;
5635
5636 /* We could keep this file open and cache it - possibly one per
5637 thread. That requires some juggling, but is even faster. */
5638 sprintf (filename, "/proc/%d/mem", pid);
5639 fd = open (filename, O_RDONLY | O_LARGEFILE);
5640 if (fd == -1)
5641 goto no_proc;
5642
5643 /* If pread64 is available, use it. It's faster if the kernel
5644 supports it (only one syscall), and it's 64-bit safe even on
5645 32-bit platforms (for instance, SPARC debugging a SPARC64
5646 application). */
5647 #ifdef HAVE_PREAD64
5648 bytes = pread64 (fd, myaddr, len, memaddr);
5649 #else
5650 bytes = -1;
5651 if (lseek (fd, memaddr, SEEK_SET) != -1)
5652 bytes = read (fd, myaddr, len);
5653 #endif
5654
5655 close (fd);
5656 if (bytes == len)
5657 return 0;
5658
5659 /* Some data was read; we'll try to get the rest with ptrace. */
5660 if (bytes > 0)
5661 {
5662 memaddr += bytes;
5663 myaddr += bytes;
5664 len -= bytes;
5665 }
5666 }
5667
5668 no_proc:
5669 /* Round starting address down to longword boundary. */
5670 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5671 /* Round ending address up; get number of longwords that makes. */
5672 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5673 / sizeof (PTRACE_XFER_TYPE));
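/* E.g. with 8-byte transfer words, reading 5 bytes starting at 0x1003
   rounds ADDR down to 0x1000 and yields COUNT == 1: a single word
   covering 0x1000..0x1007, which contains all the requested bytes.  */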
5674 /* Allocate buffer of that many longwords. */
5675 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5676
5677 /* Read all the longwords.  */
5678 errno = 0;
5679 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5680 {
5681 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5682 about coercing an 8 byte integer to a 4 byte pointer. */
5683 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5684 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5685 (PTRACE_TYPE_ARG4) 0);
5686 if (errno)
5687 break;
5688 }
5689 ret = errno;
5690
5691 /* Copy appropriate bytes out of the buffer. */
5692 if (i > 0)
5693 {
5694 i *= sizeof (PTRACE_XFER_TYPE);
5695 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5696 memcpy (myaddr,
5697 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5698 i < len ? i : len);
5699 }
5700
5701 return ret;
5702 }
5703
5704 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5705 memory at MEMADDR. On failure (cannot write to the inferior)
5706 returns the value of errno. Always succeeds if LEN is zero. */
5707
5708 static int
5709 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5710 {
5711 register int i;
5712 /* Round starting address down to longword boundary. */
5713 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5714 /* Round ending address up; get number of longwords that makes. */
5715 register int count
5716 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5717 / sizeof (PTRACE_XFER_TYPE);
5718
5719 /* Allocate buffer of that many longwords. */
5720 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5721
5722 int pid = lwpid_of (current_thread);
5723
5724 if (len == 0)
5725 {
5726 /* Zero length write always succeeds. */
5727 return 0;
5728 }
5729
5730 if (debug_threads)
5731 {
5732 /* Dump up to four bytes. */
5733 char str[4 * 2 + 1];
5734 char *p = str;
5735 int dump = len < 4 ? len : 4;
5736
5737 for (i = 0; i < dump; i++)
5738 {
5739 sprintf (p, "%02x", myaddr[i]);
5740 p += 2;
5741 }
5742 *p = '\0';
5743
5744 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5745 str, (long) memaddr, pid);
5746 }
5747
5748 /* Fill start and end extra bytes of buffer with existing memory data. */
5749
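/* E.g. an unaligned 3-byte write at 0x1006 with 8-byte transfer words
   spans two words (at 0x1000 and 0x1008); the bytes of those words
   outside the requested range must be preserved, so they are read
   back first.  */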
5750 errno = 0;
5751 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5752 about coercing an 8 byte integer to a 4 byte pointer. */
5753 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5754 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5755 (PTRACE_TYPE_ARG4) 0);
5756 if (errno)
5757 return errno;
5758
5759 if (count > 1)
5760 {
5761 errno = 0;
5762 buffer[count - 1]
5763 = ptrace (PTRACE_PEEKTEXT, pid,
5764 /* Coerce to a uintptr_t first to avoid potential gcc warning
5765 about coercing an 8 byte integer to a 4 byte pointer. */
5766 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5767 * sizeof (PTRACE_XFER_TYPE)),
5768 (PTRACE_TYPE_ARG4) 0);
5769 if (errno)
5770 return errno;
5771 }
5772
5773 /* Copy data to be written over corresponding part of buffer. */
5774
5775 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5776 myaddr, len);
5777
5778 /* Write the entire buffer. */
5779
5780 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5781 {
5782 errno = 0;
5783 ptrace (PTRACE_POKETEXT, pid,
5784 /* Coerce to a uintptr_t first to avoid potential gcc warning
5785 about coercing an 8 byte integer to a 4 byte pointer. */
5786 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5787 (PTRACE_TYPE_ARG4) buffer[i]);
5788 if (errno)
5789 return errno;
5790 }
5791
5792 return 0;
5793 }
5794
5795 static void
5796 linux_look_up_symbols (void)
5797 {
5798 #ifdef USE_THREAD_DB
5799 struct process_info *proc = current_process ();
5800
5801 if (proc->priv->thread_db != NULL)
5802 return;
5803
5804 thread_db_init ();
5805 #endif
5806 }
5807
5808 static void
5809 linux_request_interrupt (void)
5810 {
5811 extern unsigned long signal_pid;
5812
5813 /* Send a SIGINT to the process group. This acts just like the user
5814 typed a ^C on the controlling terminal. */
5815 kill (-signal_pid, SIGINT);
5816 }
5817
5818 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5819 to debugger memory starting at MYADDR. */
5820
5821 static int
5822 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5823 {
5824 char filename[PATH_MAX];
5825 int fd, n;
5826 int pid = lwpid_of (current_thread);
5827
5828 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5829
5830 fd = open (filename, O_RDONLY);
5831 if (fd < 0)
5832 return -1;
5833
5834 if (offset != (CORE_ADDR) 0
5835 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5836 n = -1;
5837 else
5838 n = read (fd, myaddr, len);
5839
5840 close (fd);
5841
5842 return n;
5843 }
5844
5845 /* These breakpoint- and watchpoint-related wrapper functions simply
5846 pass the call on if the target has registered a corresponding
5847 function. */
5848
5849 static int
5850 linux_supports_z_point_type (char z_type)
5851 {
5852 return (the_low_target.supports_z_point_type != NULL
5853 && the_low_target.supports_z_point_type (z_type));
5854 }
5855
5856 static int
5857 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5858 int size, struct raw_breakpoint *bp)
5859 {
5860 if (type == raw_bkpt_type_sw)
5861 return insert_memory_breakpoint (bp);
5862 else if (the_low_target.insert_point != NULL)
5863 return the_low_target.insert_point (type, addr, size, bp);
5864 else
5865 /* Unsupported (see target.h). */
5866 return 1;
5867 }
5868
5869 static int
5870 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5871 int size, struct raw_breakpoint *bp)
5872 {
5873 if (type == raw_bkpt_type_sw)
5874 return remove_memory_breakpoint (bp);
5875 else if (the_low_target.remove_point != NULL)
5876 return the_low_target.remove_point (type, addr, size, bp);
5877 else
5878 /* Unsupported (see target.h). */
5879 return 1;
5880 }
5881
5882 /* Implement the to_stopped_by_sw_breakpoint target_ops
5883 method. */
5884
5885 static int
5886 linux_stopped_by_sw_breakpoint (void)
5887 {
5888 struct lwp_info *lwp = get_thread_lwp (current_thread);
5889
5890 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5891 }
5892
5893 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5894 method. */
5895
5896 static int
5897 linux_supports_stopped_by_sw_breakpoint (void)
5898 {
5899 return USE_SIGTRAP_SIGINFO;
5900 }
5901
5902 /* Implement the to_stopped_by_hw_breakpoint target_ops
5903 method. */
5904
5905 static int
5906 linux_stopped_by_hw_breakpoint (void)
5907 {
5908 struct lwp_info *lwp = get_thread_lwp (current_thread);
5909
5910 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5911 }
5912
5913 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5914 method. */
5915
5916 static int
5917 linux_supports_stopped_by_hw_breakpoint (void)
5918 {
5919 return USE_SIGTRAP_SIGINFO;
5920 }
5921
5922 /* Implement the supports_hardware_single_step target_ops method. */
5923
5924 static int
5925 linux_supports_hardware_single_step (void)
5926 {
5927 return can_hardware_single_step ();
5928 }
5929
5930 static int
5931 linux_supports_software_single_step (void)
5932 {
5933 return can_software_single_step ();
5934 }
5935
5936 static int
5937 linux_stopped_by_watchpoint (void)
5938 {
5939 struct lwp_info *lwp = get_thread_lwp (current_thread);
5940
5941 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5942 }
5943
5944 static CORE_ADDR
5945 linux_stopped_data_address (void)
5946 {
5947 struct lwp_info *lwp = get_thread_lwp (current_thread);
5948
5949 return lwp->stopped_data_address;
5950 }
5951
5952 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5953 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5954 && defined(PT_TEXT_END_ADDR)
5955
5956 /* This is only used for targets that define PT_TEXT_ADDR,
5957 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
5958 target presumably has other ways of acquiring this information,
5959 such as loadmaps. */
5960
5961 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5962 to tell gdb about. */
5963
5964 static int
5965 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5966 {
5967 unsigned long text, text_end, data;
5968 int pid = lwpid_of (current_thread);
5969
5970 errno = 0;
5971
5972 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5973 (PTRACE_TYPE_ARG4) 0);
5974 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5975 (PTRACE_TYPE_ARG4) 0);
5976 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5977 (PTRACE_TYPE_ARG4) 0);
5978
5979 if (errno == 0)
5980 {
5981 /* Both text and data offsets produced at compile-time (and so
5982 used by gdb) are relative to the beginning of the program,
5983 with the data segment immediately following the text segment.
5984 However, the actual runtime layout in memory may put the data
5985 somewhere else, so when we send gdb a data base-address, we
5986 use the real data base address and subtract the compile-time
5987 data base-address from it (which is just the length of the
5988 text segment). BSS immediately follows data in both
5989 cases. */
5990 *text_p = text;
5991 *data_p = data - (text_end - text);
5992
5993 return 1;
5994 }
5995 return 0;
5996 }
5997 #endif
5998
5999 static int
6000 linux_qxfer_osdata (const char *annex,
6001 unsigned char *readbuf, unsigned const char *writebuf,
6002 CORE_ADDR offset, int len)
6003 {
6004 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6005 }
6006
6007 /* Convert a native/host siginfo object into/from the siginfo layout
6008 of the inferior's architecture. */
6009
6010 static void
6011 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6012 {
6013 int done = 0;
6014
6015 if (the_low_target.siginfo_fixup != NULL)
6016 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6017
6018 /* If there was no callback, or the callback didn't do anything,
6019 then just do a straight memcpy. */
6020 if (!done)
6021 {
6022 if (direction == 1)
6023 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6024 else
6025 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6026 }
6027 }
6028
6029 static int
6030 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6031 unsigned const char *writebuf, CORE_ADDR offset, int len)
6032 {
6033 int pid;
6034 siginfo_t siginfo;
6035 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6036
6037 if (current_thread == NULL)
6038 return -1;
6039
6040 pid = lwpid_of (current_thread);
6041
6042 if (debug_threads)
6043 debug_printf ("%s siginfo for lwp %d.\n",
6044 readbuf != NULL ? "Reading" : "Writing",
6045 pid);
6046
6047 if (offset >= sizeof (siginfo))
6048 return -1;
6049
6050 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6051 return -1;
6052
6053 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6054 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6055 inferior with a 64-bit GDBSERVER should look the same as debugging it
6056 with a 32-bit GDBSERVER, we need to convert it. */
6057 siginfo_fixup (&siginfo, inf_siginfo, 0);
6058
6059 if (offset + len > sizeof (siginfo))
6060 len = sizeof (siginfo) - offset;
6061
6062 if (readbuf != NULL)
6063 memcpy (readbuf, inf_siginfo + offset, len);
6064 else
6065 {
6066 memcpy (inf_siginfo + offset, writebuf, len);
6067
6068 /* Convert back to ptrace layout before flushing it out. */
6069 siginfo_fixup (&siginfo, inf_siginfo, 1);
6070
6071 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6072 return -1;
6073 }
6074
6075 return len;
6076 }
6077
6078 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6079 it lets us notice when children change state; it also acts as the
6080 handler for the sigsuspend in my_waitpid. */
6081
6082 static void
6083 sigchld_handler (int signo)
6084 {
6085 int old_errno = errno;
6086
6087 if (debug_threads)
6088 {
6089 do
6090 {
6091 /* fprintf is not async-signal-safe, so call write
6092 directly. */
6093 if (write (2, "sigchld_handler\n",
6094 sizeof ("sigchld_handler\n") - 1) < 0)
6095 break; /* just ignore */
6096 } while (0);
6097 }
6098
6099 if (target_is_async_p ())
6100 async_file_mark (); /* trigger a linux_wait */
6101
6102 errno = old_errno;
6103 }
6104
6105 static int
6106 linux_supports_non_stop (void)
6107 {
6108 return 1;
6109 }
6110
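/* Enable or disable async (non-stop) mode.  This uses the self-pipe
   trick: sigchld_handler calls async_file_mark, which writes to
   linux_event_pipe so its read end becomes readable to the event
   loop.  SIGCHLD is blocked while the pipe and handler registration
   are switched so that no event is lost in between.  Returns the
   previous setting.  */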
6111 static int
6112 linux_async (int enable)
6113 {
6114 int previous = target_is_async_p ();
6115
6116 if (debug_threads)
6117 debug_printf ("linux_async (%d), previous=%d\n",
6118 enable, previous);
6119
6120 if (previous != enable)
6121 {
6122 sigset_t mask;
6123 sigemptyset (&mask);
6124 sigaddset (&mask, SIGCHLD);
6125
6126 sigprocmask (SIG_BLOCK, &mask, NULL);
6127
6128 if (enable)
6129 {
6130 if (pipe (linux_event_pipe) == -1)
6131 {
6132 linux_event_pipe[0] = -1;
6133 linux_event_pipe[1] = -1;
6134 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6135
6136 warning ("creating event pipe failed.");
6137 return previous;
6138 }
6139
6140 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6141 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6142
6143 /* Register the event loop handler. */
6144 add_file_handler (linux_event_pipe[0],
6145 handle_target_event, NULL);
6146
6147 /* Always trigger a linux_wait. */
6148 async_file_mark ();
6149 }
6150 else
6151 {
6152 delete_file_handler (linux_event_pipe[0]);
6153
6154 close (linux_event_pipe[0]);
6155 close (linux_event_pipe[1]);
6156 linux_event_pipe[0] = -1;
6157 linux_event_pipe[1] = -1;
6158 }
6159
6160 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6161 }
6162
6163 return previous;
6164 }
6165
6166 static int
6167 linux_start_non_stop (int nonstop)
6168 {
6169 /* Register or unregister from event-loop accordingly. */
6170 linux_async (nonstop);
6171
6172 if (target_is_async_p () != (nonstop != 0))
6173 return -1;
6174
6175 return 0;
6176 }
6177
6178 static int
6179 linux_supports_multi_process (void)
6180 {
6181 return 1;
6182 }
6183
6184 /* Check if fork events are supported. */
6185
6186 static int
6187 linux_supports_fork_events (void)
6188 {
6189 return linux_supports_tracefork ();
6190 }
6191
6192 /* Check if vfork events are supported. */
6193
6194 static int
6195 linux_supports_vfork_events (void)
6196 {
6197 return linux_supports_tracefork ();
6198 }
6199
6200 /* Check if exec events are supported. */
6201
6202 static int
6203 linux_supports_exec_events (void)
6204 {
6205 return linux_supports_traceexec ();
6206 }
6207
6208 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6209 options for the specified lwp. */
6210
6211 static int
6212 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6213 void *args)
6214 {
6215 struct thread_info *thread = (struct thread_info *) entry;
6216 struct lwp_info *lwp = get_thread_lwp (thread);
6217
6218 if (!lwp->stopped)
6219 {
6220 /* Stop the lwp so we can modify its ptrace options. */
6221 lwp->must_set_ptrace_flags = 1;
6222 linux_stop_lwp (lwp);
6223 }
6224 else
6225 {
6226 /* Already stopped; go ahead and set the ptrace options. */
6227 struct process_info *proc = find_process_pid (pid_of (thread));
6228 int options = linux_low_ptrace_options (proc->attached);
6229
6230 linux_enable_event_reporting (lwpid_of (thread), options);
6231 lwp->must_set_ptrace_flags = 0;
6232 }
6233
6234 return 0;
6235 }
6236
6237 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6238 ptrace flags for all inferiors. This is in case the new GDB connection
6239 doesn't support the same set of events that the previous one did. */
6240
6241 static void
6242 linux_handle_new_gdb_connection (void)
6243 {
6244 pid_t pid;
6245
6246 /* Request that all the lwps reset their ptrace options. */
6247 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6248 }
6249
6250 static int
6251 linux_supports_disable_randomization (void)
6252 {
6253 #ifdef HAVE_PERSONALITY
6254 return 1;
6255 #else
6256 return 0;
6257 #endif
6258 }
6259
6260 static int
6261 linux_supports_agent (void)
6262 {
6263 return 1;
6264 }
6265
6266 static int
6267 linux_supports_range_stepping (void)
6268 {
6269 if (the_low_target.supports_range_stepping == NULL)
6270 return 0;
6271
6272 return (*the_low_target.supports_range_stepping) ();
6273 }
6274
6275 /* Enumerate spufs IDs for process PID. */
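/* Each SPU context the process has open appears in /proc/PID/fd as a
   directory on an spufs mount; collect those fd numbers as 32-bit
   values, honoring the OFFSET/LEN window requested by the caller.  */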
6276 static int
6277 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6278 {
6279 int pos = 0;
6280 int written = 0;
6281 char path[128];
6282 DIR *dir;
6283 struct dirent *entry;
6284
6285 sprintf (path, "/proc/%ld/fd", pid);
6286 dir = opendir (path);
6287 if (!dir)
6288 return -1;
6289
6290 rewinddir (dir);
6291 while ((entry = readdir (dir)) != NULL)
6292 {
6293 struct stat st;
6294 struct statfs stfs;
6295 int fd;
6296
6297 fd = atoi (entry->d_name);
6298 if (!fd)
6299 continue;
6300
6301 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6302 if (stat (path, &st) != 0)
6303 continue;
6304 if (!S_ISDIR (st.st_mode))
6305 continue;
6306
6307 if (statfs (path, &stfs) != 0)
6308 continue;
6309 if (stfs.f_type != SPUFS_MAGIC)
6310 continue;
6311
6312 if (pos >= offset && pos + 4 <= offset + len)
6313 {
6314 *(unsigned int *)(buf + pos - offset) = fd;
6315 written += 4;
6316 }
6317 pos += 4;
6318 }
6319
6320 closedir (dir);
6321 return written;
6322 }
6323
6324 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6325 object type, using the /proc file system. */
6326 static int
6327 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6328 unsigned const char *writebuf,
6329 CORE_ADDR offset, int len)
6330 {
6331 long pid = lwpid_of (current_thread);
6332 char buf[128];
6333 int fd = 0;
6334 int ret = 0;
6335
6336 if (!writebuf && !readbuf)
6337 return -1;
6338
6339 if (!*annex)
6340 {
6341 if (!readbuf)
6342 return -1;
6343 else
6344 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6345 }
6346
6347 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6348 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6349 if (fd <= 0)
6350 return -1;
6351
6352 if (offset != 0
6353 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6354 {
6355 close (fd);
6356 return 0;
6357 }
6358
6359 if (writebuf)
6360 ret = write (fd, writebuf, (size_t) len);
6361 else
6362 ret = read (fd, readbuf, (size_t) len);
6363
6364 close (fd);
6365 return ret;
6366 }
6367
6368 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6369 struct target_loadseg
6370 {
6371 /* Core address to which the segment is mapped. */
6372 Elf32_Addr addr;
6373 /* VMA recorded in the program header. */
6374 Elf32_Addr p_vaddr;
6375 /* Size of this segment in memory. */
6376 Elf32_Word p_memsz;
6377 };
6378
6379 # if defined PT_GETDSBT
6380 struct target_loadmap
6381 {
6382 /* Protocol version number, must be zero. */
6383 Elf32_Word version;
6384 /* Pointer to the DSBT table, its size, and the DSBT index. */
6385 unsigned *dsbt_table;
6386 unsigned dsbt_size, dsbt_index;
6387 /* Number of segments in this map. */
6388 Elf32_Word nsegs;
6389 /* The actual memory map. */
6390 struct target_loadseg segs[/*nsegs*/];
6391 };
6392 # define LINUX_LOADMAP PT_GETDSBT
6393 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6394 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6395 # else
6396 struct target_loadmap
6397 {
6398 /* Protocol version number, must be zero. */
6399 Elf32_Half version;
6400 /* Number of segments in this map. */
6401 Elf32_Half nsegs;
6402 /* The actual memory map. */
6403 struct target_loadseg segs[/*nsegs*/];
6404 };
6405 # define LINUX_LOADMAP PTRACE_GETFDPIC
6406 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6407 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6408 # endif
6409
6410 static int
6411 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6412 unsigned char *myaddr, unsigned int len)
6413 {
6414 int pid = lwpid_of (current_thread);
6415 int addr = -1;
6416 struct target_loadmap *data = NULL;
6417 unsigned int actual_length, copy_length;
6418
6419 if (strcmp (annex, "exec") == 0)
6420 addr = (int) LINUX_LOADMAP_EXEC;
6421 else if (strcmp (annex, "interp") == 0)
6422 addr = (int) LINUX_LOADMAP_INTERP;
6423 else
6424 return -1;
6425
6426 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6427 return -1;
6428
6429 if (data == NULL)
6430 return -1;
6431
6432 actual_length = sizeof (struct target_loadmap)
6433 + sizeof (struct target_loadseg) * data->nsegs;
6434
6435 if (offset < 0 || offset > actual_length)
6436 return -1;
6437
6438 copy_length = actual_length - offset < len ? actual_length - offset : len;
6439 memcpy (myaddr, (char *) data + offset, copy_length);
6440 return copy_length;
6441 }
6442 #else
6443 # define linux_read_loadmap NULL
6444 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6445
6446 static void
6447 linux_process_qsupported (char **features, int count)
6448 {
6449 if (the_low_target.process_qsupported != NULL)
6450 the_low_target.process_qsupported (features, count);
6451 }
6452
6453 static int
6454 linux_supports_catch_syscall (void)
6455 {
6456 return (the_low_target.get_syscall_trapinfo != NULL
6457 && linux_supports_tracesysgood ());
6458 }
6459
6460 static int
6461 linux_get_ipa_tdesc_idx (void)
6462 {
6463 if (the_low_target.get_ipa_tdesc_idx == NULL)
6464 return 0;
6465
6466 return (*the_low_target.get_ipa_tdesc_idx) ();
6467 }
6468
6469 static int
6470 linux_supports_tracepoints (void)
6471 {
6472 if (the_low_target.supports_tracepoints == NULL)
6473 return 0;
6474
6475 return (*the_low_target.supports_tracepoints) ();
6476 }
6477
6478 static CORE_ADDR
6479 linux_read_pc (struct regcache *regcache)
6480 {
6481 if (the_low_target.get_pc == NULL)
6482 return 0;
6483
6484 return (*the_low_target.get_pc) (regcache);
6485 }
6486
6487 static void
6488 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6489 {
6490 gdb_assert (the_low_target.set_pc != NULL);
6491
6492 (*the_low_target.set_pc) (regcache, pc);
6493 }
6494
6495 static int
6496 linux_thread_stopped (struct thread_info *thread)
6497 {
6498 return get_thread_lwp (thread)->stopped;
6499 }
6500
6501 /* This exposes stop-all-threads functionality to other modules. */
6502
6503 static void
6504 linux_pause_all (int freeze)
6505 {
6506 stop_all_lwps (freeze, NULL);
6507 }
6508
6509 /* This exposes unstop-all-threads functionality to other gdbserver
6510 modules. */
6511
6512 static void
6513 linux_unpause_all (int unfreeze)
6514 {
6515 unstop_all_lwps (unfreeze, NULL);
6516 }
6517
6518 static int
6519 linux_prepare_to_access_memory (void)
6520 {
6521 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6522 running LWP. */
6523 if (non_stop)
6524 linux_pause_all (1);
6525 return 0;
6526 }
6527
6528 static void
6529 linux_done_accessing_memory (void)
6530 {
6531 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6532 running LWP. */
6533 if (non_stop)
6534 linux_unpause_all (1);
6535 }
6536
6537 static int
6538 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6539 CORE_ADDR collector,
6540 CORE_ADDR lockaddr,
6541 ULONGEST orig_size,
6542 CORE_ADDR *jump_entry,
6543 CORE_ADDR *trampoline,
6544 ULONGEST *trampoline_size,
6545 unsigned char *jjump_pad_insn,
6546 ULONGEST *jjump_pad_insn_size,
6547 CORE_ADDR *adjusted_insn_addr,
6548 CORE_ADDR *adjusted_insn_addr_end,
6549 char *err)
6550 {
6551 return (*the_low_target.install_fast_tracepoint_jump_pad)
6552 (tpoint, tpaddr, collector, lockaddr, orig_size,
6553 jump_entry, trampoline, trampoline_size,
6554 jjump_pad_insn, jjump_pad_insn_size,
6555 adjusted_insn_addr, adjusted_insn_addr_end,
6556 err);
6557 }
6558
6559 static struct emit_ops *
6560 linux_emit_ops (void)
6561 {
6562 if (the_low_target.emit_ops != NULL)
6563 return (*the_low_target.emit_ops) ();
6564 else
6565 return NULL;
6566 }
6567
6568 static int
6569 linux_get_min_fast_tracepoint_insn_len (void)
6570 {
6571 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6572 }
6573
6574 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6575
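/* The auxiliary vector (/proc/PID/auxv) is scanned entry by entry,
   using the 32- or 64-bit layout as appropriate, until both AT_PHDR
   (the address of the program header table) and AT_PHNUM (the number
   of its entries) have been seen or the vector ends.  */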
6576 static int
6577 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6578 CORE_ADDR *phdr_memaddr, int *num_phdr)
6579 {
6580 char filename[PATH_MAX];
6581 int fd;
6582 const int auxv_size = is_elf64
6583 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6584 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6585
6586 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6587
6588 fd = open (filename, O_RDONLY);
6589 if (fd < 0)
6590 return 1;
6591
6592 *phdr_memaddr = 0;
6593 *num_phdr = 0;
6594 while (read (fd, buf, auxv_size) == auxv_size
6595 && (*phdr_memaddr == 0 || *num_phdr == 0))
6596 {
6597 if (is_elf64)
6598 {
6599 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6600
6601 switch (aux->a_type)
6602 {
6603 case AT_PHDR:
6604 *phdr_memaddr = aux->a_un.a_val;
6605 break;
6606 case AT_PHNUM:
6607 *num_phdr = aux->a_un.a_val;
6608 break;
6609 }
6610 }
6611 else
6612 {
6613 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6614
6615 switch (aux->a_type)
6616 {
6617 case AT_PHDR:
6618 *phdr_memaddr = aux->a_un.a_val;
6619 break;
6620 case AT_PHNUM:
6621 *num_phdr = aux->a_un.a_val;
6622 break;
6623 }
6624 }
6625 }
6626
6627 close (fd);
6628
6629 if (*phdr_memaddr == 0 || *num_phdr == 0)
6630 {
6631 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6632 "phdr_memaddr = %ld, phdr_num = %d",
6633 (long) *phdr_memaddr, *num_phdr);
6634 return 2;
6635 }
6636
6637 return 0;
6638 }
6639
6640 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6641
6642 static CORE_ADDR
6643 get_dynamic (const int pid, const int is_elf64)
6644 {
6645 CORE_ADDR phdr_memaddr, relocation;
6646 int num_phdr, i;
6647 unsigned char *phdr_buf;
6648 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6649
6650 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6651 return 0;
6652
6653 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6654 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6655
6656 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6657 return 0;
6658
6659 /* Compute relocation: it is expected to be 0 for "regular" executables,
6660 non-zero for PIE ones. */
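/* The PT_PHDR entry holds the link-time virtual address of the
   program header table itself, so the load bias is the difference
   between where the table actually is (PHDR_MEMADDR, from AT_PHDR)
   and that p_vaddr.  */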
6661 relocation = -1;
6662 for (i = 0; relocation == -1 && i < num_phdr; i++)
6663 if (is_elf64)
6664 {
6665 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6666
6667 if (p->p_type == PT_PHDR)
6668 relocation = phdr_memaddr - p->p_vaddr;
6669 }
6670 else
6671 {
6672 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6673
6674 if (p->p_type == PT_PHDR)
6675 relocation = phdr_memaddr - p->p_vaddr;
6676 }
6677
6678 if (relocation == -1)
6679 {
6680 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6681 all real-world executables, including PIE executables, always have
6682 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6683 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6684 provides DT_DEBUG anyway (fpc binaries are statically linked).
6685
6686 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6687
6688 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6689
6690 return 0;
6691 }
6692
6693 for (i = 0; i < num_phdr; i++)
6694 {
6695 if (is_elf64)
6696 {
6697 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6698
6699 if (p->p_type == PT_DYNAMIC)
6700 return p->p_vaddr + relocation;
6701 }
6702 else
6703 {
6704 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6705
6706 if (p->p_type == PT_DYNAMIC)
6707 return p->p_vaddr + relocation;
6708 }
6709 }
6710
6711 return 0;
6712 }
6713
6714 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6715 can be 0 if the inferior does not yet have the library list initialized.
6716 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6717 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6718
6719 static CORE_ADDR
6720 get_r_debug (const int pid, const int is_elf64)
6721 {
6722 CORE_ADDR dynamic_memaddr;
6723 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6724 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6725 CORE_ADDR map = -1;
6726
6727 dynamic_memaddr = get_dynamic (pid, is_elf64);
6728 if (dynamic_memaddr == 0)
6729 return map;
6730
6731 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6732 {
6733 if (is_elf64)
6734 {
6735 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6736 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6737 union
6738 {
6739 Elf64_Xword map;
6740 unsigned char buf[sizeof (Elf64_Xword)];
6741 }
6742 rld_map;
6743 #endif
6744 #ifdef DT_MIPS_RLD_MAP
6745 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6746 {
6747 if (linux_read_memory (dyn->d_un.d_val,
6748 rld_map.buf, sizeof (rld_map.buf)) == 0)
6749 return rld_map.map;
6750 else
6751 break;
6752 }
6753 #endif /* DT_MIPS_RLD_MAP */
6754 #ifdef DT_MIPS_RLD_MAP_REL
6755 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6756 {
6757 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6758 rld_map.buf, sizeof (rld_map.buf)) == 0)
6759 return rld_map.map;
6760 else
6761 break;
6762 }
6763 #endif /* DT_MIPS_RLD_MAP_REL */
6764
6765 if (dyn->d_tag == DT_DEBUG && map == -1)
6766 map = dyn->d_un.d_val;
6767
6768 if (dyn->d_tag == DT_NULL)
6769 break;
6770 }
6771 else
6772 {
6773 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6774 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6775 union
6776 {
6777 Elf32_Word map;
6778 unsigned char buf[sizeof (Elf32_Word)];
6779 }
6780 rld_map;
6781 #endif
6782 #ifdef DT_MIPS_RLD_MAP
6783 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6784 {
6785 if (linux_read_memory (dyn->d_un.d_val,
6786 rld_map.buf, sizeof (rld_map.buf)) == 0)
6787 return rld_map.map;
6788 else
6789 break;
6790 }
6791 #endif /* DT_MIPS_RLD_MAP */
6792 #ifdef DT_MIPS_RLD_MAP_REL
6793 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6794 {
6795 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6796 rld_map.buf, sizeof (rld_map.buf)) == 0)
6797 return rld_map.map;
6798 else
6799 break;
6800 }
6801 #endif /* DT_MIPS_RLD_MAP_REL */
6802
6803 if (dyn->d_tag == DT_DEBUG && map == -1)
6804 map = dyn->d_un.d_val;
6805
6806 if (dyn->d_tag == DT_NULL)
6807 break;
6808 }
6809
6810 dynamic_memaddr += dyn_size;
6811 }
6812
6813 return map;
6814 }
6815
6816 /* Read one pointer from MEMADDR in the inferior. */
6817
6818 static int
6819 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6820 {
6821 int ret;
6822
6823 /* Go through a union so this works on either big or little endian
6824 hosts, when the inferior's pointer size is smaller than the size
6825 of CORE_ADDR. It is assumed that the inferior's endianness is the
6826 same as the superior's. */
6827 union
6828 {
6829 CORE_ADDR core_addr;
6830 unsigned int ui;
6831 unsigned char uc;
6832 } addr;
6833
6834 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6835 if (ret == 0)
6836 {
6837 if (ptr_size == sizeof (CORE_ADDR))
6838 *ptr = addr.core_addr;
6839 else if (ptr_size == sizeof (unsigned int))
6840 *ptr = addr.ui;
6841 else
6842 gdb_assert_not_reached ("unhandled pointer size");
6843 }
6844 return ret;
6845 }
6846
6847 struct link_map_offsets
6848 {
6849 /* Offset and size of r_debug.r_version. */
6850 int r_version_offset;
6851
6852 /* Offset and size of r_debug.r_map. */
6853 int r_map_offset;
6854
6855 /* Offset to l_addr field in struct link_map. */
6856 int l_addr_offset;
6857
6858 /* Offset to l_name field in struct link_map. */
6859 int l_name_offset;
6860
6861 /* Offset to l_ld field in struct link_map. */
6862 int l_ld_offset;
6863
6864 /* Offset to l_next field in struct link_map. */
6865 int l_next_offset;
6866
6867 /* Offset to l_prev field in struct link_map. */
6868 int l_prev_offset;
6869 };
6870
6871 /* Construct qXfer:libraries-svr4:read reply. */
6872
6873 static int
6874 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6875 unsigned const char *writebuf,
6876 CORE_ADDR offset, int len)
6877 {
6878 char *document;
6879 unsigned document_len;
6880 struct process_info_private *const priv = current_process ()->priv;
6881 char filename[PATH_MAX];
6882 int pid, is_elf64;
6883
6884 static const struct link_map_offsets lmo_32bit_offsets =
6885 {
6886 0, /* r_version offset. */
6887 4, /* r_debug.r_map offset. */
6888 0, /* l_addr offset in link_map. */
6889 4, /* l_name offset in link_map. */
6890 8, /* l_ld offset in link_map. */
6891 12, /* l_next offset in link_map. */
6892 16 /* l_prev offset in link_map. */
6893 };
6894
6895 static const struct link_map_offsets lmo_64bit_offsets =
6896 {
6897 0, /* r_version offset. */
6898 8, /* r_debug.r_map offset. */
6899 0, /* l_addr offset in link_map. */
6900 8, /* l_name offset in link_map. */
6901 16, /* l_ld offset in link_map. */
6902 24, /* l_next offset in link_map. */
6903 32 /* l_prev offset in link_map. */
6904 };
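/* These tables mirror glibc's struct r_debug and struct link_map
   layouts for the usual ILP32 and LP64 ABIs; e.g. in the 64-bit case
   r_map sits 8 bytes into r_debug because the 4-byte r_version is
   padded up to pointer alignment.  */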
6905 const struct link_map_offsets *lmo;
6906 unsigned int machine;
6907 int ptr_size;
6908 CORE_ADDR lm_addr = 0, lm_prev = 0;
6909 int allocated = 1024;
6910 char *p;
6911 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6912 int header_done = 0;
6913
6914 if (writebuf != NULL)
6915 return -2;
6916 if (readbuf == NULL)
6917 return -1;
6918
6919 pid = lwpid_of (current_thread);
6920 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6921 is_elf64 = elf_64_file_p (filename, &machine);
6922 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6923 ptr_size = is_elf64 ? 8 : 4;
6924
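/* The annex is a sequence of KEY=HEXADDR pairs terminated by
   semicolons, e.g. "start=7f0000000000;prev=0;" (addresses here are
   illustrative).  "start" and "prev" seed the list walk below;
   unrecognized keys are skipped.  */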
6925 while (annex[0] != '\0')
6926 {
6927 const char *sep;
6928 CORE_ADDR *addrp;
6929 int len;
6930
6931 sep = strchr (annex, '=');
6932 if (sep == NULL)
6933 break;
6934
6935 len = sep - annex;
6936 if (len == 5 && startswith (annex, "start"))
6937 addrp = &lm_addr;
6938 else if (len == 4 && startswith (annex, "prev"))
6939 addrp = &lm_prev;
6940 else
6941 {
6942 annex = strchr (sep, ';');
6943 if (annex == NULL)
6944 break;
6945 annex++;
6946 continue;
6947 }
6948
6949 annex = decode_address_to_semicolon (addrp, sep + 1);
6950 }
6951
6952 if (lm_addr == 0)
6953 {
6954 int r_version = 0;
6955
6956 if (priv->r_debug == 0)
6957 priv->r_debug = get_r_debug (pid, is_elf64);
6958
6959 /* We failed to find DT_DEBUG. This situation will not change
6960 for this inferior, so do not retry. Report it to GDB as E01;
6961 see GDB's solib-svr4.c for the reasons. */
6962 if (priv->r_debug == (CORE_ADDR) -1)
6963 return -1;
6964
6965 if (priv->r_debug != 0)
6966 {
6967 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6968 (unsigned char *) &r_version,
6969 sizeof (r_version)) != 0
6970 || r_version != 1)
6971 {
6972 warning ("unexpected r_debug version %d", r_version);
6973 }
6974 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6975 &lm_addr, ptr_size) != 0)
6976 {
6977 warning ("unable to read r_map from 0x%lx",
6978 (long) priv->r_debug + lmo->r_map_offset);
6979 }
6980 }
6981 }
6982
6983 document = (char *) xmalloc (allocated);
6984 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6985 p = document + strlen (document);
6986
6987 while (lm_addr
6988 && read_one_ptr (lm_addr + lmo->l_name_offset,
6989 &l_name, ptr_size) == 0
6990 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6991 &l_addr, ptr_size) == 0
6992 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6993 &l_ld, ptr_size) == 0
6994 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6995 &l_prev, ptr_size) == 0
6996 && read_one_ptr (lm_addr + lmo->l_next_offset,
6997 &l_next, ptr_size) == 0)
6998 {
6999 unsigned char libname[PATH_MAX];
7000
7001 if (lm_prev != l_prev)
7002 {
7003 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7004 (long) lm_prev, (long) l_prev);
7005 break;
7006 }
7007
7008 /* Ignore the first entry, even if it has a valid name, as it
7009 corresponds to the main executable. The first entry should not be
7010 skipped if the dynamic loader was loaded late by a static executable
7011 (see the solib-svr4.c parameter ignore_first), but in that case the
7012 main executable has no PT_DYNAMIC and this function has already
7013 exited above due to a failed get_r_debug. */
7014 if (lm_prev == 0)
7015 {
7016 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7017 p = p + strlen (p);
7018 }
7019 else
7020 {
7021 /* Not checking for error because reading may stop before
7022 we've got PATH_MAX worth of characters. */
7023 libname[0] = '\0';
7024 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7025 libname[sizeof (libname) - 1] = '\0';
7026 if (libname[0] != '\0')
7027 {
7028 /* 6x the size for xml_escape_text below. */
7029 size_t len = 6 * strlen ((char *) libname);
7030 char *name;
7031
7032 if (!header_done)
7033 {
7034 /* Terminate `<library-list-svr4'. */
7035 *p++ = '>';
7036 header_done = 1;
7037 }
7038
7039 while (allocated < p - document + len + 200)
7040 {
7041 /* Expand to guarantee sufficient storage. */
7042 uintptr_t document_len = p - document;
7043
7044 document = (char *) xrealloc (document, 2 * allocated);
7045 allocated *= 2;
7046 p = document + document_len;
7047 }
7048
7049 name = xml_escape_text ((char *) libname);
7050 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7051 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7052 name, (unsigned long) lm_addr,
7053 (unsigned long) l_addr, (unsigned long) l_ld);
7054 free (name);
7055 }
7056 }
7057
7058 lm_prev = lm_addr;
7059 lm_addr = l_next;
7060 }
7061
7062 if (!header_done)
7063 {
7064 /* Empty list; terminate `<library-list-svr4'. */
7065 strcpy (p, "/>");
7066 }
7067 else
7068 strcpy (p, "</library-list-svr4>");
7069
7070 document_len = strlen (document);
7071 if (offset < document_len)
7072 document_len -= offset;
7073 else
7074 document_len = 0;
7075 if (len > document_len)
7076 len = document_len;
7077
7078 memcpy (readbuf, document + offset, len);
7079 xfree (document);
7080
7081 return len;
7082 }
7083
7084 #ifdef HAVE_LINUX_BTRACE
7085
7086 /* See to_disable_btrace target method. */
7087
7088 static int
7089 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7090 {
7091 enum btrace_error err;
7092
7093 err = linux_disable_btrace (tinfo);
7094 return (err == BTRACE_ERR_NONE ? 0 : -1);
7095 }
7096
7097 /* Encode an Intel Processor Trace configuration. */
7098
7099 static void
7100 linux_low_encode_pt_config (struct buffer *buffer,
7101 const struct btrace_data_pt_config *config)
7102 {
7103 buffer_grow_str (buffer, "<pt-config>\n");
7104
7105 switch (config->cpu.vendor)
7106 {
7107 case CV_INTEL:
7108 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7109 "model=\"%u\" stepping=\"%u\"/>\n",
7110 config->cpu.family, config->cpu.model,
7111 config->cpu.stepping);
7112 break;
7113
7114 default:
7115 break;
7116 }
7117
7118 buffer_grow_str (buffer, "</pt-config>\n");
7119 }
7120
7121 /* Encode a raw buffer. */
7122
7123 static void
7124 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7125 unsigned int size)
7126 {
7127 if (size == 0)
7128 return;
7129
7130 /* We use hex encoding - see common/rsp-low.h. */
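/* E.g. the byte 0x5a is emitted as the two ASCII characters "5a".  */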
7131 buffer_grow_str (buffer, "<raw>\n");
7132
7133 while (size-- > 0)
7134 {
7135 char elem[2];
7136
7137 elem[0] = tohex ((*data >> 4) & 0xf);
7138 elem[1] = tohex (*data++ & 0xf);
7139
7140 buffer_grow (buffer, elem, 2);
7141 }
7142
7143 buffer_grow_str (buffer, "</raw>\n");
7144 }
7145
7146 /* See to_read_btrace target method. */
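/* BTS trace is rendered as a list of <block> elements giving begin/end
   PCs; Intel PT trace is rendered as the CPU configuration followed by
   the raw, hex-encoded trace buffer.  */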
7147
7148 static int
7149 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7150 enum btrace_read_type type)
7151 {
7152 struct btrace_data btrace;
7153 struct btrace_block *block;
7154 enum btrace_error err;
7155 int i;
7156
7157 btrace_data_init (&btrace);
7158
7159 err = linux_read_btrace (&btrace, tinfo, type);
7160 if (err != BTRACE_ERR_NONE)
7161 {
7162 if (err == BTRACE_ERR_OVERFLOW)
7163 buffer_grow_str0 (buffer, "E.Overflow.");
7164 else
7165 buffer_grow_str0 (buffer, "E.Generic Error.");
7166
7167 goto err;
7168 }
7169
7170 switch (btrace.format)
7171 {
7172 case BTRACE_FORMAT_NONE:
7173 buffer_grow_str0 (buffer, "E.No Trace.");
7174 goto err;
7175
7176 case BTRACE_FORMAT_BTS:
7177 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7178 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7179
7180 for (i = 0;
7181 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7182 i++)
7183 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7184 paddress (block->begin), paddress (block->end));
7185
7186 buffer_grow_str0 (buffer, "</btrace>\n");
7187 break;
7188
7189 case BTRACE_FORMAT_PT:
7190 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7191 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7192 buffer_grow_str (buffer, "<pt>\n");
7193
7194 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7195
7196 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7197 btrace.variant.pt.size);
7198
7199 buffer_grow_str (buffer, "</pt>\n");
7200 buffer_grow_str0 (buffer, "</btrace>\n");
7201 break;
7202
7203 default:
7204 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7205 goto err;
7206 }
7207
7208 btrace_data_fini (&btrace);
7209 return 0;
7210
7211 err:
7212 btrace_data_fini (&btrace);
7213 return -1;
7214 }
7215
7216 /* See to_btrace_conf target method. */
7217
7218 static int
7219 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7220 struct buffer *buffer)
7221 {
7222 const struct btrace_config *conf;
7223
7224 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7225 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7226
7227 conf = linux_btrace_conf (tinfo);
7228 if (conf != NULL)
7229 {
7230 switch (conf->format)
7231 {
7232 case BTRACE_FORMAT_NONE:
7233 break;
7234
7235 case BTRACE_FORMAT_BTS:
7236 buffer_xml_printf (buffer, "<bts");
7237 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7238 buffer_xml_printf (buffer, " />\n");
7239 break;
7240
7241 case BTRACE_FORMAT_PT:
7242 buffer_xml_printf (buffer, "<pt");
7243 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7244 buffer_xml_printf (buffer, "/>\n");
7245 break;
7246 }
7247 }
7248
7249 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7250 return 0;
7251 }
7252 #endif /* HAVE_LINUX_BTRACE */
7253
7254 /* See nat/linux-nat.h. */
7255
7256 ptid_t
7257 current_lwp_ptid (void)
7258 {
7259 return ptid_of (current_thread);
7260 }
7261
7262 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7263
7264 static int
7265 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7266 {
7267 if (the_low_target.breakpoint_kind_from_pc != NULL)
7268 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7269 else
7270 return default_breakpoint_kind_from_pc (pcptr);
7271 }
7272
7273 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7274
7275 static const gdb_byte *
7276 linux_sw_breakpoint_from_kind (int kind, int *size)
7277 {
7278 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7279
7280 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7281 }
7282
7283 /* Implementation of the target_ops method
7284 "breakpoint_kind_from_current_state". */
7285
7286 static int
7287 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7288 {
7289 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7290 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7291 else
7292 return linux_breakpoint_kind_from_pc (pcptr);
7293 }
7294
7295 /* Default implementation of linux_target_ops method "set_pc" for
7296 32-bit pc register which is literally named "pc". */
7297
7298 void
7299 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7300 {
7301 uint32_t newpc = pc;
7302
7303 supply_register_by_name (regcache, "pc", &newpc);
7304 }
7305
7306 /* Default implementation of linux_target_ops method "get_pc" for
7307 32-bit pc register which is literally named "pc". */
7308
7309 CORE_ADDR
7310 linux_get_pc_32bit (struct regcache *regcache)
7311 {
7312 uint32_t pc;
7313
7314 collect_register_by_name (regcache, "pc", &pc);
7315 if (debug_threads)
7316 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7317 return pc;
7318 }
7319
7320 /* Default implementation of linux_target_ops method "set_pc" for
7321 64-bit pc register which is literally named "pc". */
7322
7323 void
7324 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7325 {
7326 uint64_t newpc = pc;
7327
7328 supply_register_by_name (regcache, "pc", &newpc);
7329 }
7330
7331 /* Default implementation of linux_target_ops method "get_pc" for
7332 64-bit pc register which is literally named "pc". */
7333
7334 CORE_ADDR
7335 linux_get_pc_64bit (struct regcache *regcache)
7336 {
7337 uint64_t pc;
7338
7339 collect_register_by_name (regcache, "pc", &pc);
7340 if (debug_threads)
7341 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7342 return pc;
7343 }
7344
7345
7346 static struct target_ops linux_target_ops = {
7347 linux_create_inferior,
7348 linux_post_create_inferior,
7349 linux_attach,
7350 linux_kill,
7351 linux_detach,
7352 linux_mourn,
7353 linux_join,
7354 linux_thread_alive,
7355 linux_resume,
7356 linux_wait,
7357 linux_fetch_registers,
7358 linux_store_registers,
7359 linux_prepare_to_access_memory,
7360 linux_done_accessing_memory,
7361 linux_read_memory,
7362 linux_write_memory,
7363 linux_look_up_symbols,
7364 linux_request_interrupt,
7365 linux_read_auxv,
7366 linux_supports_z_point_type,
7367 linux_insert_point,
7368 linux_remove_point,
7369 linux_stopped_by_sw_breakpoint,
7370 linux_supports_stopped_by_sw_breakpoint,
7371 linux_stopped_by_hw_breakpoint,
7372 linux_supports_stopped_by_hw_breakpoint,
7373 linux_supports_hardware_single_step,
7374 linux_stopped_by_watchpoint,
7375 linux_stopped_data_address,
7376 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7377 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7378 && defined(PT_TEXT_END_ADDR)
7379 linux_read_offsets,
7380 #else
7381 NULL,
7382 #endif
7383 #ifdef USE_THREAD_DB
7384 thread_db_get_tls_address,
7385 #else
7386 NULL,
7387 #endif
7388 linux_qxfer_spu,
7389 hostio_last_error_from_errno,
7390 linux_qxfer_osdata,
7391 linux_xfer_siginfo,
7392 linux_supports_non_stop,
7393 linux_async,
7394 linux_start_non_stop,
7395 linux_supports_multi_process,
7396 linux_supports_fork_events,
7397 linux_supports_vfork_events,
7398 linux_supports_exec_events,
7399 linux_handle_new_gdb_connection,
7400 #ifdef USE_THREAD_DB
7401 thread_db_handle_monitor_command,
7402 #else
7403 NULL,
7404 #endif
7405 linux_common_core_of_thread,
7406 linux_read_loadmap,
7407 linux_process_qsupported,
7408 linux_supports_tracepoints,
7409 linux_read_pc,
7410 linux_write_pc,
7411 linux_thread_stopped,
7412 NULL,
7413 linux_pause_all,
7414 linux_unpause_all,
7415 linux_stabilize_threads,
7416 linux_install_fast_tracepoint_jump_pad,
7417 linux_emit_ops,
7418 linux_supports_disable_randomization,
7419 linux_get_min_fast_tracepoint_insn_len,
7420 linux_qxfer_libraries_svr4,
7421 linux_supports_agent,
7422 #ifdef HAVE_LINUX_BTRACE
7423 linux_supports_btrace,
7424 linux_enable_btrace,
7425 linux_low_disable_btrace,
7426 linux_low_read_btrace,
7427 linux_low_btrace_conf,
7428 #else
7429 NULL,
7430 NULL,
7431 NULL,
7432 NULL,
7433 NULL,
7434 #endif
7435 linux_supports_range_stepping,
7436 linux_proc_pid_to_exec_file,
7437 linux_mntns_open_cloexec,
7438 linux_mntns_unlink,
7439 linux_mntns_readlink,
7440 linux_breakpoint_kind_from_pc,
7441 linux_sw_breakpoint_from_kind,
7442 linux_proc_tid_get_name,
7443 linux_breakpoint_kind_from_current_state,
7444 linux_supports_software_single_step,
7445 linux_supports_catch_syscall,
7446 linux_get_ipa_tdesc_idx,
7447 };
7448
7449 #ifdef HAVE_LINUX_REGSETS
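/* Count the regsets provided by the target; the array is terminated
   by a sentinel entry whose size is negative.  */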
7450 void
7451 initialize_regsets_info (struct regsets_info *info)
7452 {
7453 for (info->num_regsets = 0;
7454 info->regsets[info->num_regsets].size >= 0;
7455 info->num_regsets++)
7456 ;
7457 }
7458 #endif
7459
7460 void
7461 initialize_low (void)
7462 {
7463 struct sigaction sigchld_action;
7464
7465 memset (&sigchld_action, 0, sizeof (sigchld_action));
7466 set_target_ops (&linux_target_ops);
7467
7468 linux_ptrace_init_warnings ();
7469
7470 sigchld_action.sa_handler = sigchld_handler;
7471 sigemptyset (&sigchld_action.sa_mask);
7472 sigchld_action.sa_flags = SA_RESTART;
7473 sigaction (SIGCHLD, &sigchld_action, NULL);
7474
7475 initialize_low_arch ();
7476
7477 linux_check_ptrace_features ();
7478 }