/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
                           struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

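/* A signal intercepted for a stopped LWP, queued for later delivery.
   Pending signals are chained through the PREV field.  */
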
struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF file at all.  Store the
   machine type in *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

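/* Delete LWP: remove its thread from the thread list, and free the
   lwp_info and its arch-specific data.  */
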
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
        {
          struct process_info *parent_proc;
          struct process_info *child_proc;
          struct lwp_info *child_lwp;
          struct thread_info *child_thr;
          struct target_desc *tdesc;

          ptid = ptid_build (new_pid, new_pid, 0);

          if (debug_threads)
            {
              debug_printf ("HEW: Got fork event from LWP %ld, "
                            "new child is %d\n",
                            ptid_get_lwp (ptid_of (event_thr)),
                            ptid_get_pid (ptid));
            }

          /* Add the new process to the tables and clone the breakpoint
             lists of the parent.  We need to do this even if the new process
             will be detached, since we will need the process object and the
             breakpoints to remove any breakpoints from memory when we
             detach, and the client side will access registers.  */
          child_proc = linux_add_process (new_pid, 0);
          gdb_assert (child_proc != NULL);
          child_lwp = add_lwp (ptid);
          gdb_assert (child_lwp != NULL);
          child_lwp->stopped = 1;
          child_lwp->must_set_ptrace_flags = 1;
          child_lwp->status_pending_p = 0;
          child_thr = get_lwp_thread (child_lwp);
          child_thr->last_resume_kind = resume_stop;
          child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

          /* If we're suspending all threads, leave this one suspended
             too.  If the fork/clone parent is stepping over a breakpoint,
             all other threads have been suspended already.  Leave the
             child suspended too.  */
          if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
              || event_lwp->bp_reinsert != 0)
            {
              if (debug_threads)
                debug_printf ("HEW: leaving child suspended\n");
              child_lwp->suspended = 1;
            }

          parent_proc = get_thread_process (event_thr);
          child_proc->attached = parent_proc->attached;

          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ()
              && event == PTRACE_EVENT_VFORK)
            {
              /* If we leave single-step breakpoints there, the child
                 will hit them, so uninsert single-step breakpoints
                 from the parent (and child).  Once the vfork child is
                 done, reinsert them back in the parent.  */
              uninsert_single_step_breakpoints (event_thr);
            }

          clone_all_breakpoints (child_thr, event_thr);

          tdesc = XNEW (struct target_desc);
          copy_target_description (tdesc, parent_proc->tdesc);
          child_proc->tdesc = tdesc;

          /* Clone arch-specific process data.  */
          if (the_low_target.new_fork != NULL)
            the_low_target.new_fork (parent_proc, child_proc);

          /* Save fork info in the parent thread.  */
          if (event == PTRACE_EVENT_FORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
          else if (event == PTRACE_EVENT_VFORK)
            event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

          event_lwp->waitstatus.value.related_pid = ptid;

          /* The status_pending field contains bits denoting the
             extended event, so when the pending event is handled,
             the handler will look at lwp->waitstatus.  */
          event_lwp->status_pending_p = 1;
          event_lwp->status_pending = wstat;

          /* Link the threads until the parent event is passed on to
             higher layers.  */
          event_lwp->fork_relative = child_lwp;
          child_lwp->fork_relative = event_lwp;

          /* If the parent thread is doing step-over with single-step
             breakpoints, the list of single-step breakpoints are cloned
             from the parent's.  Remove them from the child process.
             In case of vfork, we'll reinsert them back once vforked
             child is done.  */
          if (event_lwp->bp_reinsert != 0
              && can_software_single_step ())
            {
              /* The child process is forked and stopped, so it is safe
                 to access its memory without stopping all other threads
                 from other processes.  */
              delete_single_step_breakpoints (child_thr);

              gdb_assert (has_single_step_breakpoints (event_thr));
              gdb_assert (!has_single_step_breakpoints (child_thr));
            }

          /* Report the event.  */
          return 0;
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  If the fork/clone parent is stepping over a breakpoint,
         all other threads have been suspended already.  Leave the
         child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
          || event_lwp->bp_reinsert != 0)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
      else if (report_thread_events)
        {
          new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
        {
          reinsert_single_step_breakpoints (event_thr);

          gdb_assert (has_single_step_breakpoints (event_thr));
        }

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
        {
          debug_printf ("HEW: Got exec event from LWP %ld\n",
                        lwpid_of (event_thr));
        }

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
        = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
         to avoid sending a new list when the architecture doesn't change.
         Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
         system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap, check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
              (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
        {
          if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
              && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* The si_code is ambiguous on this arch -- check debug
                 registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
            {
              /* If we determine the LWP stopped for a SW breakpoint,
                 trust it.  Particularly don't check watchpoint
                 registers, because at least on s390, we'd find
                 stopped-by-watchpoint as long as there's a watchpoint
                 set.  */
              lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
            }
          else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
            {
              /* This can indicate either a hardware breakpoint or
                 hardware watchpoint.  Check debug registers.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
            }
          else if (siginfo.si_code == TRAP_TRACE)
            {
              /* We may have single stepped an instruction that
                 triggered a watchpoint.  In that case, on some
                 architectures (such as x86), instead of TRAP_HWBKPT,
                 si_code indicates TRAP_TRACE, and we need to check
                 the debug registers separately.  */
              if (!check_stopped_by_watchpoint (lwp))
                lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
            }
        }
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by trace\n",
                        target_pid_to_str (ptid_of (thr)));
        }
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

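/* Allocate a new lwp_info for PTID, let the low target initialize
   it, and add the corresponding thread to the thread list.  */
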
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      restore_original_signals_state ();

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
                                             &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
        {
          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

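/* Per-pid thread counter, used by last_thread_of_process_p.  */
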
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

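/* Return non-zero if no more than one thread of process PID is
   known.  */
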
static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD,
     we send one for each thread.  For years, we used PTRACE_KILL
     only, so we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

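/* Kill process PID and all of its LWPs, and wait for them to be
   reaped.  Returns -1 if PID is unknown, 0 on success.  */
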
static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
        the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
        throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
         zombie.  This can happen if one of the already-detached
         threads exits the whole thread group.  In that case we're
         still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
        {
          int ret, status;

          ret = my_waitpid (lwpid, &status, __WALL);
          if (ret == -1)
            {
              warning (_("Couldn't reap LWP %d while detaching: %s"),
                       lwpid, strerror (errno));
            }
          else if (!WIFEXITED (status) && !WIFSIGNALED (status))
            {
              warning (_("Reaping LWP %d while detaching "
                         "returned unexpected status 0x%x"),
                       lwpid, status);
            }
        }
      else
        {
          error (_("Can't detach %s: %s"),
                 target_pid_to_str (ptid_of (thread)),
                 strerror (save_errno));
        }
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
                    target_pid_to_str (ptid_of (thread)),
                    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  */

static int
linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (ptid_get_pid (entry->id) == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}

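/* Detach from process PID: detach from all of its LWPs, leader last,
   then forget about the process.  Returns -1 if PID is unknown, 0 on
   success.  */
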
static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* As there's a step over already in progress, let it finish first,
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

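/* Clean up after PROCESS exits: delete all of its LWPs and free its
   private data.  */
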
static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

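/* Wait for process PID to exit, collecting its exit status.  */
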
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
#endif

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

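/* Callback for find_inferior.  Returns non-zero if ENTRY's lwp id
   matches the ptid in DATA; if that ptid has no lwp component, its
   pid is used instead.  */
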
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

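/* Return the lwp_info for the LWP matching PTID, or NULL if no such
   LWP is known.  */
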
1845 struct lwp_info *
1846 find_lwp_pid (ptid_t ptid)
1847 {
1848 struct inferior_list_entry *thread
1849 = find_inferior (&all_threads, same_lwp, &ptid);
1850
1851 if (thread == NULL)
1852 return NULL;
1853
1854 return get_thread_lwp ((struct thread_info *) thread);
1855 }
1856
1857 /* Return the number of known LWPs in the tgid given by PID. */
1858
1859 static int
1860 num_lwps (int pid)
1861 {
1862 struct inferior_list_entry *inf, *tmp;
1863 int count = 0;
1864
1865 ALL_INFERIORS (&all_threads, inf, tmp)
1866 {
1867 if (ptid_get_pid (inf->id) == pid)
1868 count++;
1869 }
1870
1871 return count;
1872 }
1873
1874 /* The arguments passed to iterate_over_lwps. */
1875
1876 struct iterate_over_lwps_args
1877 {
1878 /* The FILTER argument passed to iterate_over_lwps. */
1879 ptid_t filter;
1880
1881 /* The CALLBACK argument passed to iterate_over_lwps. */
1882 iterate_over_lwps_ftype *callback;
1883
1884 /* The DATA argument passed to iterate_over_lwps. */
1885 void *data;
1886 };
1887
1888 /* Callback for find_inferior used by iterate_over_lwps to filter
1889 calls to the callback supplied to that function. Returning a
1890 nonzero value causes find_inferior to stop iterating and return
1891 the current inferior_list_entry.  Returning zero indicates that
1892 find_inferior should continue iterating.  */
1893
1894 static int
1895 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1896 {
1897 struct iterate_over_lwps_args *args
1898 = (struct iterate_over_lwps_args *) args_p;
1899
1900 if (ptid_match (entry->id, args->filter))
1901 {
1902 struct thread_info *thr = (struct thread_info *) entry;
1903 struct lwp_info *lwp = get_thread_lwp (thr);
1904
1905 return (*args->callback) (lwp, args->data);
1906 }
1907
1908 return 0;
1909 }
1910
1911 /* See nat/linux-nat.h. */
1912
1913 struct lwp_info *
1914 iterate_over_lwps (ptid_t filter,
1915 iterate_over_lwps_ftype callback,
1916 void *data)
1917 {
1918 struct iterate_over_lwps_args args = {filter, callback, data};
1919 struct inferior_list_entry *entry;
1920
1921 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1922 if (entry == NULL)
1923 return NULL;
1924
1925 return get_thread_lwp ((struct thread_info *) entry);
1926 }
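/* Usage sketch for iterate_over_lwps, with a hypothetical callback
   that stops the iteration at the first stopped LWP of process 1234:

     static int
     is_stopped (struct lwp_info *lwp, void *data)
     {
       return lwp->stopped;
     }

     struct lwp_info *lwp
       = iterate_over_lwps (pid_to_ptid (1234), is_stopped, NULL);  */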
1927
1928 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1929 their exits until all other threads in the group have exited. */
1930
1931 static void
1932 check_zombie_leaders (void)
1933 {
1934 struct process_info *proc, *tmp;
1935
1936 ALL_PROCESSES (proc, tmp)
1937 {
1938 pid_t leader_pid = pid_of (proc);
1939 struct lwp_info *leader_lp;
1940
1941 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1942
1943 if (debug_threads)
1944 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1945 "num_lwps=%d, zombie=%d\n",
1946 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1947 linux_proc_pid_is_zombie (leader_pid));
1948
1949 if (leader_lp != NULL && !leader_lp->stopped
1950 /* Check if there are other threads in the group, as we may
1951 have raced with the inferior simply exiting. */
1952 && !last_thread_of_process_p (leader_pid)
1953 && linux_proc_pid_is_zombie (leader_pid))
1954 {
1955 /* A leader zombie can mean one of two things:
1956
1957 - It exited, and there's an exit status pending and
1958 available, or only the leader exited (not the whole
1959 program). In the latter case, we can't waitpid the
1960 leader's exit status until all other threads are gone.
1961
1962 - There are 3 or more threads in the group, and a thread
1963 other than the leader exec'd. On an exec, the Linux
1964 kernel destroys all other threads (except the execing
1965 one) in the thread group, and resets the execing thread's
1966 tid to the tgid. No exit notification is sent for the
1967 execing thread -- from the ptracer's perspective, it
1968 appears as though the execing thread just vanishes.
1969 Until we reap all other threads except the leader and the
1970 execing thread, the leader will be zombie, and the
1971 execing thread will be in `D (disc sleep)'. As soon as
1972 all other threads are reaped, the execing thread changes
1973 its tid to the tgid, and the previous (zombie) leader
1974 vanishes, giving way to the "new" leader.  We could try
1975 distinguishing the exit and exec cases, by waiting once
1976 more, and seeing if something comes out, but it doesn't
1977 sound useful. The previous leader _does_ go away, and
1978 we'll re-add the new one once we see the exec event
1979 (which is just the same as what would happen if the
1980 previous leader did exit voluntarily before some other
1981 thread execs). */
1982
1983 if (debug_threads)
1984 debug_printf ("CZL: Thread group leader %d zombie "
1985 "(it exited, or another thread execd).\n",
1986 leader_pid);
1987
1988 delete_lwp (leader_lp);
1989 }
1990 }
1991 }
1992
1993 /* Callback for `find_inferior'. Returns the first LWP that is not
1994 stopped. ARG is a PTID filter. */
1995
1996 static int
1997 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1998 {
1999 struct thread_info *thr = (struct thread_info *) entry;
2000 struct lwp_info *lwp;
2001 ptid_t filter = *(ptid_t *) arg;
2002
2003 if (!ptid_match (ptid_of (thr), filter))
2004 return 0;
2005
2006 lwp = get_thread_lwp (thr);
2007 if (!lwp->stopped)
2008 return 1;
2009
2010 return 0;
2011 }
2012
2013 /* Increment LWP's suspend count. */
2014
2015 static void
2016 lwp_suspended_inc (struct lwp_info *lwp)
2017 {
2018 lwp->suspended++;
2019
2020 if (debug_threads && lwp->suspended > 4)
2021 {
2022 struct thread_info *thread = get_lwp_thread (lwp);
2023
2024 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2025 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2026 }
2027 }
2028
2029 /* Decrement LWP's suspend count. */
2030
2031 static void
2032 lwp_suspended_decr (struct lwp_info *lwp)
2033 {
2034 lwp->suspended--;
2035
2036 if (lwp->suspended < 0)
2037 {
2038 struct thread_info *thread = get_lwp_thread (lwp);
2039
2040 internal_error (__FILE__, __LINE__,
2041 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2042 lwp->suspended);
2043 }
2044 }
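/* The two helpers above are meant to be used in matched pairs
   bracketing a region during which LWP must not be resumed,
   e.g. (sketch):

     lwp_suspended_inc (lwp);
     ... momentarily stop, unpatch, and restart the other threads ...
     lwp_suspended_decr (lwp);

   handle_tracepoints below is one such pairing.  */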
2045
2046 /* This function should only be called if the LWP got a SIGTRAP.
2047
2048 Handle any tracepoint steps or hits.  Return 1 if a tracepoint
2049 event was handled, 0 otherwise.  */
2050
2051 static int
2052 handle_tracepoints (struct lwp_info *lwp)
2053 {
2054 struct thread_info *tinfo = get_lwp_thread (lwp);
2055 int tpoint_related_event = 0;
2056
2057 gdb_assert (lwp->suspended == 0);
2058
2059 /* If this tracepoint hit causes a tracing stop, we'll immediately
2060 uninsert tracepoints. To do this, we temporarily pause all
2061 threads, unpatch away, and then unpause threads. We need to make
2062 sure the unpausing doesn't resume LWP too. */
2063 lwp_suspended_inc (lwp);
2064
2065 /* And we need to be sure that any all-threads-stopping doesn't try
2066 to move threads out of the jump pads, as it could deadlock the
2067 inferior (LWP could be in the jump pad, maybe even holding the
2068 lock).  */
2069
2070 /* Do any necessary step collect actions. */
2071 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2072
2073 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2074
2075 /* See if we just hit a tracepoint and do its main collect
2076 actions. */
2077 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2078
2079 lwp_suspended_decr (lwp);
2080
2081 gdb_assert (lwp->suspended == 0);
2082 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2083
2084 if (tpoint_related_event)
2085 {
2086 if (debug_threads)
2087 debug_printf ("got a tracepoint event\n");
2088 return 1;
2089 }
2090
2091 return 0;
2092 }
2093
2094 /* Convenience wrapper.  Returns nonzero if LWP is presently collecting a
2095 fast tracepoint.  */
2096
2097 static int
2098 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2099 struct fast_tpoint_collect_status *status)
2100 {
2101 CORE_ADDR thread_area;
2102 struct thread_info *thread = get_lwp_thread (lwp);
2103
2104 if (the_low_target.get_thread_area == NULL)
2105 return 0;
2106
2107 /* Get the thread area address. This is used to recognize which
2108 thread is which when tracing with the in-process agent library.
2109 We don't read anything from the address, and treat it as opaque;
2110 it's the address itself that we assume is unique per-thread. */
2111 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2112 return 0;
2113
2114 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2115 }
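/* The return value mirrors fast_tracepoint_collecting: zero means LWP
   is not collecting; nonzero means it is somewhere in the jump pad,
   with 1 specifically meaning the relocated original instruction has
   not run yet -- see the r == 1 check in maybe_move_out_of_jump_pad
   below.  */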
2116
2117 /* The reason we resume in the caller is that we want to be able
2118 to pass lwp->status_pending as WSTAT, and we need to clear
2119 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2120 refuses to resume.  */
2121
2122 static int
2123 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2124 {
2125 struct thread_info *saved_thread;
2126
2127 saved_thread = current_thread;
2128 current_thread = get_lwp_thread (lwp);
2129
2130 if ((wstat == NULL
2131 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2132 && supports_fast_tracepoints ()
2133 && agent_loaded_p ())
2134 {
2135 struct fast_tpoint_collect_status status;
2136 int r;
2137
2138 if (debug_threads)
2139 debug_printf ("Checking whether LWP %ld needs to move out of the "
2140 "jump pad.\n",
2141 lwpid_of (current_thread));
2142
2143 r = linux_fast_tracepoint_collecting (lwp, &status);
2144
2145 if (wstat == NULL
2146 || (WSTOPSIG (*wstat) != SIGILL
2147 && WSTOPSIG (*wstat) != SIGFPE
2148 && WSTOPSIG (*wstat) != SIGSEGV
2149 && WSTOPSIG (*wstat) != SIGBUS))
2150 {
2151 lwp->collecting_fast_tracepoint = r;
2152
2153 if (r != 0)
2154 {
2155 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2156 {
2157 /* Haven't executed the original instruction yet.
2158 Set breakpoint there, and wait till it's hit,
2159 then single-step until exiting the jump pad. */
2160 lwp->exit_jump_pad_bkpt
2161 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2162 }
2163
2164 if (debug_threads)
2165 debug_printf ("Checking whether LWP %ld needs to move out of "
2166 "the jump pad...it does\n",
2167 lwpid_of (current_thread));
2168 current_thread = saved_thread;
2169
2170 return 1;
2171 }
2172 }
2173 else
2174 {
2175 /* If we get a synchronous signal while collecting, *and*
2176 while executing the (relocated) original instruction,
2177 reset the PC to point at the tpoint address, before
2178 reporting to GDB. Otherwise, it's an IPA lib bug: just
2179 report the signal to GDB, and pray for the best. */
2180
2181 lwp->collecting_fast_tracepoint = 0;
2182
2183 if (r != 0
2184 && (status.adjusted_insn_addr <= lwp->stop_pc
2185 && lwp->stop_pc < status.adjusted_insn_addr_end))
2186 {
2187 siginfo_t info;
2188 struct regcache *regcache;
2189
2190 /* The si_addr on a few signals references the address
2191 of the faulting instruction. Adjust that as
2192 well. */
2193 if ((WSTOPSIG (*wstat) == SIGILL
2194 || WSTOPSIG (*wstat) == SIGFPE
2195 || WSTOPSIG (*wstat) == SIGBUS
2196 || WSTOPSIG (*wstat) == SIGSEGV)
2197 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2198 (PTRACE_TYPE_ARG3) 0, &info) == 0
2199 /* Final check just to make sure we don't clobber
2200 the siginfo of non-kernel-sent signals. */
2201 && (uintptr_t) info.si_addr == lwp->stop_pc)
2202 {
2203 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2204 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2205 (PTRACE_TYPE_ARG3) 0, &info);
2206 }
2207
2208 regcache = get_thread_regcache (current_thread, 1);
2209 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2210 lwp->stop_pc = status.tpoint_addr;
2211
2212 /* Cancel any fast tracepoint lock this thread was
2213 holding. */
2214 force_unlock_trace_buffer ();
2215 }
2216
2217 if (lwp->exit_jump_pad_bkpt != NULL)
2218 {
2219 if (debug_threads)
2220 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2221 "stopping all threads momentarily.\n");
2222
2223 stop_all_lwps (1, lwp);
2224
2225 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2226 lwp->exit_jump_pad_bkpt = NULL;
2227
2228 unstop_all_lwps (1, lwp);
2229
2230 gdb_assert (lwp->suspended >= 0);
2231 }
2232 }
2233 }
2234
2235 if (debug_threads)
2236 debug_printf ("Checking whether LWP %ld needs to move out of the "
2237 "jump pad...no\n",
2238 lwpid_of (current_thread));
2239
2240 current_thread = saved_thread;
2241 return 0;
2242 }
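/* In short: a return of 1 above means LWP is still collecting and
   WSTAT must not be reported to GDB yet; a return of 0 means it is
   safe to report (any needed PC/siginfo adjustments have already been
   made).  */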
2243
2244 /* Enqueue one signal in the "signals to report later when out of the
2245 jump pad" list. */
2246
2247 static void
2248 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2249 {
2250 struct pending_signals *p_sig;
2251 struct thread_info *thread = get_lwp_thread (lwp);
2252
2253 if (debug_threads)
2254 debug_printf ("Deferring signal %d for LWP %ld.\n",
2255 WSTOPSIG (*wstat), lwpid_of (thread));
2256
2257 if (debug_threads)
2258 {
2259 struct pending_signals *sig;
2260
2261 for (sig = lwp->pending_signals_to_report;
2262 sig != NULL;
2263 sig = sig->prev)
2264 debug_printf (" Already queued %d\n",
2265 sig->signal);
2266
2267 debug_printf (" (no more currently queued signals)\n");
2268 }
2269
2270 /* Don't enqueue non-RT signals if they are already in the deferred
2271 queue. (SIGSTOP being the easiest signal to see ending up here
2272 twice) */
2273 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2274 {
2275 struct pending_signals *sig;
2276
2277 for (sig = lwp->pending_signals_to_report;
2278 sig != NULL;
2279 sig = sig->prev)
2280 {
2281 if (sig->signal == WSTOPSIG (*wstat))
2282 {
2283 if (debug_threads)
2284 debug_printf ("Not requeuing already queued non-RT signal %d"
2285 " for LWP %ld\n",
2286 sig->signal,
2287 lwpid_of (thread));
2288 return;
2289 }
2290 }
2291 }
2292
2293 p_sig = XCNEW (struct pending_signals);
2294 p_sig->prev = lwp->pending_signals_to_report;
2295 p_sig->signal = WSTOPSIG (*wstat);
2296
2297 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2298 &p_sig->info);
2299
2300 lwp->pending_signals_to_report = p_sig;
2301 }
2302
2303 /* Dequeue one signal from the "signals to report later when out of
2304 the jump pad" list. */
2305
2306 static int
2307 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2308 {
2309 struct thread_info *thread = get_lwp_thread (lwp);
2310
2311 if (lwp->pending_signals_to_report != NULL)
2312 {
2313 struct pending_signals **p_sig;
2314
2315 p_sig = &lwp->pending_signals_to_report;
2316 while ((*p_sig)->prev != NULL)
2317 p_sig = &(*p_sig)->prev;
2318
2319 *wstat = W_STOPCODE ((*p_sig)->signal);
2320 if ((*p_sig)->info.si_signo != 0)
2321 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2322 &(*p_sig)->info);
2323 free (*p_sig);
2324 *p_sig = NULL;
2325
2326 if (debug_threads)
2327 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2328 WSTOPSIG (*wstat), lwpid_of (thread));
2329
2330 if (debug_threads)
2331 {
2332 struct pending_signals *sig;
2333
2334 for (sig = lwp->pending_signals_to_report;
2335 sig != NULL;
2336 sig = sig->prev)
2337 debug_printf (" Still queued %d\n",
2338 sig->signal);
2339
2340 debug_printf (" (no more queued signals)\n");
2341 }
2342
2343 return 1;
2344 }
2345
2346 return 0;
2347 }
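/* Usage sketch (hypothetical caller): report one deferred signal, if
   any is queued:

     int wstat;

     if (dequeue_one_deferred_signal (lwp, &wstat))
       ... report WSTOPSIG (wstat) to GDB ...  */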
2348
2349 /* Fetch the possibly triggered data watchpoint info and store it in
2350 CHILD.
2351
2352 On some archs, like x86, that use debug registers to set
2353 watchpoints, the way to know which watched address trapped
2354 is to check the register that is used to select which address
2355 to watch.  The problem is, between setting the watchpoint
2356 and reading back which data address trapped, the user may change
2357 the set of watchpoints, and, as a consequence, GDB changes the
2358 debug registers in the inferior. To avoid reading back a stale
2359 stopped-data-address when that happens, we cache in LP the fact
2360 that a watchpoint trapped, and the corresponding data address, as
2361 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2362 registers meanwhile, we have the cached data we can rely on. */
2363
2364 static int
2365 check_stopped_by_watchpoint (struct lwp_info *child)
2366 {
2367 if (the_low_target.stopped_by_watchpoint != NULL)
2368 {
2369 struct thread_info *saved_thread;
2370
2371 saved_thread = current_thread;
2372 current_thread = get_lwp_thread (child);
2373
2374 if (the_low_target.stopped_by_watchpoint ())
2375 {
2376 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2377
2378 if (the_low_target.stopped_data_address != NULL)
2379 child->stopped_data_address
2380 = the_low_target.stopped_data_address ();
2381 else
2382 child->stopped_data_address = 0;
2383 }
2384
2385 current_thread = saved_thread;
2386 }
2387
2388 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2389 }
2390
2391 /* Return the ptrace options that we want to try to enable. */
2392
2393 static int
2394 linux_low_ptrace_options (int attached)
2395 {
2396 int options = 0;
2397
2398 if (!attached)
2399 options |= PTRACE_O_EXITKILL;
2400
2401 if (report_fork_events)
2402 options |= PTRACE_O_TRACEFORK;
2403
2404 if (report_vfork_events)
2405 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2406
2407 if (report_exec_events)
2408 options |= PTRACE_O_TRACEEXEC;
2409
2410 options |= PTRACE_O_TRACESYSGOOD;
2411
2412 return options;
2413 }
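/* For example, for a freshly launched (non-attached) inferior with
   only fork event reporting requested, this returns
   PTRACE_O_EXITKILL | PTRACE_O_TRACEFORK | PTRACE_O_TRACESYSGOOD.  */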
2414
2415 /* Do low-level handling of the event, and check if we should go on
2416 and pass it to caller code.  Return the affected LWP if we should
2417 pass it on, or NULL otherwise.  */
2418
2419 static struct lwp_info *
2420 linux_low_filter_event (int lwpid, int wstat)
2421 {
2422 struct lwp_info *child;
2423 struct thread_info *thread;
2424 int have_stop_pc = 0;
2425
2426 child = find_lwp_pid (pid_to_ptid (lwpid));
2427
2428 /* Check for stop events reported by a process we didn't already
2429 know about - anything not already in our LWP list.
2430
2431 If we're expecting to receive stopped processes after
2432 fork, vfork, and clone events, then we'll just add the
2433 new one to our list and go back to waiting for the event
2434 to be reported - the stopped process might be returned
2435 from waitpid before or after the event is.
2436
2437 But note the case of a non-leader thread exec'ing after the
2438 leader has exited and gone from our lists (because
2439 check_zombie_leaders deleted it). The non-leader thread
2440 changes its tid to the tgid. */
2441
2442 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2443 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2444 {
2445 ptid_t child_ptid;
2446
2447 /* A multi-thread exec after we had seen the leader exiting. */
2448 if (debug_threads)
2449 {
2450 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2451 "after exec.\n", lwpid);
2452 }
2453
2454 child_ptid = ptid_build (lwpid, lwpid, 0);
2455 child = add_lwp (child_ptid);
2456 child->stopped = 1;
2457 current_thread = child->thread;
2458 }
2459
2460 /* If we didn't find a process, one of two things presumably happened:
2461 - A process we started and then detached from has exited. Ignore it.
2462 - A process we are controlling has forked and the new child's stop
2463 was reported to us by the kernel. Save its PID. */
2464 if (child == NULL && WIFSTOPPED (wstat))
2465 {
2466 add_to_pid_list (&stopped_pids, lwpid, wstat);
2467 return NULL;
2468 }
2469 else if (child == NULL)
2470 return NULL;
2471
2472 thread = get_lwp_thread (child);
2473
2474 child->stopped = 1;
2475
2476 child->last_status = wstat;
2477
2478 /* Check if the thread has exited. */
2479 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2480 {
2481 if (debug_threads)
2482 debug_printf ("LLFE: %d exited.\n", lwpid);
2483
2484 if (finish_step_over (child))
2485 {
2486 /* Unsuspend all other LWPs, and set them back running again. */
2487 unsuspend_all_lwps (child);
2488 }
2489
2490 /* If there is at least one more LWP, then the exit signal was
2491 not the end of the debugged application and should be
2492 ignored, unless GDB wants to hear about thread exits. */
2493 if (report_thread_events
2494 || last_thread_of_process_p (pid_of (thread)))
2495 {
2496 /* Events are serialized to the GDB core, so we can't
2497 report this one right now.  Leave the status pending for
2498 the next time we're able to report it.  */
2499 mark_lwp_dead (child, wstat);
2500 return child;
2501 }
2502 else
2503 {
2504 delete_lwp (child);
2505 return NULL;
2506 }
2507 }
2508
2509 gdb_assert (WIFSTOPPED (wstat));
2510
2511 if (WIFSTOPPED (wstat))
2512 {
2513 struct process_info *proc;
2514
2515 /* Architecture-specific setup after inferior is running. */
2516 proc = find_process_pid (pid_of (thread));
2517 if (proc->tdesc == NULL)
2518 {
2519 if (proc->attached)
2520 {
2521 /* This needs to happen after we have attached to the
2522 inferior and it is stopped for the first time, but
2523 before we access any inferior registers. */
2524 linux_arch_setup_thread (thread);
2525 }
2526 else
2527 {
2528 /* The process is started, but GDBserver will do
2529 architecture-specific setup after the program stops at
2530 the first instruction. */
2531 child->status_pending_p = 1;
2532 child->status_pending = wstat;
2533 return child;
2534 }
2535 }
2536 }
2537
2538 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2539 {
2540 struct process_info *proc = find_process_pid (pid_of (thread));
2541 int options = linux_low_ptrace_options (proc->attached);
2542
2543 linux_enable_event_reporting (lwpid, options);
2544 child->must_set_ptrace_flags = 0;
2545 }
2546
2547 /* Always update syscall_state, even if it will be filtered later. */
2548 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2549 {
2550 child->syscall_state
2551 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2552 ? TARGET_WAITKIND_SYSCALL_RETURN
2553 : TARGET_WAITKIND_SYSCALL_ENTRY);
2554 }
2555 else
2556 {
2557 /* Almost all other ptrace-stops are known to be outside of system
2558 calls, with further exceptions in handle_extended_wait. */
2559 child->syscall_state = TARGET_WAITKIND_IGNORE;
2560 }
2561
2562 /* Be careful to not overwrite stop_pc until save_stop_reason is
2563 called. */
2564 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2565 && linux_is_extended_waitstatus (wstat))
2566 {
2567 child->stop_pc = get_pc (child);
2568 if (handle_extended_wait (&child, wstat))
2569 {
2570 /* The event has been handled, so just return without
2571 reporting it. */
2572 return NULL;
2573 }
2574 }
2575
2576 if (linux_wstatus_maybe_breakpoint (wstat))
2577 {
2578 if (save_stop_reason (child))
2579 have_stop_pc = 1;
2580 }
2581
2582 if (!have_stop_pc)
2583 child->stop_pc = get_pc (child);
2584
2585 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2586 && child->stop_expected)
2587 {
2588 if (debug_threads)
2589 debug_printf ("Expected stop.\n");
2590 child->stop_expected = 0;
2591
2592 if (thread->last_resume_kind == resume_stop)
2593 {
2594 /* We want to report the stop to the core. Treat the
2595 SIGSTOP as a normal event. */
2596 if (debug_threads)
2597 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2598 target_pid_to_str (ptid_of (thread)));
2599 }
2600 else if (stopping_threads != NOT_STOPPING_THREADS)
2601 {
2602 /* Stopping threads. We don't want this SIGSTOP to end up
2603 pending. */
2604 if (debug_threads)
2605 debug_printf ("LLW: SIGSTOP caught for %s "
2606 "while stopping threads.\n",
2607 target_pid_to_str (ptid_of (thread)));
2608 return NULL;
2609 }
2610 else
2611 {
2612 /* This is a delayed SIGSTOP. Filter out the event. */
2613 if (debug_threads)
2614 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2615 child->stepping ? "step" : "continue",
2616 target_pid_to_str (ptid_of (thread)));
2617
2618 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2619 return NULL;
2620 }
2621 }
2622
2623 child->status_pending_p = 1;
2624 child->status_pending = wstat;
2625 return child;
2626 }
2627
2628 /* Return true if THREAD is doing a hardware single step.  */
2629
2630 static int
2631 maybe_hw_step (struct thread_info *thread)
2632 {
2633 if (can_hardware_single_step ())
2634 return 1;
2635 else
2636 {
2637 /* GDBserver must insert a single-step breakpoint for software
2638 single step.  */
2639 gdb_assert (has_single_step_breakpoints (thread));
2640 return 0;
2641 }
2642 }
2643
2644 /* Resume LWPs that are currently stopped without any pending status
2645 to report, but are resumed from the core's perspective. */
2646
2647 static void
2648 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2649 {
2650 struct thread_info *thread = (struct thread_info *) entry;
2651 struct lwp_info *lp = get_thread_lwp (thread);
2652
2653 if (lp->stopped
2654 && !lp->suspended
2655 && !lp->status_pending_p
2656 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2657 {
2658 int step = 0;
2659
2660 if (thread->last_resume_kind == resume_step)
2661 step = maybe_hw_step (thread);
2662
2663 if (debug_threads)
2664 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2665 target_pid_to_str (ptid_of (thread)),
2666 paddress (lp->stop_pc),
2667 step);
2668
2669 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2670 }
2671 }
2672
2673 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2674 match FILTER_PTID (leaving others pending). The PTIDs can be:
2675 minus_one_ptid, to specify any child; a pid PTID, specifying all
2676 lwps of a thread group; or a PTID representing a single lwp. Store
2677 the stop status through the status pointer WSTAT. OPTIONS is
2678 passed to the waitpid call. Return 0 if no event was found and
2679 OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
2680 were found.  Return the PID of the stopped child otherwise.  */
2681
2682 static int
2683 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2684 int *wstatp, int options)
2685 {
2686 struct thread_info *event_thread;
2687 struct lwp_info *event_child, *requested_child;
2688 sigset_t block_mask, prev_mask;
2689
2690 retry:
2691 /* N.B. event_thread points to the thread_info struct that contains
2692 event_child. Keep them in sync. */
2693 event_thread = NULL;
2694 event_child = NULL;
2695 requested_child = NULL;
2696
2697 /* Check for a lwp with a pending status. */
2698
2699 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2700 {
2701 event_thread = (struct thread_info *)
2702 find_inferior_in_random (&all_threads, status_pending_p_callback,
2703 &filter_ptid);
2704 if (event_thread != NULL)
2705 event_child = get_thread_lwp (event_thread);
2706 if (debug_threads && event_thread)
2707 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2708 }
2709 else if (!ptid_equal (filter_ptid, null_ptid))
2710 {
2711 requested_child = find_lwp_pid (filter_ptid);
2712
2713 if (stopping_threads == NOT_STOPPING_THREADS
2714 && requested_child->status_pending_p
2715 && requested_child->collecting_fast_tracepoint)
2716 {
2717 enqueue_one_deferred_signal (requested_child,
2718 &requested_child->status_pending);
2719 requested_child->status_pending_p = 0;
2720 requested_child->status_pending = 0;
2721 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2722 }
2723
2724 if (requested_child->suspended
2725 && requested_child->status_pending_p)
2726 {
2727 internal_error (__FILE__, __LINE__,
2728 "requesting an event out of a"
2729 " suspended child?");
2730 }
2731
2732 if (requested_child->status_pending_p)
2733 {
2734 event_child = requested_child;
2735 event_thread = get_lwp_thread (event_child);
2736 }
2737 }
2738
2739 if (event_child != NULL)
2740 {
2741 if (debug_threads)
2742 debug_printf ("Got an event from pending child %ld (%04x)\n",
2743 lwpid_of (event_thread), event_child->status_pending);
2744 *wstatp = event_child->status_pending;
2745 event_child->status_pending_p = 0;
2746 event_child->status_pending = 0;
2747 current_thread = event_thread;
2748 return lwpid_of (event_thread);
2749 }
2750
2751 /* But if we don't find a pending event, we'll have to wait.
2752
2753 We only enter this loop if no process has a pending wait status.
2754 Thus any action taken in response to a wait status inside this
2755 loop is responding as soon as we detect the status, not after any
2756 pending events. */
2757
2758 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2759 all signals while here. */
2760 sigfillset (&block_mask);
2761 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2762
2763 /* Always pull all events out of the kernel. We'll randomly select
2764 an event LWP out of all that have events, to prevent
2765 starvation. */
2766 while (event_child == NULL)
2767 {
2768 pid_t ret = 0;
2769
2770 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2771 quirks:
2772
2773 - If the thread group leader exits while other threads in the
2774 thread group still exist, waitpid(TGID, ...) hangs. That
2775 waitpid won't return an exit status until the other threads
2776 in the group are reaped.
2777
2778 - When a non-leader thread execs, that thread just vanishes
2779 without reporting an exit (so we'd hang if we waited for it
2780 explicitly in that case). The exec event is reported to
2781 the TGID pid. */
2782 errno = 0;
2783 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2784
2785 if (debug_threads)
2786 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2787 ret, errno ? strerror (errno) : "ERRNO-OK");
2788
2789 if (ret > 0)
2790 {
2791 if (debug_threads)
2792 {
2793 debug_printf ("LLW: waitpid %ld received %s\n",
2794 (long) ret, status_to_str (*wstatp));
2795 }
2796
2797 /* Filter all events. IOW, leave all events pending. We'll
2798 randomly select an event LWP out of all that have events
2799 below. */
2800 linux_low_filter_event (ret, *wstatp);
2801 /* Retry until nothing comes out of waitpid. A single
2802 SIGCHLD can indicate more than one child stopped. */
2803 continue;
2804 }
2805
2806 /* Now that we've pulled all events out of the kernel, resume
2807 LWPs that don't have an interesting event to report. */
2808 if (stopping_threads == NOT_STOPPING_THREADS)
2809 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2810
2811 /* ... and find an LWP with a status to report to the core, if
2812 any. */
2813 event_thread = (struct thread_info *)
2814 find_inferior_in_random (&all_threads, status_pending_p_callback,
2815 &filter_ptid);
2816 if (event_thread != NULL)
2817 {
2818 event_child = get_thread_lwp (event_thread);
2819 *wstatp = event_child->status_pending;
2820 event_child->status_pending_p = 0;
2821 event_child->status_pending = 0;
2822 break;
2823 }
2824
2825 /* Check for zombie thread group leaders. Those can't be reaped
2826 until all other threads in the thread group are. */
2827 check_zombie_leaders ();
2828
2829 /* If there are no resumed children left in the set of LWPs we
2830 want to wait for, bail. We can't just block in
2831 waitpid/sigsuspend, because lwps might have been left stopped
2832 in trace-stop state, and we'd be stuck forever waiting for
2833 their status to change (which would only happen if we resumed
2834 them). Even if WNOHANG is set, this return code is preferred
2835 over 0 (below), as it is more detailed. */
2836 if ((find_inferior (&all_threads,
2837 not_stopped_callback,
2838 &wait_ptid) == NULL))
2839 {
2840 if (debug_threads)
2841 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2842 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2843 return -1;
2844 }
2845
2846 /* No interesting event to report to the caller. */
2847 if ((options & WNOHANG))
2848 {
2849 if (debug_threads)
2850 debug_printf ("WNOHANG set, no event found\n");
2851
2852 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2853 return 0;
2854 }
2855
2856 /* Block until we get an event reported with SIGCHLD. */
2857 if (debug_threads)
2858 debug_printf ("sigsuspend'ing\n");
2859
2860 sigsuspend (&prev_mask);
2861 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2862 goto retry;
2863 }
2864
2865 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2866
2867 current_thread = event_thread;
2868
2869 return lwpid_of (event_thread);
2870 }
2871
2872 /* Wait for an event from child(ren) PTID. PTIDs can be:
2873 minus_one_ptid, to specify any child; a pid PTID, specifying all
2874 lwps of a thread group; or a PTID representing a single lwp. Store
2875 the stop status through the status pointer WSTAT. OPTIONS is
2876 passed to the waitpid call. Return 0 if no event was found and
2877 OPTIONS contains WNOHANG.  Return -1 if no unwaited-for children
2878 were found.  Return the PID of the stopped child otherwise.  */
2879
2880 static int
2881 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2882 {
2883 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2884 }
2885
2886 /* Count the LWPs that have had events.  */
2887
2888 static int
2889 count_events_callback (struct inferior_list_entry *entry, void *data)
2890 {
2891 struct thread_info *thread = (struct thread_info *) entry;
2892 struct lwp_info *lp = get_thread_lwp (thread);
2893 int *count = (int *) data;
2894
2895 gdb_assert (count != NULL);
2896
2897 /* Count only resumed LWPs that have an event pending. */
2898 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2899 && lp->status_pending_p)
2900 (*count)++;
2901
2902 return 0;
2903 }
2904
2905 /* Select the LWP (if any) that is currently being single-stepped. */
2906
2907 static int
2908 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2909 {
2910 struct thread_info *thread = (struct thread_info *) entry;
2911 struct lwp_info *lp = get_thread_lwp (thread);
2912
2913 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2914 && thread->last_resume_kind == resume_step
2915 && lp->status_pending_p)
2916 return 1;
2917 else
2918 return 0;
2919 }
2920
2921 /* Select the Nth LWP that has had an event. */
2922
2923 static int
2924 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2925 {
2926 struct thread_info *thread = (struct thread_info *) entry;
2927 struct lwp_info *lp = get_thread_lwp (thread);
2928 int *selector = (int *) data;
2929
2930 gdb_assert (selector != NULL);
2931
2932 /* Select only resumed LWPs that have an event pending. */
2933 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2934 && lp->status_pending_p)
2935 if ((*selector)-- == 0)
2936 return 1;
2937
2938 return 0;
2939 }
2940
2941 /* Select one LWP out of those that have events pending. */
2942
2943 static void
2944 select_event_lwp (struct lwp_info **orig_lp)
2945 {
2946 int num_events = 0;
2947 int random_selector;
2948 struct thread_info *event_thread = NULL;
2949
2950 /* In all-stop, give preference to the LWP that is being
2951 single-stepped. There will be at most one, and it's the LWP that
2952 the core is most interested in. If we didn't do this, then we'd
2953 have to handle pending step SIGTRAPs somehow in case the core
2954 later continues the previously-stepped thread, otherwise we'd
2955 report the pending SIGTRAP, and the core, not having stepped the
2956 thread, wouldn't understand what the trap was for, and therefore
2957 would report it to the user as a random signal. */
2958 if (!non_stop)
2959 {
2960 event_thread
2961 = (struct thread_info *) find_inferior (&all_threads,
2962 select_singlestep_lwp_callback,
2963 NULL);
2964 if (event_thread != NULL)
2965 {
2966 if (debug_threads)
2967 debug_printf ("SEL: Select single-step %s\n",
2968 target_pid_to_str (ptid_of (event_thread)));
2969 }
2970 }
2971 if (event_thread == NULL)
2972 {
2973 /* No single-stepping LWP. Select one at random, out of those
2974 which have had events. */
2975
2976 /* First see how many events we have. */
2977 find_inferior (&all_threads, count_events_callback, &num_events);
2978 gdb_assert (num_events > 0);
2979
2980 /* Now randomly pick a LWP out of those that have had
2981 events. */
2982 random_selector = (int)
2983 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
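      /* E.g., with num_events == 3 this maps rand ()'s full range
         uniformly onto {0, 1, 2}; dividing by RAND_MAX + 1.0 keeps the
         result strictly below num_events.  */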
2984
2985 if (debug_threads && num_events > 1)
2986 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2987 num_events, random_selector);
2988
2989 event_thread
2990 = (struct thread_info *) find_inferior (&all_threads,
2991 select_event_lwp_callback,
2992 &random_selector);
2993 }
2994
2995 if (event_thread != NULL)
2996 {
2997 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2998
2999 /* Switch the event LWP. */
3000 *orig_lp = event_lp;
3001 }
3002 }
3003
3004 /* Decrement the suspend count of an LWP. */
3005
3006 static int
3007 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
3008 {
3009 struct thread_info *thread = (struct thread_info *) entry;
3010 struct lwp_info *lwp = get_thread_lwp (thread);
3011
3012 /* Ignore EXCEPT. */
3013 if (lwp == except)
3014 return 0;
3015
3016 lwp_suspended_decr (lwp);
3017 return 0;
3018 }
3019
3020 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3021 non-NULL.  */
3022
3023 static void
3024 unsuspend_all_lwps (struct lwp_info *except)
3025 {
3026 find_inferior (&all_threads, unsuspend_one_lwp, except);
3027 }
3028
3029 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3030 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3031 void *data);
3032 static int lwp_running (struct inferior_list_entry *entry, void *data);
3033 static ptid_t linux_wait_1 (ptid_t ptid,
3034 struct target_waitstatus *ourstatus,
3035 int target_options);
3036
3037 /* Stabilize threads (move out of jump pads).
3038
3039 If a thread is midway collecting a fast tracepoint, we need to
3040 finish the collection and move it out of the jump pad before
3041 reporting the signal.
3042
3043 This avoids recursion while collecting (when a signal arrives
3044 midway, and the signal handler itself collects), which would trash
3045 the trace buffer. In case the user set a breakpoint in a signal
3046 handler, this avoids the backtrace showing the jump pad, etc.
3047 Most importantly, there are certain things we can't do safely if
3048 threads are stopped in a jump pad (or in its callees).  For
3049 example:
3050
3051 - starting a new trace run.  A thread still collecting the
3052 previous run could trash the trace buffer when resumed.  The trace
3053 buffer control structures would have been reset but the thread had
3054 no way to tell.  The thread could even be midway through memcpy'ing
3055 to the buffer, which would mean that when resumed, it would clobber
3056 the trace buffer that had been set up for the new run.
3057
3058 - we can't rewrite/reuse the jump pads for new tracepoints
3059 safely.  Say you do tstart while a thread is stopped midway
3060 through collecting.  When the thread is later resumed, it finishes the
3061 collection, and returns to the jump pad, to execute the original
3062 instruction that was under the tracepoint jump at the time the
3063 older run had been started. If the jump pad had been rewritten
3064 since for something else in the new run, the thread would now
3065 execute the wrong / random instructions. */
3066
3067 static void
3068 linux_stabilize_threads (void)
3069 {
3070 struct thread_info *saved_thread;
3071 struct thread_info *thread_stuck;
3072
3073 thread_stuck
3074 = (struct thread_info *) find_inferior (&all_threads,
3075 stuck_in_jump_pad_callback,
3076 NULL);
3077 if (thread_stuck != NULL)
3078 {
3079 if (debug_threads)
3080 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3081 lwpid_of (thread_stuck));
3082 return;
3083 }
3084
3085 saved_thread = current_thread;
3086
3087 stabilizing_threads = 1;
3088
3089 /* Kick 'em all. */
3090 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3091
3092 /* Loop until all are stopped out of the jump pads. */
3093 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3094 {
3095 struct target_waitstatus ourstatus;
3096 struct lwp_info *lwp;
3097 int wstat;
3098
3099 /* Note that we go through the full wait event loop.  While
3100 moving threads out of the jump pad, we need to be able to step
3101 over internal breakpoints and such.  */
3102 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3103
3104 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3105 {
3106 lwp = get_thread_lwp (current_thread);
3107
3108 /* Lock it. */
3109 lwp_suspended_inc (lwp);
3110
3111 if (ourstatus.value.sig != GDB_SIGNAL_0
3112 || current_thread->last_resume_kind == resume_stop)
3113 {
3114 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3115 enqueue_one_deferred_signal (lwp, &wstat);
3116 }
3117 }
3118 }
3119
3120 unsuspend_all_lwps (NULL);
3121
3122 stabilizing_threads = 0;
3123
3124 current_thread = saved_thread;
3125
3126 if (debug_threads)
3127 {
3128 thread_stuck
3129 = (struct thread_info *) find_inferior (&all_threads,
3130 stuck_in_jump_pad_callback,
3131 NULL);
3132 if (thread_stuck != NULL)
3133 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3134 lwpid_of (thread_stuck));
3135 }
3136 }
3137
3138 /* Convenience function that is called when the kernel reports an
3139 event that is not passed out to GDB. */
3140
3141 static ptid_t
3142 ignore_event (struct target_waitstatus *ourstatus)
3143 {
3144 /* If we got an event, there may still be others, as a single
3145 SIGCHLD can indicate more than one child stopped. This forces
3146 another target_wait call. */
3147 async_file_mark ();
3148
3149 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3150 return null_ptid;
3151 }
3152
3153 /* Convenience function that is called when the kernel reports an exit
3154 event. This decides whether to report the event to GDB as a
3155 process exit event, a thread exit event, or to suppress the
3156 event. */
3157
3158 static ptid_t
3159 filter_exit_event (struct lwp_info *event_child,
3160 struct target_waitstatus *ourstatus)
3161 {
3162 struct thread_info *thread = get_lwp_thread (event_child);
3163 ptid_t ptid = ptid_of (thread);
3164
3165 if (!last_thread_of_process_p (pid_of (thread)))
3166 {
3167 if (report_thread_events)
3168 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3169 else
3170 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3171
3172 delete_lwp (event_child);
3173 }
3174 return ptid;
3175 }
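/* To summarize the decision above: for the last LWP of a process, the
   caller's TARGET_WAITKIND_EXITED status is left untouched; any other
   LWP is deleted, and its exit is reported as
   TARGET_WAITKIND_THREAD_EXITED if GDB asked for thread events, or
   suppressed otherwise.  */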
3176
3177 /* Returns 1 if GDB is interested in any event_child syscalls. */
3178
3179 static int
3180 gdb_catching_syscalls_p (struct lwp_info *event_child)
3181 {
3182 struct thread_info *thread = get_lwp_thread (event_child);
3183 struct process_info *proc = get_thread_process (thread);
3184
3185 return !VEC_empty (int, proc->syscalls_to_catch);
3186 }
3187
3188 /* Returns 1 if GDB is interested in the event_child syscall.
3189 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3190
3191 static int
3192 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3193 {
3194 int i, iter;
3195 int sysno;
3196 struct thread_info *thread = get_lwp_thread (event_child);
3197 struct process_info *proc = get_thread_process (thread);
3198
3199 if (VEC_empty (int, proc->syscalls_to_catch))
3200 return 0;
3201
3202 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3203 return 1;
3204
3205 get_syscall_trapinfo (event_child, &sysno);
3206 for (i = 0;
3207 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3208 i++)
3209 if (iter == sysno)
3210 return 1;
3211
3212 return 0;
3213 }
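/* For instance (hypothetical contents): with proc->syscalls_to_catch
   holding { ANY_SYSCALL }, this returns 1 for every syscall stop; with
   it holding specific syscall numbers, only a stop whose number from
   get_syscall_trapinfo matches one of them returns 1.  */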
3214
3215 /* Wait for an event from the process, and return its status.  */
3216
3217 static ptid_t
3218 linux_wait_1 (ptid_t ptid,
3219 struct target_waitstatus *ourstatus, int target_options)
3220 {
3221 int w;
3222 struct lwp_info *event_child;
3223 int options;
3224 int pid;
3225 int step_over_finished;
3226 int bp_explains_trap;
3227 int maybe_internal_trap;
3228 int report_to_gdb;
3229 int trace_event;
3230 int in_step_range;
3231 int any_resumed;
3232
3233 if (debug_threads)
3234 {
3235 debug_enter ();
3236 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3237 }
3238
3239 /* Translate generic target options into linux options. */
3240 options = __WALL;
3241 if (target_options & TARGET_WNOHANG)
3242 options |= WNOHANG;
3243
3244 bp_explains_trap = 0;
3245 trace_event = 0;
3246 in_step_range = 0;
3247 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3248
3249 /* Find a resumed LWP, if any. */
3250 if (find_inferior (&all_threads,
3251 status_pending_p_callback,
3252 &minus_one_ptid) != NULL)
3253 any_resumed = 1;
3254 else if ((find_inferior (&all_threads,
3255 not_stopped_callback,
3256 &minus_one_ptid) != NULL))
3257 any_resumed = 1;
3258 else
3259 any_resumed = 0;
3260
3261 if (ptid_equal (step_over_bkpt, null_ptid))
3262 pid = linux_wait_for_event (ptid, &w, options);
3263 else
3264 {
3265 if (debug_threads)
3266 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3267 target_pid_to_str (step_over_bkpt));
3268 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3269 }
3270
3271 if (pid == 0 || (pid == -1 && !any_resumed))
3272 {
3273 gdb_assert (target_options & TARGET_WNOHANG);
3274
3275 if (debug_threads)
3276 {
3277 debug_printf ("linux_wait_1 ret = null_ptid, "
3278 "TARGET_WAITKIND_IGNORE\n");
3279 debug_exit ();
3280 }
3281
3282 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3283 return null_ptid;
3284 }
3285 else if (pid == -1)
3286 {
3287 if (debug_threads)
3288 {
3289 debug_printf ("linux_wait_1 ret = null_ptid, "
3290 "TARGET_WAITKIND_NO_RESUMED\n");
3291 debug_exit ();
3292 }
3293
3294 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3295 return null_ptid;
3296 }
3297
3298 event_child = get_thread_lwp (current_thread);
3299
3300 /* linux_wait_for_event only returns an exit status for the last
3301 child of a process. Report it. */
3302 if (WIFEXITED (w) || WIFSIGNALED (w))
3303 {
3304 if (WIFEXITED (w))
3305 {
3306 ourstatus->kind = TARGET_WAITKIND_EXITED;
3307 ourstatus->value.integer = WEXITSTATUS (w);
3308
3309 if (debug_threads)
3310 {
3311 debug_printf ("linux_wait_1 ret = %s, exited with "
3312 "retcode %d\n",
3313 target_pid_to_str (ptid_of (current_thread)),
3314 WEXITSTATUS (w));
3315 debug_exit ();
3316 }
3317 }
3318 else
3319 {
3320 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3321 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3322
3323 if (debug_threads)
3324 {
3325 debug_printf ("linux_wait_1 ret = %s, terminated with "
3326 "signal %d\n",
3327 target_pid_to_str (ptid_of (current_thread)),
3328 WTERMSIG (w));
3329 debug_exit ();
3330 }
3331 }
3332
3333 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3334 return filter_exit_event (event_child, ourstatus);
3335
3336 return ptid_of (current_thread);
3337 }
3338
3339 /* If step-over executes a breakpoint instruction, in the case of a
3340 hardware single step it means a gdb/gdbserver breakpoint had been
3341 planted on top of a permanent breakpoint, in the case of a software
3342 single step it may just mean that gdbserver hit the reinsert breakpoint.
3343 The PC has been adjusted by save_stop_reason to point at
3344 the breakpoint address.
3345 So in the case of hardware single step, advance the PC manually
3346 past the breakpoint; in the case of software single step, advance only
3347 if it's not the single_step_breakpoint we are hitting.
3348 This prevents the program from trapping on a permanent breakpoint
3349 forever.  */
3350 if (!ptid_equal (step_over_bkpt, null_ptid)
3351 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3352 && (event_child->stepping
3353 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3354 {
3355 int increment_pc = 0;
3356 int breakpoint_kind = 0;
3357 CORE_ADDR stop_pc = event_child->stop_pc;
3358
3359 breakpoint_kind =
3360 the_target->breakpoint_kind_from_current_state (&stop_pc);
3361 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3362
3363 if (debug_threads)
3364 {
3365 debug_printf ("step-over for %s executed software breakpoint\n",
3366 target_pid_to_str (ptid_of (current_thread)));
3367 }
3368
3369 if (increment_pc != 0)
3370 {
3371 struct regcache *regcache
3372 = get_thread_regcache (current_thread, 1);
3373
3374 event_child->stop_pc += increment_pc;
3375 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3376
3377 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3378 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3379 }
3380 }
3381
3382 /* If this event was not handled before, and is not a SIGTRAP, we
3383 report it. SIGILL and SIGSEGV are also treated as traps in case
3384 a breakpoint is inserted at the current PC. If this target does
3385 not support internal breakpoints at all, we also report the
3386 SIGTRAP without further processing; it's of no concern to us. */
3387 maybe_internal_trap
3388 = (supports_breakpoints ()
3389 && (WSTOPSIG (w) == SIGTRAP
3390 || ((WSTOPSIG (w) == SIGILL
3391 || WSTOPSIG (w) == SIGSEGV)
3392 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3393
3394 if (maybe_internal_trap)
3395 {
3396 /* Handle anything that requires bookkeeping before deciding to
3397 report the event or continue waiting. */
3398
3399 /* First check if we can explain the SIGTRAP with an internal
3400 breakpoint, or if we should possibly report the event to GDB.
3401 Do this before anything that may remove or insert a
3402 breakpoint. */
3403 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3404
3405 /* We have a SIGTRAP, possibly a step-over dance has just
3406 finished. If so, tweak the state machine accordingly,
3407 reinsert breakpoints and delete any single-step
3408 breakpoints. */
3409 step_over_finished = finish_step_over (event_child);
3410
3411 /* Now invoke the callbacks of any internal breakpoints there. */
3412 check_breakpoints (event_child->stop_pc);
3413
3414 /* Handle tracepoint data collecting. This may overflow the
3415 trace buffer, and cause a tracing stop, removing
3416 breakpoints. */
3417 trace_event = handle_tracepoints (event_child);
3418
3419 if (bp_explains_trap)
3420 {
3421 if (debug_threads)
3422 debug_printf ("Hit a gdbserver breakpoint.\n");
3423 }
3424 }
3425 else
3426 {
3427 /* We have some other signal, possibly a step-over dance was in
3428 progress, and it should be cancelled too. */
3429 step_over_finished = finish_step_over (event_child);
3430 }
3431
3432 /* We have all the data we need. Either report the event to GDB, or
3433 resume threads and keep waiting for more. */
3434
3435 /* If we're collecting a fast tracepoint, finish the collection and
3436 move out of the jump pad before delivering a signal. See
3437 linux_stabilize_threads. */
3438
3439 if (WIFSTOPPED (w)
3440 && WSTOPSIG (w) != SIGTRAP
3441 && supports_fast_tracepoints ()
3442 && agent_loaded_p ())
3443 {
3444 if (debug_threads)
3445 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3446 "to defer or adjust it.\n",
3447 WSTOPSIG (w), lwpid_of (current_thread));
3448
3449 /* Allow debugging the jump pad itself. */
3450 if (current_thread->last_resume_kind != resume_step
3451 && maybe_move_out_of_jump_pad (event_child, &w))
3452 {
3453 enqueue_one_deferred_signal (event_child, &w);
3454
3455 if (debug_threads)
3456 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3457 WSTOPSIG (w), lwpid_of (current_thread));
3458
3459 linux_resume_one_lwp (event_child, 0, 0, NULL);
3460
3461 if (debug_threads)
3462 debug_exit ();
3463 return ignore_event (ourstatus);
3464 }
3465 }
3466
3467 if (event_child->collecting_fast_tracepoint)
3468 {
3469 if (debug_threads)
3470 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3471 "Check if we're already there.\n",
3472 lwpid_of (current_thread),
3473 event_child->collecting_fast_tracepoint);
3474
3475 trace_event = 1;
3476
3477 event_child->collecting_fast_tracepoint
3478 = linux_fast_tracepoint_collecting (event_child, NULL);
3479
3480 if (event_child->collecting_fast_tracepoint != 1)
3481 {
3482 /* No longer need this breakpoint. */
3483 if (event_child->exit_jump_pad_bkpt != NULL)
3484 {
3485 if (debug_threads)
3486 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3487 "stopping all threads momentarily.\n");
3488
3489 /* Other running threads could hit this breakpoint.
3490 We don't handle moribund locations like GDB does,
3491 instead we always pause all threads when removing
3492 breakpoints, so that any step-over or
3493 decr_pc_after_break adjustment is always taken
3494 care of while the breakpoint is still
3495 inserted. */
3496 stop_all_lwps (1, event_child);
3497
3498 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3499 event_child->exit_jump_pad_bkpt = NULL;
3500
3501 unstop_all_lwps (1, event_child);
3502
3503 gdb_assert (event_child->suspended >= 0);
3504 }
3505 }
3506
3507 if (event_child->collecting_fast_tracepoint == 0)
3508 {
3509 if (debug_threads)
3510 debug_printf ("fast tracepoint finished "
3511 "collecting successfully.\n");
3512
3513 /* We may have a deferred signal to report. */
3514 if (dequeue_one_deferred_signal (event_child, &w))
3515 {
3516 if (debug_threads)
3517 debug_printf ("dequeued one signal.\n");
3518 }
3519 else
3520 {
3521 if (debug_threads)
3522 debug_printf ("no deferred signals.\n");
3523
3524 if (stabilizing_threads)
3525 {
3526 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3527 ourstatus->value.sig = GDB_SIGNAL_0;
3528
3529 if (debug_threads)
3530 {
3531 debug_printf ("linux_wait_1 ret = %s, stopped "
3532 "while stabilizing threads\n",
3533 target_pid_to_str (ptid_of (current_thread)));
3534 debug_exit ();
3535 }
3536
3537 return ptid_of (current_thread);
3538 }
3539 }
3540 }
3541 }
3542
3543 /* Check whether GDB would be interested in this event. */
3544
3545 /* Check if GDB is interested in this syscall. */
3546 if (WIFSTOPPED (w)
3547 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3548 && !gdb_catch_this_syscall_p (event_child))
3549 {
3550 if (debug_threads)
3551 {
3552 debug_printf ("Ignored syscall for LWP %ld.\n",
3553 lwpid_of (current_thread));
3554 }
3555
3556 linux_resume_one_lwp (event_child, event_child->stepping,
3557 0, NULL);
3558
3559 if (debug_threads)
3560 debug_exit ();
3561 return ignore_event (ourstatus);
3562 }
3563
3564 /* If GDB is not interested in this signal, don't stop other
3565 threads, and don't report it to GDB. Just resume the inferior
3566 right away. We do this for threading-related signals as well as
3567 any that GDB specifically requested we ignore. But never ignore
3568 SIGSTOP if we sent it ourselves, and do not ignore signals when
3569 stepping - they may require special handling to skip the signal
3570 handler. Also never ignore signals that could be caused by a
3571 breakpoint. */
3572 if (WIFSTOPPED (w)
3573 && current_thread->last_resume_kind != resume_step
3574 && (
3575 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3576 (current_process ()->priv->thread_db != NULL
3577 && (WSTOPSIG (w) == __SIGRTMIN
3578 || WSTOPSIG (w) == __SIGRTMIN + 1))
3579 ||
3580 #endif
3581 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3582 && !(WSTOPSIG (w) == SIGSTOP
3583 && current_thread->last_resume_kind == resume_stop)
3584 && !linux_wstatus_maybe_breakpoint (w))))
3585 {
3586 siginfo_t info, *info_p;
3587
3588 if (debug_threads)
3589 debug_printf ("Ignored signal %d for LWP %ld.\n",
3590 WSTOPSIG (w), lwpid_of (current_thread));
3591
3592 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3593 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3594 info_p = &info;
3595 else
3596 info_p = NULL;
3597
3598 if (step_over_finished)
3599 {
3600 /* We cancelled this thread's step-over above. We still
3601 need to unsuspend all other LWPs, and set them back
3602 running again while the signal handler runs. */
3603 unsuspend_all_lwps (event_child);
3604
3605 /* Enqueue the pending signal info so that proceed_all_lwps
3606 doesn't lose it. */
3607 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3608
3609 proceed_all_lwps ();
3610 }
3611 else
3612 {
3613 linux_resume_one_lwp (event_child, event_child->stepping,
3614 WSTOPSIG (w), info_p);
3615 }
3616
3617 if (debug_threads)
3618 debug_exit ();
3619
3620 return ignore_event (ourstatus);
3621 }
3622
3623 /* Note that all addresses are always "out of the step range" when
3624 there's no range to begin with. */
3625 in_step_range = lwp_in_step_range (event_child);
3626
3627 /* If GDB wanted this thread to single step, and the thread is out
3628 of the step range, we always want to report the SIGTRAP, and let
3629 GDB handle it. Watchpoints should always be reported. So should
3630 signals we can't explain. A SIGTRAP we can't explain could be a
3631 GDB breakpoint --- we may or may not support Z0 breakpoints.  If we
3632 do, we'll be able to handle GDB breakpoints on top of internal
3633 breakpoints, by handling the internal breakpoint and still
3634 reporting the event to GDB.  If we don't, we're out of luck; GDB
3635 won't see the breakpoint hit. If we see a single-step event but
3636 the thread should be continuing, don't pass the trap to gdb.
3637 That indicates that we had previously finished a single-step but
3638 left the single-step pending -- see
3639 complete_ongoing_step_over. */
3640 report_to_gdb = (!maybe_internal_trap
3641 || (current_thread->last_resume_kind == resume_step
3642 && !in_step_range)
3643 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3644 || (!in_step_range
3645 && !bp_explains_trap
3646 && !trace_event
3647 && !step_over_finished
3648 && !(current_thread->last_resume_kind == resume_continue
3649 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3650 || (gdb_breakpoint_here (event_child->stop_pc)
3651 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3652 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3653 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3654
3655 run_breakpoint_commands (event_child->stop_pc);
3656
3657 /* We found no reason GDB would want us to stop. We either hit one
3658 of our own breakpoints, or finished an internal step GDB
3659 shouldn't know about. */
3660 if (!report_to_gdb)
3661 {
3662 if (debug_threads)
3663 {
3664 if (bp_explains_trap)
3665 debug_printf ("Hit a gdbserver breakpoint.\n");
3666 if (step_over_finished)
3667 debug_printf ("Step-over finished.\n");
3668 if (trace_event)
3669 debug_printf ("Tracepoint event.\n");
3670 if (lwp_in_step_range (event_child))
3671 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3672 paddress (event_child->stop_pc),
3673 paddress (event_child->step_range_start),
3674 paddress (event_child->step_range_end));
3675 }
3676
3677 /* We're not reporting this breakpoint to GDB, so apply the
3678 decr_pc_after_break adjustment to the inferior's regcache
3679 ourselves. */
3680
3681 if (the_low_target.set_pc != NULL)
3682 {
3683 struct regcache *regcache
3684 = get_thread_regcache (current_thread, 1);
3685 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3686 }
3687
3688 if (step_over_finished)
3689 {
3690 /* If we have finished stepping over a breakpoint, we've
3691 stopped and suspended all LWPs momentarily except the
3692 stepping one. This is where we resume them all again.
3693 We're going to keep waiting, so use proceed, which
3694 handles stepping over the next breakpoint. */
3695 unsuspend_all_lwps (event_child);
3696 }
3697 else
3698 {
3699 /* Remove the single-step breakpoints, if any. Note that
3700 there are no single-step breakpoints left if we just
3701 finished stepping over. */
3702 if (can_software_single_step ()
3703 && has_single_step_breakpoints (current_thread))
3704 {
3705 stop_all_lwps (0, event_child);
3706 delete_single_step_breakpoints (current_thread);
3707 unstop_all_lwps (0, event_child);
3708 }
3709 }
3710
3711 if (debug_threads)
3712 debug_printf ("proceeding all threads.\n");
3713 proceed_all_lwps ();
3714
3715 if (debug_threads)
3716 debug_exit ();
3717
3718 return ignore_event (ourstatus);
3719 }
3720
3721 if (debug_threads)
3722 {
3723 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3724 {
3725 char *str;
3726
3727 str = target_waitstatus_to_string (&event_child->waitstatus);
3728 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3729 lwpid_of (get_lwp_thread (event_child)), str);
3730 xfree (str);
3731 }
3732 if (current_thread->last_resume_kind == resume_step)
3733 {
3734 if (event_child->step_range_start == event_child->step_range_end)
3735 debug_printf ("GDB wanted to single-step, reporting event.\n");
3736 else if (!lwp_in_step_range (event_child))
3737 debug_printf ("Out of step range, reporting event.\n");
3738 }
3739 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3740 debug_printf ("Stopped by watchpoint.\n");
3741 else if (gdb_breakpoint_here (event_child->stop_pc))
3742 debug_printf ("Stopped by GDB breakpoint.\n");
3743 debug_printf ("Hit a non-gdbserver trap event.\n");
3745 }
3746
3747 /* Alright, we're going to report a stop. */
3748
3749 /* Remove single-step breakpoints. */
3750 if (can_software_single_step ())
3751 {
3752 /* Decide whether to remove single-step breakpoints. If we do,
3753 stop all lwps first, so that other threads won't hit a
3754 breakpoint left in stale memory. */
3755 int remove_single_step_breakpoints_p = 0;
3756
3757 if (non_stop)
3758 {
3759 remove_single_step_breakpoints_p
3760 = has_single_step_breakpoints (current_thread);
3761 }
3762 else
3763 {
3764 /* In all-stop, a stop reply cancels all previous resume
3765 requests. Delete all single-step breakpoints. */
3766 struct inferior_list_entry *inf, *tmp;
3767
3768 ALL_INFERIORS (&all_threads, inf, tmp)
3769 {
3770 struct thread_info *thread = (struct thread_info *) inf;
3771
3772 if (has_single_step_breakpoints (thread))
3773 {
3774 remove_single_step_breakpoints_p = 1;
3775 break;
3776 }
3777 }
3778 }
3779
3780 if (remove_single_step_breakpoints_p)
3781 {
3782 /* If we remove single-step breakpoints from memory, stop all lwps,
3783 so that other threads won't hit a breakpoint left in stale
3784 memory. */
3785 stop_all_lwps (0, event_child);
3786
3787 if (non_stop)
3788 {
3789 gdb_assert (has_single_step_breakpoints (current_thread));
3790 delete_single_step_breakpoints (current_thread);
3791 }
3792 else
3793 {
3794 struct inferior_list_entry *inf, *tmp;
3795
3796 ALL_INFERIORS (&all_threads, inf, tmp)
3797 {
3798 struct thread_info *thread = (struct thread_info *) inf;
3799
3800 if (has_single_step_breakpoints (thread))
3801 delete_single_step_breakpoints (thread);
3802 }
3803 }
3804
3805 unstop_all_lwps (0, event_child);
3806 }
3807 }
3808
3809 if (!stabilizing_threads)
3810 {
3811 /* In all-stop, stop all threads. */
3812 if (!non_stop)
3813 stop_all_lwps (0, NULL);
3814
3815 if (step_over_finished)
3816 {
3817 if (!non_stop)
3818 {
3819 /* If we were doing a step-over, all other threads but
3820 the stepping one had been paused in start_step_over,
3821 with their suspend counts incremented. We don't want
3822 to do a full unstop/unpause, because we're in
3823 all-stop mode (so we want threads stopped), but we
3824 still need to unsuspend the other threads, to
3825 decrement their `suspended' count back. */
3826 unsuspend_all_lwps (event_child);
3827 }
3828 else
3829 {
3830 /* If we just finished a step-over, then all threads had
3831 been momentarily paused. In all-stop, that's fine,
3832 we want threads stopped by now anyway. In non-stop,
3833 we need to re-resume threads that GDB wanted to be
3834 running. */
3835 unstop_all_lwps (1, event_child);
3836 }
3837 }
3838
3839 /* If we're not waiting for a specific LWP, choose an event LWP
3840 from among those that have had events. Giving equal priority
3841 to all LWPs that have had events helps prevent
3842 starvation. */
3843 if (ptid_equal (ptid, minus_one_ptid))
3844 {
3845 event_child->status_pending_p = 1;
3846 event_child->status_pending = w;
3847
3848 select_event_lwp (&event_child);
3849
3850 /* current_thread and event_child must stay in sync. */
3851 current_thread = get_lwp_thread (event_child);
3852
3853 event_child->status_pending_p = 0;
3854 w = event_child->status_pending;
3855 }
3856
3858 /* Stabilize threads (move out of jump pads). */
3859 if (!non_stop)
3860 stabilize_threads ();
3861 }
3862 else
3863 {
3864 /* If we just finished a step-over, then all threads had been
3865 momentarily paused. In all-stop, that's fine, we want
3866 threads stopped by now anyway. In non-stop, we need to
3867 re-resume threads that GDB wanted to be running. */
3868 if (step_over_finished)
3869 unstop_all_lwps (1, event_child);
3870 }
3871
3872 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3873 {
3874 /* If the reported event is an exit, fork, vfork or exec, let
3875 GDB know. */
3876
3877 /* Break the unreported fork relationship chain. */
3878 if (event_child->waitstatus.kind == TARGET_WAITKIND_FORKED
3879 || event_child->waitstatus.kind == TARGET_WAITKIND_VFORKED)
3880 {
3881 event_child->fork_relative->fork_relative = NULL;
3882 event_child->fork_relative = NULL;
3883 }
3884
3885 *ourstatus = event_child->waitstatus;
3886 /* Clear the event lwp's waitstatus since we handled it already. */
3887 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3888 }
3889 else
3890 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3891
3892 /* Now that we've selected our final event LWP, un-adjust its PC if
3893 it was a software breakpoint, and the client doesn't know we can
3894 adjust the breakpoint ourselves. */
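/* Concrete example, assuming x86, where decr_pc_after_break is 1:
an int3 at address A traps with the PC at A + 1; stop_pc was
adjusted back to A, so for a client without the swbreak feature we
report A + 1 again. */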
3895 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3896 && !swbreak_feature)
3897 {
3898 int decr_pc = the_low_target.decr_pc_after_break;
3899
3900 if (decr_pc != 0)
3901 {
3902 struct regcache *regcache
3903 = get_thread_regcache (current_thread, 1);
3904 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3905 }
3906 }
3907
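/* With PTRACE_O_TRACESYSGOOD in effect, the kernel reports syscall
stops as SIGTRAP | 0x80, which is what SYSCALL_SIGTRAP stands for,
so they can't be confused with real SIGTRAPs. */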
3908 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3909 {
3910 get_syscall_trapinfo (event_child,
3911 &ourstatus->value.syscall_number);
3912 ourstatus->kind = event_child->syscall_state;
3913 }
3914 else if (current_thread->last_resume_kind == resume_stop
3915 && WSTOPSIG (w) == SIGSTOP)
3916 {
3917 /* A thread that was requested to stop by GDB with vCont;t
3918 stopped cleanly; report it as SIG0. The use of SIGSTOP
3919 is an implementation detail. */
3920 ourstatus->value.sig = GDB_SIGNAL_0;
3921 }
3922 else if (current_thread->last_resume_kind == resume_stop
3923 && WSTOPSIG (w) != SIGSTOP)
3924 {
3925 /* A thread that was requested to stop by GDB with vCont;t,
3926 but stopped for some other reason. */
3927 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3928 }
3929 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3930 {
3931 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3932 }
3933
3934 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3935
3936 if (debug_threads)
3937 {
3938 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3939 target_pid_to_str (ptid_of (current_thread)),
3940 ourstatus->kind, ourstatus->value.sig);
3941 debug_exit ();
3942 }
3943
3944 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3945 return filter_exit_event (event_child, ourstatus);
3946
3947 return ptid_of (current_thread);
3948 }
3949
3950 /* Get rid of any pending event in the pipe. */
3951 static void
3952 async_file_flush (void)
3953 {
3954 int ret;
3955 char buf;
3956
3957 do
3958 ret = read (linux_event_pipe[0], &buf, 1);
3959 while (ret >= 0 || (ret == -1 && errno == EINTR));
3960 }
3961
3962 /* Put something in the pipe, so the event loop wakes up. */
3963 static void
3964 async_file_mark (void)
3965 {
3966 int ret;
3967
3968 async_file_flush ();
3969
3970 do
3971 ret = write (linux_event_pipe[1], "+", 1);
3972 while (ret == 0 || (ret == -1 && errno == EINTR));
3973
3974 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3975 be awakened anyway. */
3976 }
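/* A minimal sketch (not part of this file) of how the event pipe
used by the two functions above could be created. Both ends must be
non-blocking: async_file_flush relies on read returning -1/EAGAIN
once the pipe is drained, and async_file_mark relies on write
failing with EAGAIN rather than blocking when the pipe is full. */

static int
example_open_event_pipe (int pipefd[2])
{
if (pipe (pipefd) == -1)
return -1;

if (fcntl (pipefd[0], F_SETFL, O_NONBLOCK) == -1
|| fcntl (pipefd[1], F_SETFL, O_NONBLOCK) == -1)
{
close (pipefd[0]);
close (pipefd[1]);
return -1;
}

return 0;
}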
3977
3978 static ptid_t
3979 linux_wait (ptid_t ptid,
3980 struct target_waitstatus *ourstatus, int target_options)
3981 {
3982 ptid_t event_ptid;
3983
3984 /* Flush the async file first. */
3985 if (target_is_async_p ())
3986 async_file_flush ();
3987
3988 do
3989 {
3990 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3991 }
3992 while ((target_options & TARGET_WNOHANG) == 0
3993 && ptid_equal (event_ptid, null_ptid)
3994 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3995
3996 /* If at least one stop was reported, there may be more. A single
3997 SIGCHLD can signal more than one child stop. */
3998 if (target_is_async_p ()
3999 && (target_options & TARGET_WNOHANG) != 0
4000 && !ptid_equal (event_ptid, null_ptid))
4001 async_file_mark ();
4002
4003 return event_ptid;
4004 }
4005
4006 /* Send a signal to an LWP. */
4007
4008 static int
4009 kill_lwp (unsigned long lwpid, int signo)
4010 {
4011 int ret;
4012
4013 errno = 0;
4014 ret = syscall (__NR_tkill, lwpid, signo);
4015 if (errno == ENOSYS)
4016 {
4017 /* If tkill fails, then we are not using nptl threads, a
4018 configuration we no longer support. */
4019 perror_with_name (("tkill"));
4020 }
4021 return ret;
4022 }
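/* Note that tkill(2) directs the signal at one specific LWP, whereas
kill(2) delivers a process-directed signal that the kernel may hand
to any thread in the process. That distinction matters when stopping
LWPs one at a time, as send_sigstop below does. */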
4023
4024 void
4025 linux_stop_lwp (struct lwp_info *lwp)
4026 {
4027 send_sigstop (lwp);
4028 }
4029
4030 static void
4031 send_sigstop (struct lwp_info *lwp)
4032 {
4033 int pid;
4034
4035 pid = lwpid_of (get_lwp_thread (lwp));
4036
4037 /* If we already have a pending stop signal for this process, don't
4038 send another. */
4039 if (lwp->stop_expected)
4040 {
4041 if (debug_threads)
4042 debug_printf ("Have pending sigstop for lwp %d\n", pid);
4043
4044 return;
4045 }
4046
4047 if (debug_threads)
4048 debug_printf ("Sending sigstop to lwp %d\n", pid);
4049
4050 lwp->stop_expected = 1;
4051 kill_lwp (pid, SIGSTOP);
4052 }
4053
4054 static int
4055 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
4056 {
4057 struct thread_info *thread = (struct thread_info *) entry;
4058 struct lwp_info *lwp = get_thread_lwp (thread);
4059
4060 /* Ignore EXCEPT. */
4061 if (lwp == except)
4062 return 0;
4063
4064 if (lwp->stopped)
4065 return 0;
4066
4067 send_sigstop (lwp);
4068 return 0;
4069 }
4070
4071 /* Increment the suspend count of an LWP, and stop it, if not stopped
4072 yet. */
4073 static int
4074 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
4075 void *except)
4076 {
4077 struct thread_info *thread = (struct thread_info *) entry;
4078 struct lwp_info *lwp = get_thread_lwp (thread);
4079
4080 /* Ignore EXCEPT. */
4081 if (lwp == except)
4082 return 0;
4083
4084 lwp_suspended_inc (lwp);
4085
4086 return send_sigstop_callback (entry, except);
4087 }
4088
4089 static void
4090 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4091 {
4092 /* Store the exit status for later. */
4093 lwp->status_pending_p = 1;
4094 lwp->status_pending = wstat;
4095
4096 /* Store in waitstatus as well, as there's nothing else to process
4097 for this event. */
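/* For example, exit (3) produces a wait status of 0x0300: WIFEXITED
is true and WEXITSTATUS extracts the 3. A child killed by SIGKILL
produces 0x0009: WIFSIGNALED is true and WTERMSIG extracts the 9. */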
4098 if (WIFEXITED (wstat))
4099 {
4100 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4101 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4102 }
4103 else if (WIFSIGNALED (wstat))
4104 {
4105 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4106 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4107 }
4108
4109 /* Prevent trying to stop it. */
4110 lwp->stopped = 1;
4111
4112 /* No further stops are expected from a dead lwp. */
4113 lwp->stop_expected = 0;
4114 }
4115
4116 /* Return true if LWP has exited already, and has a pending exit event
4117 to report to GDB. */
4118
4119 static int
4120 lwp_is_marked_dead (struct lwp_info *lwp)
4121 {
4122 return (lwp->status_pending_p
4123 && (WIFEXITED (lwp->status_pending)
4124 || WIFSIGNALED (lwp->status_pending)));
4125 }
4126
4127 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4128
4129 static void
4130 wait_for_sigstop (void)
4131 {
4132 struct thread_info *saved_thread;
4133 ptid_t saved_tid;
4134 int wstat;
4135 int ret;
4136
4137 saved_thread = current_thread;
4138 if (saved_thread != NULL)
4139 saved_tid = saved_thread->entry.id;
4140 else
4141 saved_tid = null_ptid; /* avoid bogus unused warning */
4142
4143 if (debug_threads)
4144 debug_printf ("wait_for_sigstop: pulling events\n");
4145
4146 /* Passing NULL_PTID as filter indicates we want all events to be
4147 left pending. Eventually this returns when there are no
4148 unwaited-for children left. */
4149 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4150 &wstat, __WALL);
4151 gdb_assert (ret == -1);
4152
4153 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4154 current_thread = saved_thread;
4155 else
4156 {
4157 if (debug_threads)
4158 debug_printf ("Previously current thread died.\n");
4159
4160 /* We can't change the current inferior behind GDB's back,
4161 otherwise, a subsequent command may apply to the wrong
4162 process. */
4163 current_thread = NULL;
4164 }
4165 }
4166
4167 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4168 move it out, because we need to report the stop event to GDB. For
4169 example, if the user puts a breakpoint in the jump pad, it's
4170 because she wants to debug it. */
4171
4172 static int
4173 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4174 {
4175 struct thread_info *thread = (struct thread_info *) entry;
4176 struct lwp_info *lwp = get_thread_lwp (thread);
4177
4178 if (lwp->suspended != 0)
4179 {
4180 internal_error (__FILE__, __LINE__,
4181 "LWP %ld is suspended, suspended=%d\n",
4182 lwpid_of (thread), lwp->suspended);
4183 }
4184 gdb_assert (lwp->stopped);
4185
4186 /* Allow debugging the jump pad, gdb_collect, etc.. */
4187 return (supports_fast_tracepoints ()
4188 && agent_loaded_p ()
4189 && (gdb_breakpoint_here (lwp->stop_pc)
4190 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4191 || thread->last_resume_kind == resume_step)
4192 && linux_fast_tracepoint_collecting (lwp, NULL));
4193 }
4194
4195 static void
4196 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4197 {
4198 struct thread_info *thread = (struct thread_info *) entry;
4199 struct thread_info *saved_thread;
4200 struct lwp_info *lwp = get_thread_lwp (thread);
4201 int *wstat;
4202
4203 if (lwp->suspended != 0)
4204 {
4205 internal_error (__FILE__, __LINE__,
4206 "LWP %ld is suspended, suspended=%d\n",
4207 lwpid_of (thread), lwp->suspended);
4208 }
4209 gdb_assert (lwp->stopped);
4210
4211 /* For gdb_breakpoint_here. */
4212 saved_thread = current_thread;
4213 current_thread = thread;
4214
4215 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4216
4217 /* Allow debugging the jump pad, gdb_collect, etc. */
4218 if (!gdb_breakpoint_here (lwp->stop_pc)
4219 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4220 && thread->last_resume_kind != resume_step
4221 && maybe_move_out_of_jump_pad (lwp, wstat))
4222 {
4223 if (debug_threads)
4224 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4225 lwpid_of (thread));
4226
4227 if (wstat)
4228 {
4229 lwp->status_pending_p = 0;
4230 enqueue_one_deferred_signal (lwp, wstat);
4231
4232 if (debug_threads)
4233 debug_printf ("Signal %d for LWP %ld deferred "
4234 "(in jump pad)\n",
4235 WSTOPSIG (*wstat), lwpid_of (thread));
4236 }
4237
4238 linux_resume_one_lwp (lwp, 0, 0, NULL);
4239 }
4240 else
4241 lwp_suspended_inc (lwp);
4242
4243 current_thread = saved_thread;
4244 }
4245
4246 static int
4247 lwp_running (struct inferior_list_entry *entry, void *data)
4248 {
4249 struct thread_info *thread = (struct thread_info *) entry;
4250 struct lwp_info *lwp = get_thread_lwp (thread);
4251
4252 if (lwp_is_marked_dead (lwp))
4253 return 0;
4254 if (lwp->stopped)
4255 return 0;
4256 return 1;
4257 }
4258
4259 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4260 If SUSPEND, then also increase the suspend count of every LWP,
4261 except EXCEPT. */
4262
4263 static void
4264 stop_all_lwps (int suspend, struct lwp_info *except)
4265 {
4266 /* Should not be called recursively. */
4267 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4268
4269 if (debug_threads)
4270 {
4271 debug_enter ();
4272 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4273 suspend ? "stop-and-suspend" : "stop",
4274 except != NULL
4275 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4276 : "none");
4277 }
4278
4279 stopping_threads = (suspend
4280 ? STOPPING_AND_SUSPENDING_THREADS
4281 : STOPPING_THREADS);
4282
4283 if (suspend)
4284 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4285 else
4286 find_inferior (&all_threads, send_sigstop_callback, except);
4287 wait_for_sigstop ();
4288 stopping_threads = NOT_STOPPING_THREADS;
4289
4290 if (debug_threads)
4291 {
4292 debug_printf ("stop_all_lwps done, setting stopping_threads "
4293 "back to !stopping\n");
4294 debug_exit ();
4295 }
4296 }
4297
4298 /* Enqueue one signal in the chain of signals which need to be
4299 delivered to this process on next resume. */
4300
4301 static void
4302 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4303 {
4304 struct pending_signals *p_sig = XNEW (struct pending_signals);
4305
4306 p_sig->prev = lwp->pending_signals;
4307 p_sig->signal = signal;
4308 if (info == NULL)
4309 memset (&p_sig->info, 0, sizeof (siginfo_t));
4310 else
4311 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4312 lwp->pending_signals = p_sig;
4313 }
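/* For illustration, a sketch of the matching dequeue; this helper is
not part of this file, and linux_resume_one_lwp_throw below
open-codes the same walk. The chain grows at the head via PREV, so
the oldest entry is the tail, which keeps delivery in FIFO order. */

static int
example_dequeue_oldest_signal (struct lwp_info *lwp, siginfo_t *info)
{
struct pending_signals **p_sig = &lwp->pending_signals;
int signal;

if (*p_sig == NULL)
return 0;

/* Walk to the oldest (tail) entry. */
while ((*p_sig)->prev != NULL)
p_sig = &(*p_sig)->prev;

signal = (*p_sig)->signal;
memcpy (info, &(*p_sig)->info, sizeof (siginfo_t));

free (*p_sig);
*p_sig = NULL;
return signal;
}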
4314
4315 /* Install breakpoints for software single stepping. */
4316
4317 static void
4318 install_software_single_step_breakpoints (struct lwp_info *lwp)
4319 {
4320 struct thread_info *thread = get_lwp_thread (lwp);
4321 struct regcache *regcache = get_thread_regcache (thread, 1);
4322 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4323
4324 current_thread = thread;
4325 std::vector<CORE_ADDR> next_pcs = the_low_target.get_next_pcs (regcache);
4326
4327 for (CORE_ADDR pc : next_pcs)
4328 set_single_step_breakpoint (pc, current_ptid);
4329
4330 do_cleanups (old_chain);
4331 }
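/* The heavy lifting is in the target's get_next_pcs hook, which
decodes the instruction at the current PC and returns every possible
successor. A hypothetical hook for a fixed-length ISA, ignoring
branches for simplicity, would reduce to:

static std::vector<CORE_ADDR>
example_get_next_pcs (struct regcache *regcache)
{
std::vector<CORE_ADDR> pcs;

pcs.push_back ((*the_low_target.get_pc) (regcache) + 4);
return pcs;
}

Real implementations also follow branches, so several single-step
breakpoints may be planted per step. */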
4332
4333 /* Single step via hardware or software single step.
4334 Return 1 if hardware single stepping, 0 if software single stepping
4335 or can't single step. */
4336
4337 static int
4338 single_step (struct lwp_info* lwp)
4339 {
4340 int step = 0;
4341
4342 if (can_hardware_single_step ())
4343 {
4344 step = 1;
4345 }
4346 else if (can_software_single_step ())
4347 {
4348 install_software_single_step_breakpoints (lwp);
4349 step = 0;
4350 }
4351 else
4352 {
4353 if (debug_threads)
4354 debug_printf ("stepping is not implemented on this target");
4355 }
4356
4357 return step;
4358 }
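/* For example, start_step_over below does:

step = single_step (lwp);
...
linux_resume_one_lwp (lwp, step, 0, NULL);

so on a software single-step target the LWP is continued (STEP == 0)
into the single-step breakpoints just installed. */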
4359
4360 /* The signal can be delivered to the inferior if we are not trying to
4361 finish a fast tracepoint collect. Since a signal can be delivered
4362 during a step-over, the program may enter the signal handler and
4363 trap again after returning from it. We can live with the spurious
4364 double traps. */
4365
4366 static int
4367 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4368 {
4369 return !lwp->collecting_fast_tracepoint;
4370 }
4371
4372 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4373 SIGNAL is nonzero, give it that signal. */
4374
4375 static void
4376 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4377 int step, int signal, siginfo_t *info)
4378 {
4379 struct thread_info *thread = get_lwp_thread (lwp);
4380 struct thread_info *saved_thread;
4381 int fast_tp_collecting;
4382 int ptrace_request;
4383 struct process_info *proc = get_thread_process (thread);
4384
4385 /* Note that the target description may not be initialised
4386 (proc->tdesc == NULL) at this point, because the program hasn't
4387 stopped at its first instruction yet. It means GDBserver skips
4388 the extra traps from the wrapper program (see option --wrapper).
4389 Code in this function that requires register access should be
4390 guarded by a proc->tdesc != NULL check or similar. */
4391
4392 if (lwp->stopped == 0)
4393 return;
4394
4395 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4396
4397 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4398
4399 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4400
4401 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4402 user used the "jump" command, or "set $pc = foo"). */
4403 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4404 {
4405 /* Collecting 'while-stepping' actions doesn't make sense
4406 anymore. */
4407 release_while_stepping_state_list (thread);
4408 }
4409
4410 /* If we have pending signals or status, and a new signal, enqueue the
4411 signal. Also enqueue the signal if it can't be delivered to the
4412 inferior right now. */
4413 if (signal != 0
4414 && (lwp->status_pending_p
4415 || lwp->pending_signals != NULL
4416 || !lwp_signal_can_be_delivered (lwp)))
4417 {
4418 enqueue_pending_signal (lwp, signal, info);
4419
4420 /* Postpone any pending signal. It was enqueued above. */
4421 signal = 0;
4422 }
4423
4424 if (lwp->status_pending_p)
4425 {
4426 if (debug_threads)
4427 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4428 " has pending status\n",
4429 lwpid_of (thread), step ? "step" : "continue",
4430 lwp->stop_expected ? "expected" : "not expected");
4431 return;
4432 }
4433
4434 saved_thread = current_thread;
4435 current_thread = thread;
4436
4437 /* This bit needs some thinking about. If we get a signal that
4438 we must report while a single-step reinsert is still pending,
4439 we often end up resuming the thread. It might be better to
4440 (ew) allow a stack of pending events; then we could be sure that
4441 the reinsert happened right away and not lose any signals.
4442
4443 Making this stack would also shrink the window in which breakpoints are
4444 uninserted (see comment in linux_wait_for_lwp) but not enough for
4445 complete correctness, so it won't solve that problem. It may be
4446 worthwhile just to solve this one, however. */
4447 if (lwp->bp_reinsert != 0)
4448 {
4449 if (debug_threads)
4450 debug_printf (" pending reinsert at 0x%s\n",
4451 paddress (lwp->bp_reinsert));
4452
4453 if (can_hardware_single_step ())
4454 {
4455 if (fast_tp_collecting == 0)
4456 {
4457 if (step == 0)
4458 warning ("BAD - reinserting but not stepping.");
4459 if (lwp->suspended)
4460 warning ("BAD - reinserting and suspended(%d).",
4461 lwp->suspended);
4462 }
4463 }
4464
4465 step = maybe_hw_step (thread);
4466 }
4467
4468 if (fast_tp_collecting == 1)
4469 {
4470 if (debug_threads)
4471 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4472 " (exit-jump-pad-bkpt)\n",
4473 lwpid_of (thread));
4474 }
4475 else if (fast_tp_collecting == 2)
4476 {
4477 if (debug_threads)
4478 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4479 " single-stepping\n",
4480 lwpid_of (thread));
4481
4482 if (can_hardware_single_step ())
4483 step = 1;
4484 else
4485 {
4486 internal_error (__FILE__, __LINE__,
4487 "moving out of jump pad single-stepping"
4488 " not implemented on this target");
4489 }
4490 }
4491
4492 /* If we have while-stepping actions in this thread set it stepping.
4493 If we have a signal to deliver, it may or may not be set to
4494 SIG_IGN, we don't know. Assume so, and allow collecting
4495 while-stepping into a signal handler. A possible smart thing to
4496 do would be to set an internal breakpoint at the signal return
4497 address, continue, and carry on catching this while-stepping
4498 action only when that breakpoint is hit. A future
4499 enhancement. */
4500 if (thread->while_stepping != NULL)
4501 {
4502 if (debug_threads)
4503 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4504 lwpid_of (thread));
4505
4506 step = single_step (lwp);
4507 }
4508
4509 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4510 {
4511 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4512
4513 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4514
4515 if (debug_threads)
4516 {
4517 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4518 (long) lwp->stop_pc);
4519 }
4520 }
4521
4522 /* If we have pending signals, consume one if it can be delivered to
4523 the inferior. */
4524 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4525 {
4526 struct pending_signals **p_sig;
4527
4528 p_sig = &lwp->pending_signals;
4529 while ((*p_sig)->prev != NULL)
4530 p_sig = &(*p_sig)->prev;
4531
4532 signal = (*p_sig)->signal;
4533 if ((*p_sig)->info.si_signo != 0)
4534 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4535 &(*p_sig)->info);
4536
4537 free (*p_sig);
4538 *p_sig = NULL;
4539 }
4540
4541 if (debug_threads)
4542 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4543 lwpid_of (thread), step ? "step" : "continue", signal,
4544 lwp->stop_expected ? "expected" : "not expected");
4545
4546 if (the_low_target.prepare_to_resume != NULL)
4547 the_low_target.prepare_to_resume (lwp);
4548
4549 regcache_invalidate_thread (thread);
4550 errno = 0;
4551 lwp->stepping = step;
4552 if (step)
4553 ptrace_request = PTRACE_SINGLESTEP;
4554 else if (gdb_catching_syscalls_p (lwp))
4555 ptrace_request = PTRACE_SYSCALL;
4556 else
4557 ptrace_request = PTRACE_CONT;
4558 ptrace (ptrace_request,
4559 lwpid_of (thread),
4560 (PTRACE_TYPE_ARG3) 0,
4561 /* Coerce to a uintptr_t first to avoid potential gcc warning
4562 of coercing an 8 byte integer to a 4 byte pointer. */
4563 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4564
4565 current_thread = saved_thread;
4566 if (errno)
4567 perror_with_name ("resuming thread");
4568
4569 /* Successfully resumed. Clear state that no longer makes sense,
4570 and mark the LWP as running. Must not do this before resuming
4571 otherwise if that fails other code will be confused. E.g., we'd
4572 later try to stop the LWP and hang forever waiting for a stop
4573 status. Note that we must not throw after this is cleared,
4574 otherwise handle_zombie_lwp_error would get confused. */
4575 lwp->stopped = 0;
4576 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4577 }
4578
4579 /* Called when we try to resume a stopped LWP and that errors out. If
4580 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4581 or about to become), discard the error, clear any pending status
4582 the LWP may have, and return true (we'll collect the exit status
4583 soon enough). Otherwise, return false. */
4584
4585 static int
4586 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4587 {
4588 struct thread_info *thread = get_lwp_thread (lp);
4589
4590 /* If we get an error after resuming the LWP successfully, we'd
4591 confuse !T state for the LWP being gone. */
4592 gdb_assert (lp->stopped);
4593
4594 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4595 because even if ptrace failed with ESRCH, the tracee may be "not
4596 yet fully dead", but already refusing ptrace requests. In that
4597 case the tracee has 'R (Running)' state for a little bit
4598 (observed in Linux 3.18). See also the note on ESRCH in the
4599 ptrace(2) man page. Instead, check whether the LWP has any state
4600 other than ptrace-stopped. */
4601
4602 /* Don't assume anything if /proc/PID/status can't be read. */
4603 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4604 {
4605 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4606 lp->status_pending_p = 0;
4607 return 1;
4608 }
4609 return 0;
4610 }
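/* A rough sketch, for illustration only, of the kind of check
linux_proc_pid_is_trace_stopped_nowarn performs; the real helper, in
nat/linux-procfs.c, is more careful. Read the State: line of
/proc/PID/status and look for a tracing stop. */

static int
example_pid_is_trace_stopped (int pid)
{
char path[64];
char line[256];
FILE *f;
int stopped = 0;

snprintf (path, sizeof (path), "/proc/%d/status", pid);
f = fopen (path, "r");
if (f == NULL)
return 0;

while (fgets (line, sizeof (line), f) != NULL)
if (strncmp (line, "State:", 6) == 0)
{
stopped = (strstr (line, "(tracing stop)") != NULL);
break;
}

fclose (f);
return stopped;
}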
4611
4612 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4613 disappears while we try to resume it. */
4614
4615 static void
4616 linux_resume_one_lwp (struct lwp_info *lwp,
4617 int step, int signal, siginfo_t *info)
4618 {
4619 TRY
4620 {
4621 linux_resume_one_lwp_throw (lwp, step, signal, info);
4622 }
4623 CATCH (ex, RETURN_MASK_ERROR)
4624 {
4625 if (!check_ptrace_stopped_lwp_gone (lwp))
4626 throw_exception (ex);
4627 }
4628 END_CATCH
4629 }
4630
4631 struct thread_resume_array
4632 {
4633 struct thread_resume *resume;
4634 size_t n;
4635 };
4636
4637 /* This function is called once per thread via find_inferior.
4638 ARG is a pointer to a thread_resume_array struct.
4639 We look up the thread specified by ENTRY in ARG, and mark the thread
4640 with a pointer to the appropriate resume request.
4641
4642 This algorithm is O(threads * resume elements), but resume elements
4643 is small (and will remain small at least until GDB supports thread
4644 suspension). */
4645
4646 static int
4647 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4648 {
4649 struct thread_info *thread = (struct thread_info *) entry;
4650 struct lwp_info *lwp = get_thread_lwp (thread);
4651 int ndx;
4652 struct thread_resume_array *r;
4653
4654 r = (struct thread_resume_array *) arg;
4655
4656 for (ndx = 0; ndx < r->n; ndx++)
4657 {
4658 ptid_t ptid = r->resume[ndx].thread;
4659 if (ptid_equal (ptid, minus_one_ptid)
4660 || ptid_equal (ptid, entry->id)
4661 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4662 of PID'. */
4663 || (ptid_get_pid (ptid) == pid_of (thread)
4664 && (ptid_is_pid (ptid)
4665 || ptid_get_lwp (ptid) == -1)))
4666 {
4667 if (r->resume[ndx].kind == resume_stop
4668 && thread->last_resume_kind == resume_stop)
4669 {
4670 if (debug_threads)
4671 debug_printf ("already %s LWP %ld at GDB's request\n",
4672 (thread->last_status.kind
4673 == TARGET_WAITKIND_STOPPED)
4674 ? "stopped"
4675 : "stopping",
4676 lwpid_of (thread));
4677
4678 continue;
4679 }
4680
4681 /* Ignore (wildcard) resume requests for already-resumed
4682 threads. */
4683 if (r->resume[ndx].kind != resume_stop
4684 && thread->last_resume_kind != resume_stop)
4685 {
4686 if (debug_threads)
4687 debug_printf ("already %s LWP %ld at GDB's request\n",
4688 (thread->last_resume_kind
4689 == resume_step)
4690 ? "stepping"
4691 : "continuing",
4692 lwpid_of (thread));
4693 continue;
4694 }
4695
4696 /* Don't let wildcard resumes resume fork children that GDB
4697 does not yet know are new fork children. */
4698 if (lwp->fork_relative != NULL)
4699 {
4700 struct lwp_info *rel = lwp->fork_relative;
4702
4703 if (rel->status_pending_p
4704 && (rel->waitstatus.kind == TARGET_WAITKIND_FORKED
4705 || rel->waitstatus.kind == TARGET_WAITKIND_VFORKED))
4706 {
4707 if (debug_threads)
4708 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4709 lwpid_of (thread));
4710 continue;
4711 }
4712 }
4713
4714 /* If the thread has a pending event that has already been
4715 reported to GDBserver core, but GDB has not pulled the
4716 event out of the vStopped queue yet, likewise, ignore the
4717 (wildcard) resume request. */
4718 if (in_queued_stop_replies (entry->id))
4719 {
4720 if (debug_threads)
4721 debug_printf ("not resuming LWP %ld: has queued stop reply\n",
4722 lwpid_of (thread));
4723 continue;
4724 }
4725
4726 lwp->resume = &r->resume[ndx];
4727 thread->last_resume_kind = lwp->resume->kind;
4728
4729 lwp->step_range_start = lwp->resume->step_range_start;
4730 lwp->step_range_end = lwp->resume->step_range_end;
4731
4732 /* If we had a deferred signal to report, dequeue one now.
4733 This can happen if LWP gets more than one signal while
4734 trying to get out of a jump pad. */
4735 if (lwp->stopped
4736 && !lwp->status_pending_p
4737 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4738 {
4739 lwp->status_pending_p = 1;
4740
4741 if (debug_threads)
4742 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4743 "leaving status pending.\n",
4744 WSTOPSIG (lwp->status_pending),
4745 lwpid_of (thread));
4746 }
4747
4748 return 0;
4749 }
4750 }
4751
4752 /* No resume action for this thread. */
4753 lwp->resume = NULL;
4754
4755 return 0;
4756 }
4757
4758 /* find_inferior callback for linux_resume.
4759 Set *FLAG_P if this lwp has an interesting status pending. */
4760
4761 static int
4762 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4763 {
4764 struct thread_info *thread = (struct thread_info *) entry;
4765 struct lwp_info *lwp = get_thread_lwp (thread);
4766
4767 /* LWPs which will not be resumed are not interesting, because
4768 we might not wait for them next time through linux_wait. */
4769 if (lwp->resume == NULL)
4770 return 0;
4771
4772 if (thread_still_has_status_pending_p (thread))
4773 * (int *) flag_p = 1;
4774
4775 return 0;
4776 }
4777
4778 /* Return 1 if this lwp that GDB wants running is stopped at an
4779 internal breakpoint that we need to step over. It assumes that any
4780 required STOP_PC adjustment has already been propagated to the
4781 inferior's regcache. */
4782
4783 static int
4784 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4785 {
4786 struct thread_info *thread = (struct thread_info *) entry;
4787 struct lwp_info *lwp = get_thread_lwp (thread);
4788 struct thread_info *saved_thread;
4789 CORE_ADDR pc;
4790 struct process_info *proc = get_thread_process (thread);
4791
4792 /* GDBserver is skipping the extra traps from the wrapper program,
4793 so there is no need to step over. */
4794 if (proc->tdesc == NULL)
4795 return 0;
4796
4797 /* LWPs which will not be resumed are not interesting, because we
4798 might not wait for them next time through linux_wait. */
4799
4800 if (!lwp->stopped)
4801 {
4802 if (debug_threads)
4803 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4804 lwpid_of (thread));
4805 return 0;
4806 }
4807
4808 if (thread->last_resume_kind == resume_stop)
4809 {
4810 if (debug_threads)
4811 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4812 " stopped\n",
4813 lwpid_of (thread));
4814 return 0;
4815 }
4816
4817 gdb_assert (lwp->suspended >= 0);
4818
4819 if (lwp->suspended)
4820 {
4821 if (debug_threads)
4822 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4823 lwpid_of (thread));
4824 return 0;
4825 }
4826
4827 if (lwp->status_pending_p)
4828 {
4829 if (debug_threads)
4830 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4831 " status.\n",
4832 lwpid_of (thread));
4833 return 0;
4834 }
4835
4836 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4837 or we have. */
4838 pc = get_pc (lwp);
4839
4840 /* If the PC has changed since we stopped, then don't do anything,
4841 and let the breakpoint/tracepoint be hit. This happens if, for
4842 instance, GDB handled the decr_pc_after_break subtraction itself,
4843 GDB is OOL stepping this thread, or the user has issued a "jump"
4844 command, or poked the thread's registers herself. */
4845 if (pc != lwp->stop_pc)
4846 {
4847 if (debug_threads)
4848 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4849 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4850 lwpid_of (thread),
4851 paddress (lwp->stop_pc), paddress (pc));
4852 return 0;
4853 }
4854
4855 /* On software single step target, resume the inferior with signal
4856 rather than stepping over. */
4857 if (can_software_single_step ()
4858 && lwp->pending_signals != NULL
4859 && lwp_signal_can_be_delivered (lwp))
4860 {
4861 if (debug_threads)
4862 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4863 " signals.\n",
4864 lwpid_of (thread));
4865
4866 return 0;
4867 }
4868
4869 saved_thread = current_thread;
4870 current_thread = thread;
4871
4872 /* We can only step over breakpoints we know about. */
4873 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4874 {
4875 /* Don't step over a breakpoint that GDB expects to hit,
4876 though. If the condition is being evaluated on the target's
4877 side and it evaluates to false, step over this breakpoint as well. */
4878 if (gdb_breakpoint_here (pc)
4879 && gdb_condition_true_at_breakpoint (pc)
4880 && gdb_no_commands_at_breakpoint (pc))
4881 {
4882 if (debug_threads)
4883 debug_printf ("Need step over [LWP %ld]? yes, but found"
4884 " GDB breakpoint at 0x%s; skipping step over\n",
4885 lwpid_of (thread), paddress (pc));
4886
4887 current_thread = saved_thread;
4888 return 0;
4889 }
4890 else
4891 {
4892 if (debug_threads)
4893 debug_printf ("Need step over [LWP %ld]? yes, "
4894 "found breakpoint at 0x%s\n",
4895 lwpid_of (thread), paddress (pc));
4896
4897 /* We've found an lwp that needs stepping over --- return 1 so
4898 that find_inferior stops looking. */
4899 current_thread = saved_thread;
4900
4901 return 1;
4902 }
4903 }
4904
4905 current_thread = saved_thread;
4906
4907 if (debug_threads)
4908 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4909 " at 0x%s\n",
4910 lwpid_of (thread), paddress (pc));
4911
4912 return 0;
4913 }
4914
4915 /* Start a step-over operation on LWP. When LWP is stopped at a
4916 breakpoint, to make progress, we need to move the breakpoint out
4917 of the way. If we let other threads run while we do that, they may
4918 pass by the breakpoint location and miss hitting it. To avoid
4919 that, a step-over momentarily stops all threads while LWP is
4920 single-stepped by either hardware or software while the breakpoint
4921 is temporarily uninserted from the inferior. When the single-step
4922 finishes, we reinsert the breakpoint, and let all threads that are
4923 supposed to be running, run again. */
4924
4925 static int
4926 start_step_over (struct lwp_info *lwp)
4927 {
4928 struct thread_info *thread = get_lwp_thread (lwp);
4929 struct thread_info *saved_thread;
4930 CORE_ADDR pc;
4931 int step;
4932
4933 if (debug_threads)
4934 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4935 lwpid_of (thread));
4936
4937 stop_all_lwps (1, lwp);
4938
4939 if (lwp->suspended != 0)
4940 {
4941 internal_error (__FILE__, __LINE__,
4942 "LWP %ld suspended=%d\n", lwpid_of (thread),
4943 lwp->suspended);
4944 }
4945
4946 if (debug_threads)
4947 debug_printf ("Done stopping all threads for step-over.\n");
4948
4949 /* Note, we should always reach here with an already adjusted PC,
4950 either by GDB (if we're resuming due to GDB's request), or by our
4951 caller, if we just finished handling an internal breakpoint GDB
4952 shouldn't care about. */
4953 pc = get_pc (lwp);
4954
4955 saved_thread = current_thread;
4956 current_thread = thread;
4957
4958 lwp->bp_reinsert = pc;
4959 uninsert_breakpoints_at (pc);
4960 uninsert_fast_tracepoint_jumps_at (pc);
4961
4962 step = single_step (lwp);
4963
4964 current_thread = saved_thread;
4965
4966 linux_resume_one_lwp (lwp, step, 0, NULL);
4967
4968 /* Require next event from this LWP. */
4969 step_over_bkpt = thread->entry.id;
4970 return 1;
4971 }
4972
4973 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4974 start_step_over, if still there, and delete any single-step
4975 breakpoints we've set, on non-hardware single-step targets. */
4976
4977 static int
4978 finish_step_over (struct lwp_info *lwp)
4979 {
4980 if (lwp->bp_reinsert != 0)
4981 {
4982 struct thread_info *saved_thread = current_thread;
4983
4984 if (debug_threads)
4985 debug_printf ("Finished step over.\n");
4986
4987 current_thread = get_lwp_thread (lwp);
4988
4989 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4990 may be no breakpoint to reinsert there by now. */
4991 reinsert_breakpoints_at (lwp->bp_reinsert);
4992 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4993
4994 lwp->bp_reinsert = 0;
4995
4996 /* Delete any single-step breakpoints. No longer needed. We
4997 don't have to worry about other threads hitting this trap,
4998 and later not being able to explain it, because we were
4999 stepping over a breakpoint, and we hold all threads but
5000 LWP stopped while doing that. */
5001 if (!can_hardware_single_step ())
5002 {
5003 gdb_assert (has_single_step_breakpoints (current_thread));
5004 delete_single_step_breakpoints (current_thread);
5005 }
5006
5007 step_over_bkpt = null_ptid;
5008 current_thread = saved_thread;
5009 return 1;
5010 }
5011 else
5012 return 0;
5013 }
5014
5015 /* If there's a step over in progress, wait until all threads stop
5016 (that is, until the stepping thread finishes its step), and
5017 unsuspend all lwps. The stepping thread ends with its status
5018 pending, which is processed later when we get back to processing
5019 events. */
5020
5021 static void
5022 complete_ongoing_step_over (void)
5023 {
5024 if (!ptid_equal (step_over_bkpt, null_ptid))
5025 {
5026 struct lwp_info *lwp;
5027 int wstat;
5028 int ret;
5029
5030 if (debug_threads)
5031 debug_printf ("detach: step over in progress, finish it first\n");
5032
5033 /* Passing NULL_PTID as filter indicates we want all events to
5034 be left pending. Eventually this returns when there are no
5035 unwaited-for children left. */
5036 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
5037 &wstat, __WALL);
5038 gdb_assert (ret == -1);
5039
5040 lwp = find_lwp_pid (step_over_bkpt);
5041 if (lwp != NULL)
5042 finish_step_over (lwp);
5043 step_over_bkpt = null_ptid;
5044 unsuspend_all_lwps (lwp);
5045 }
5046 }
5047
5048 /* This function is called once per thread. We check the thread's resume
5049 request, which will tell us whether to resume, step, or leave the thread
5050 stopped; and what signal, if any, it should be sent.
5051
5052 For threads which we aren't explicitly told otherwise, we preserve
5053 the stepping flag; this is used for stepping over gdbserver-placed
5054 breakpoints.
5055
5056 If pending_flags was set in any thread, we queue any needed
5057 signals, since we won't actually resume. We already have a pending
5058 event to report, so we don't need to preserve any step requests;
5059 they should be re-issued if necessary. */
5060
5061 static int
5062 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
5063 {
5064 struct thread_info *thread = (struct thread_info *) entry;
5065 struct lwp_info *lwp = get_thread_lwp (thread);
5066 int leave_all_stopped = * (int *) arg;
5067 int leave_pending;
5068
5069 if (lwp->resume == NULL)
5070 return 0;
5071
5072 if (lwp->resume->kind == resume_stop)
5073 {
5074 if (debug_threads)
5075 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
5076
5077 if (!lwp->stopped)
5078 {
5079 if (debug_threads)
5080 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
5081
5082 /* Stop the thread, and wait for the event asynchronously,
5083 through the event loop. */
5084 send_sigstop (lwp);
5085 }
5086 else
5087 {
5088 if (debug_threads)
5089 debug_printf ("already stopped LWP %ld\n",
5090 lwpid_of (thread));
5091
5092 /* The LWP may have been stopped in an internal event that
5093 was not meant to be notified back to GDB (e.g., gdbserver
5094 breakpoint), so we should be reporting a stop event in
5095 this case too. */
5096
5097 /* If the thread already has a pending SIGSTOP, this is a
5098 no-op. Otherwise, something later will presumably resume
5099 the thread and this will cause it to cancel any pending
5100 operation, due to last_resume_kind == resume_stop. If
5101 the thread already has a pending status to report, we
5102 will still report it the next time we wait - see
5103 status_pending_p_callback. */
5104
5105 /* If we already have a pending signal to report, then
5106 there's no need to queue a SIGSTOP, as this means we're
5107 midway through moving the LWP out of the jump pad, and we
5108 will report the pending signal as soon as that is
5109 finished. */
5110 if (lwp->pending_signals_to_report == NULL)
5111 send_sigstop (lwp);
5112 }
5113
5114 /* For stop requests, we're done. */
5115 lwp->resume = NULL;
5116 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5117 return 0;
5118 }
5119
5120 /* If this thread which is about to be resumed has a pending status,
5121 then don't resume it - we can just report the pending status.
5122 Likewise if it is suspended, because e.g., another thread is
5123 stepping past a breakpoint. Make sure to queue any signals that
5124 would otherwise be sent. In all-stop mode, we do this decision
5125 based on if *any* thread has a pending status. If there's a
5126 thread that needs the step-over-breakpoint dance, then don't
5127 resume any other thread but that particular one. */
5128 leave_pending = (lwp->suspended
5129 || lwp->status_pending_p
5130 || leave_all_stopped);
5131
5132 /* If we have a new signal, enqueue the signal. */
5133 if (lwp->resume->sig != 0)
5134 {
5135 siginfo_t info, *info_p;
5136
5137 /* If this is the same signal we were previously stopped by,
5138 make sure to queue its siginfo. */
5139 if (WIFSTOPPED (lwp->last_status)
5140 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5141 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5142 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5143 info_p = &info;
5144 else
5145 info_p = NULL;
5146
5147 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5148 }
5149
5150 if (!leave_pending)
5151 {
5152 if (debug_threads)
5153 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5154
5155 proceed_one_lwp (entry, NULL);
5156 }
5157 else
5158 {
5159 if (debug_threads)
5160 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5161 }
5162
5163 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5164 lwp->resume = NULL;
5165 return 0;
5166 }
5167
5168 static void
5169 linux_resume (struct thread_resume *resume_info, size_t n)
5170 {
5171 struct thread_resume_array array = { resume_info, n };
5172 struct thread_info *need_step_over = NULL;
5173 int any_pending;
5174 int leave_all_stopped;
5175
5176 if (debug_threads)
5177 {
5178 debug_enter ();
5179 debug_printf ("linux_resume:\n");
5180 }
5181
5182 find_inferior (&all_threads, linux_set_resume_request, &array);
5183
5184 /* If there is a thread which would otherwise be resumed, which has
5185 a pending status, then don't resume any threads - we can just
5186 report the pending status. Make sure to queue any signals that
5187 would otherwise be sent. In non-stop mode, we'll apply this
5188 logic to each thread individually. We consume all pending events
5189 before considering whether to start a step-over (in all-stop). */
5190 any_pending = 0;
5191 if (!non_stop)
5192 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5193
5194 /* If there is a thread which would otherwise be resumed, which is
5195 stopped at a breakpoint that needs stepping over, then don't
5196 resume any threads - have it step over the breakpoint with all
5197 other threads stopped, then resume all threads again. Make sure
5198 to queue any signals that would otherwise be delivered or
5199 queued. */
5200 if (!any_pending && supports_breakpoints ())
5201 need_step_over
5202 = (struct thread_info *) find_inferior (&all_threads,
5203 need_step_over_p, NULL);
5204
5205 leave_all_stopped = (need_step_over != NULL || any_pending);
5206
5207 if (debug_threads)
5208 {
5209 if (need_step_over != NULL)
5210 debug_printf ("Not resuming all, need step over\n");
5211 else if (any_pending)
5212 debug_printf ("Not resuming, all-stop and found "
5213 "an LWP with pending status\n");
5214 else
5215 debug_printf ("Resuming, no pending status or step over needed\n");
5216 }
5217
5218 /* Even if we're leaving threads stopped, queue all signals we'd
5219 otherwise deliver. */
5220 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5221
5222 if (need_step_over)
5223 start_step_over (get_thread_lwp (need_step_over));
5224
5225 if (debug_threads)
5226 {
5227 debug_printf ("linux_resume done\n");
5228 debug_exit ();
5229 }
5230
5231 /* We may have events that were pending that can/should be sent to
5232 the client now. Trigger a linux_wait call. */
5233 if (target_is_async_p ())
5234 async_file_mark ();
5235 }
5236
5237 /* This function is called once per thread. We check the thread's
5238 last resume request, which will tell us whether to resume, step, or
5239 leave the thread stopped. Any signal the client requested to be
5240 delivered has already been enqueued at this point.
5241
5242 If any thread that GDB wants running is stopped at an internal
5243 breakpoint that needs stepping over, we start a step-over operation
5244 on that particular thread, and leave all others stopped. */
5245
5246 static int
5247 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5248 {
5249 struct thread_info *thread = (struct thread_info *) entry;
5250 struct lwp_info *lwp = get_thread_lwp (thread);
5251 int step;
5252
5253 if (lwp == except)
5254 return 0;
5255
5256 if (debug_threads)
5257 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5258
5259 if (!lwp->stopped)
5260 {
5261 if (debug_threads)
5262 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5263 return 0;
5264 }
5265
5266 if (thread->last_resume_kind == resume_stop
5267 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5268 {
5269 if (debug_threads)
5270 debug_printf (" client wants LWP to remain %ld stopped\n",
5271 lwpid_of (thread));
5272 return 0;
5273 }
5274
5275 if (lwp->status_pending_p)
5276 {
5277 if (debug_threads)
5278 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5279 lwpid_of (thread));
5280 return 0;
5281 }
5282
5283 gdb_assert (lwp->suspended >= 0);
5284
5285 if (lwp->suspended)
5286 {
5287 if (debug_threads)
5288 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5289 return 0;
5290 }
5291
5292 if (thread->last_resume_kind == resume_stop
5293 && lwp->pending_signals_to_report == NULL
5294 && lwp->collecting_fast_tracepoint == 0)
5295 {
5296 /* We haven't reported this LWP as stopped yet (otherwise, the
5297 last_status.kind check above would catch it, and we wouldn't
5298 reach here). This LWP may have been momentarily paused by a
5299 stop_all_lwps call while handling for example, another LWP's
5300 step-over. In that case, the pending expected SIGSTOP signal
5301 that was queued at vCont;t handling time will have already
5302 been consumed by wait_for_sigstop, and so we need to requeue
5303 another one here. Note that if the LWP already has a SIGSTOP
5304 pending, this is a no-op. */
5305
5306 if (debug_threads)
5307 debug_printf ("Client wants LWP %ld to stop. "
5308 "Making sure it has a SIGSTOP pending\n",
5309 lwpid_of (thread));
5310
5311 send_sigstop (lwp);
5312 }
5313
5314 if (thread->last_resume_kind == resume_step)
5315 {
5316 if (debug_threads)
5317 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5318 lwpid_of (thread));
5319
5320 /* If resume_step is requested by GDB, install single-step
5321 breakpoints when the thread is about to be actually resumed if
5322 the single-step breakpoints weren't removed. */
5323 if (can_software_single_step ()
5324 && !has_single_step_breakpoints (thread))
5325 install_software_single_step_breakpoints (lwp);
5326
5327 step = maybe_hw_step (thread);
5328 }
5329 else if (lwp->bp_reinsert != 0)
5330 {
5331 if (debug_threads)
5332 debug_printf (" stepping LWP %ld, reinsert set\n",
5333 lwpid_of (thread));
5334
5335 step = maybe_hw_step (thread);
5336 }
5337 else
5338 step = 0;
5339
5340 linux_resume_one_lwp (lwp, step, 0, NULL);
5341 return 0;
5342 }
5343
5344 static int
5345 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5346 {
5347 struct thread_info *thread = (struct thread_info *) entry;
5348 struct lwp_info *lwp = get_thread_lwp (thread);
5349
5350 if (lwp == except)
5351 return 0;
5352
5353 lwp_suspended_decr (lwp);
5354
5355 return proceed_one_lwp (entry, except);
5356 }
5357
5358 /* When we finish a step-over, set threads running again. If there's
5359 another thread that may need a step-over, now's the time to start
5360 it. Eventually, we'll move all threads past their breakpoints. */
5361
5362 static void
5363 proceed_all_lwps (void)
5364 {
5365 struct thread_info *need_step_over;
5366
5367 /* If there is a thread which would otherwise be resumed, which is
5368 stopped at a breakpoint that needs stepping over, then don't
5369 resume any threads - have it step over the breakpoint with all
5370 other threads stopped, then resume all threads again. */
5371
5372 if (supports_breakpoints ())
5373 {
5374 need_step_over
5375 = (struct thread_info *) find_inferior (&all_threads,
5376 need_step_over_p, NULL);
5377
5378 if (need_step_over != NULL)
5379 {
5380 if (debug_threads)
5381 debug_printf ("proceed_all_lwps: found "
5382 "thread %ld needing a step-over\n",
5383 lwpid_of (need_step_over));
5384
5385 start_step_over (get_thread_lwp (need_step_over));
5386 return;
5387 }
5388 }
5389
5390 if (debug_threads)
5391 debug_printf ("Proceeding, no step-over needed\n");
5392
5393 find_inferior (&all_threads, proceed_one_lwp, NULL);
5394 }
5395
5396 /* Stopped LWPs that the client wanted to be running, that don't have
5397 pending statuses, are set to run again, except for EXCEPT, if not
5398 NULL. This undoes a stop_all_lwps call. */
5399
5400 static void
5401 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5402 {
5403 if (debug_threads)
5404 {
5405 debug_enter ();
5406 if (except)
5407 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5408 lwpid_of (get_lwp_thread (except)));
5409 else
5410 debug_printf ("unstopping all lwps\n");
5411 }
5412
5413 if (unsuspend)
5414 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5415 else
5416 find_inferior (&all_threads, proceed_one_lwp, except);
5417
5418 if (debug_threads)
5419 {
5420 debug_printf ("unstop_all_lwps done\n");
5421 debug_exit ();
5422 }
5423 }
5424
5425
5426 #ifdef HAVE_LINUX_REGSETS
5427
5428 #define use_linux_regsets 1
5429
5430 /* Returns true if REGSET has been disabled. */
5431
5432 static int
5433 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5434 {
5435 return (info->disabled_regsets != NULL
5436 && info->disabled_regsets[regset - info->regsets]);
5437 }
5438
5439 /* Disable REGSET. */
5440
5441 static void
5442 disable_regset (struct regsets_info *info, struct regset_info *regset)
5443 {
5444 int dr_offset;
5445
5446 dr_offset = regset - info->regsets;
5447 if (info->disabled_regsets == NULL)
5448 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5449 info->disabled_regsets[dr_offset] = 1;
5450 }
5451
5452 static int
5453 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5454 struct regcache *regcache)
5455 {
5456 struct regset_info *regset;
5457 int saw_general_regs = 0;
5458 int pid;
5459 struct iovec iov;
5460
5461 pid = lwpid_of (current_thread);
5462 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5463 {
5464 void *buf, *data;
5465 int nt_type, res;
5466
5467 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5468 continue;
5469
5470 buf = xmalloc (regset->size);
5471
5472 nt_type = regset->nt_type;
5473 if (nt_type)
5474 {
5475 iov.iov_base = buf;
5476 iov.iov_len = regset->size;
5477 data = (void *) &iov;
5478 }
5479 else
5480 data = buf;
5481
5482 #ifndef __sparc__
5483 res = ptrace (regset->get_request, pid,
5484 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5485 #else
5486 res = ptrace (regset->get_request, pid, data, nt_type);
5487 #endif
5488 if (res < 0)
5489 {
5490 if (errno == EIO)
5491 {
5492 /* If we get EIO on a regset, do not try it again for
5493 this process mode. */
5494 disable_regset (regsets_info, regset);
5495 }
5496 else if (errno == ENODATA)
5497 {
5498 /* ENODATA may be returned if the regset is currently
5499 not "active". This can happen in normal operation,
5500 so suppress the warning in this case. */
5501 }
5502 else if (errno == ESRCH)
5503 {
5504 /* At this point, ESRCH should mean the process is
5505 already gone, in which case we simply ignore attempts
5506 to read its registers. */
5507 }
5508 else
5509 {
5510 char s[256];
5511 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5512 pid);
5513 perror (s);
5514 }
5515 }
5516 else
5517 {
5518 if (regset->type == GENERAL_REGS)
5519 saw_general_regs = 1;
5520 regset->store_function (regcache, buf);
5521 }
5522 free (buf);
5523 }
5524 if (saw_general_regs)
5525 return 0;
5526 else
5527 return 1;
5528 }
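/* Illustration: a minimal sketch of the iovec-based regset transfer used
   above, assuming a Linux host that provides PTRACE_GETREGSET and
   NT_PRSTATUS and a thread that is already ptrace-stopped.  The
   example_* name is hypothetical; the block is fenced with "#if 0"
   because it is explanatory, not part of gdbserver.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static long
example_fetch_gregs (pid_t pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  /* The kernel transfers through IOV and shrinks iov_len to the
     number of bytes actually copied.  */
  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif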
5529
5530 static int
5531 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5532 struct regcache *regcache)
5533 {
5534 struct regset_info *regset;
5535 int saw_general_regs = 0;
5536 int pid;
5537 struct iovec iov;
5538
5539 pid = lwpid_of (current_thread);
5540 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5541 {
5542 void *buf, *data;
5543 int nt_type, res;
5544
5545 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5546 || regset->fill_function == NULL)
5547 continue;
5548
5549 buf = xmalloc (regset->size);
5550
5551 /* First fill the buffer with the current register set contents,
5552 in case there are any items in the kernel's regset that are
5553 not in gdbserver's regcache. */
5554
5555 nt_type = regset->nt_type;
5556 if (nt_type)
5557 {
5558 iov.iov_base = buf;
5559 iov.iov_len = regset->size;
5560 data = (void *) &iov;
5561 }
5562 else
5563 data = buf;
5564
5565 #ifndef __sparc__
5566 res = ptrace (regset->get_request, pid,
5567 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5568 #else
5569 res = ptrace (regset->get_request, pid, data, nt_type);
5570 #endif
5571
5572 if (res == 0)
5573 {
5574 /* Then overlay our cached registers on that. */
5575 regset->fill_function (regcache, buf);
5576
5577 /* Only now do we write the register set. */
5578 #ifndef __sparc__
5579 res = ptrace (regset->set_request, pid,
5580 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5581 #else
5582 res = ptrace (regset->set_request, pid, data, nt_type);
5583 #endif
5584 }
5585
5586 if (res < 0)
5587 {
5588 if (errno == EIO)
5589 {
5590 /* If we get EIO on a regset, do not try it again for
5591 this process mode. */
5592 disable_regset (regsets_info, regset);
5593 }
5594 else if (errno == ESRCH)
5595 {
5596 /* At this point, ESRCH should mean the process is
5597 already gone, in which case we simply ignore attempts
5598 to change its registers. See also the related
5599 comment in linux_resume_one_lwp. */
5600 free (buf);
5601 return 0;
5602 }
5603 else
5604 {
5605 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5606 }
5607 }
5608 else if (regset->type == GENERAL_REGS)
5609 saw_general_regs = 1;
5610 free (buf);
5611 }
5612 if (saw_general_regs)
5613 return 0;
5614 else
5615 return 1;
5616 }
5617
5618 #else /* !HAVE_LINUX_REGSETS */
5619
5620 #define use_linux_regsets 0
5621 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5622 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5623
5624 #endif
5625
5626 /* Return 1 if register REGNO is supported by one of the regset ptrace
5627 calls or 0 if it has to be transferred individually. */
5628
5629 static int
5630 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5631 {
5632 unsigned char mask = 1 << (regno % 8);
5633 size_t index = regno / 8;
5634
5635 return (use_linux_regsets
5636 && (regs_info->regset_bitmap == NULL
5637 || (regs_info->regset_bitmap[index] & mask) != 0));
5638 }
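/* Illustration: the bitmap arithmetic above in concrete numbers.  With
   8-bit bytes, register 10 lands in byte 10 / 8 == 1 under mask
   1 << (10 % 8) == 0x04.  A hypothetical standalone check:  */
#if 0
#include <assert.h>
#include <stddef.h>

static void
example_regset_bitmap (void)
{
  /* Bit 10 set: byte 1 holds 0x04.  */
  const unsigned char bitmap[] = { 0x00, 0x04 };
  int regno = 10;
  unsigned char mask = 1 << (regno % 8);
  size_t index = regno / 8;

  assert (index == 1 && mask == 0x04);
  assert ((bitmap[index] & mask) != 0);
}
#endif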
5639
5640 #ifdef HAVE_LINUX_USRREGS
5641
5642 static int
5643 register_addr (const struct usrregs_info *usrregs, int regnum)
5644 {
5645 int addr;
5646
5647 if (regnum < 0 || regnum >= usrregs->num_regs)
5648 error ("Invalid register number %d.", regnum);
5649
5650 addr = usrregs->regmap[regnum];
5651
5652 return addr;
5653 }
5654
5655 /* Fetch one register. */
5656 static void
5657 fetch_register (const struct usrregs_info *usrregs,
5658 struct regcache *regcache, int regno)
5659 {
5660 CORE_ADDR regaddr;
5661 int i, size;
5662 char *buf;
5663 int pid;
5664
5665 if (regno >= usrregs->num_regs)
5666 return;
5667 if ((*the_low_target.cannot_fetch_register) (regno))
5668 return;
5669
5670 regaddr = register_addr (usrregs, regno);
5671 if (regaddr == -1)
5672 return;
5673
5674 size = ((register_size (regcache->tdesc, regno)
5675 + sizeof (PTRACE_XFER_TYPE) - 1)
5676 & -sizeof (PTRACE_XFER_TYPE));
5677 buf = (char *) alloca (size);
5678
5679 pid = lwpid_of (current_thread);
5680 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5681 {
5682 errno = 0;
5683 *(PTRACE_XFER_TYPE *) (buf + i) =
5684 ptrace (PTRACE_PEEKUSER, pid,
5685 /* Coerce to a uintptr_t first to avoid potential gcc warning
5686 of coercing an 8 byte integer to a 4 byte pointer. */
5687 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5688 regaddr += sizeof (PTRACE_XFER_TYPE);
5689 if (errno != 0)
5690 error ("reading register %d: %s", regno, strerror (errno));
5691 }
5692
5693 if (the_low_target.supply_ptrace_register)
5694 the_low_target.supply_ptrace_register (regcache, regno, buf);
5695 else
5696 supply_register (regcache, regno, buf);
5697 }
5698
5699 /* Store one register. */
5700 static void
5701 store_register (const struct usrregs_info *usrregs,
5702 struct regcache *regcache, int regno)
5703 {
5704 CORE_ADDR regaddr;
5705 int i, size;
5706 char *buf;
5707 int pid;
5708
5709 if (regno >= usrregs->num_regs)
5710 return;
5711 if ((*the_low_target.cannot_store_register) (regno))
5712 return;
5713
5714 regaddr = register_addr (usrregs, regno);
5715 if (regaddr == -1)
5716 return;
5717
5718 size = ((register_size (regcache->tdesc, regno)
5719 + sizeof (PTRACE_XFER_TYPE) - 1)
5720 & -sizeof (PTRACE_XFER_TYPE));
5721 buf = (char *) alloca (size);
5722 memset (buf, 0, size);
5723
5724 if (the_low_target.collect_ptrace_register)
5725 the_low_target.collect_ptrace_register (regcache, regno, buf);
5726 else
5727 collect_register (regcache, regno, buf);
5728
5729 pid = lwpid_of (current_thread);
5730 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5731 {
5732 errno = 0;
5733 ptrace (PTRACE_POKEUSER, pid,
5734 /* Coerce to a uintptr_t first to avoid potential gcc warning
5735 about coercing an 8 byte integer to a 4 byte pointer. */
5736 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5737 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5738 if (errno != 0)
5739 {
5740 /* At this point, ESRCH should mean the process is
5741 already gone, in which case we simply ignore attempts
5742 to change its registers. See also the related
5743 comment in linux_resume_one_lwp. */
5744 if (errno == ESRCH)
5745 return;
5746
5747 if ((*the_low_target.cannot_store_register) (regno) == 0)
5748 error ("writing register %d: %s", regno, strerror (errno));
5749 }
5750 regaddr += sizeof (PTRACE_XFER_TYPE);
5751 }
5752 }
5753
5754 /* Fetch all registers, or just one, from the child process.
5755 If REGNO is -1, do this for all registers, skipping any that are
5756 assumed to have been retrieved by regsets_fetch_inferior_registers,
5757 unless ALL is non-zero.
5758 Otherwise, REGNO specifies which register (so we can save time). */
5759 static void
5760 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5761 struct regcache *regcache, int regno, int all)
5762 {
5763 struct usrregs_info *usr = regs_info->usrregs;
5764
5765 if (regno == -1)
5766 {
5767 for (regno = 0; regno < usr->num_regs; regno++)
5768 if (all || !linux_register_in_regsets (regs_info, regno))
5769 fetch_register (usr, regcache, regno);
5770 }
5771 else
5772 fetch_register (usr, regcache, regno);
5773 }
5774
5775 /* Store our register values back into the inferior.
5776 If REGNO is -1, do this for all registers, skipping any that are
5777 assumed to have been saved by regsets_store_inferior_registers,
5778 unless ALL is non-zero.
5779 Otherwise, REGNO specifies which register (so we can save time). */
5780 static void
5781 usr_store_inferior_registers (const struct regs_info *regs_info,
5782 struct regcache *regcache, int regno, int all)
5783 {
5784 struct usrregs_info *usr = regs_info->usrregs;
5785
5786 if (regno == -1)
5787 {
5788 for (regno = 0; regno < usr->num_regs; regno++)
5789 if (all || !linux_register_in_regsets (regs_info, regno))
5790 store_register (usr, regcache, regno);
5791 }
5792 else
5793 store_register (usr, regcache, regno);
5794 }
5795
5796 #else /* !HAVE_LINUX_USRREGS */
5797
5798 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5799 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5800
5801 #endif
5802
5803
5804 static void
5805 linux_fetch_registers (struct regcache *regcache, int regno)
5806 {
5807 int use_regsets;
5808 int all = 0;
5809 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5810
5811 if (regno == -1)
5812 {
5813 if (the_low_target.fetch_register != NULL
5814 && regs_info->usrregs != NULL)
5815 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5816 (*the_low_target.fetch_register) (regcache, regno);
5817
5818 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5819 if (regs_info->usrregs != NULL)
5820 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5821 }
5822 else
5823 {
5824 if (the_low_target.fetch_register != NULL
5825 && (*the_low_target.fetch_register) (regcache, regno))
5826 return;
5827
5828 use_regsets = linux_register_in_regsets (regs_info, regno);
5829 if (use_regsets)
5830 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5831 regcache);
5832 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5833 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5834 }
5835 }
5836
5837 static void
5838 linux_store_registers (struct regcache *regcache, int regno)
5839 {
5840 int use_regsets;
5841 int all = 0;
5842 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5843
5844 if (regno == -1)
5845 {
5846 all = regsets_store_inferior_registers (regs_info->regsets_info,
5847 regcache);
5848 if (regs_info->usrregs != NULL)
5849 usr_store_inferior_registers (regs_info, regcache, regno, all);
5850 }
5851 else
5852 {
5853 use_regsets = linux_register_in_regsets (regs_info, regno);
5854 if (use_regsets)
5855 all = regsets_store_inferior_registers (regs_info->regsets_info,
5856 regcache);
5857 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5858 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5859 }
5860 }
5861
5862
5863 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5864 to debugger memory starting at MYADDR. */
5865
5866 static int
5867 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5868 {
5869 int pid = lwpid_of (current_thread);
5870 register PTRACE_XFER_TYPE *buffer;
5871 register CORE_ADDR addr;
5872 register int count;
5873 char filename[64];
5874 register int i;
5875 int ret;
5876 int fd;
5877
5878 /* Try using /proc. Don't bother for one word. */
5879 if (len >= 3 * sizeof (long))
5880 {
5881 int bytes;
5882
5883 /* We could keep this file open and cache it - possibly one per
5884 thread. That requires some juggling, but is even faster. */
5885 sprintf (filename, "/proc/%d/mem", pid);
5886 fd = open (filename, O_RDONLY | O_LARGEFILE);
5887 if (fd == -1)
5888 goto no_proc;
5889
5890 /* If pread64 is available, use it. It's faster if the kernel
5891 supports it (only one syscall), and it's 64-bit safe even on
5892 32-bit platforms (for instance, SPARC debugging a SPARC64
5893 application). */
5894 #ifdef HAVE_PREAD64
5895 bytes = pread64 (fd, myaddr, len, memaddr);
5896 #else
5897 bytes = -1;
5898 if (lseek (fd, memaddr, SEEK_SET) != -1)
5899 bytes = read (fd, myaddr, len);
5900 #endif
5901
5902 close (fd);
5903 if (bytes == len)
5904 return 0;
5905
5906 /* Some data was read, we'll try to get the rest with ptrace. */
5907 if (bytes > 0)
5908 {
5909 memaddr += bytes;
5910 myaddr += bytes;
5911 len -= bytes;
5912 }
5913 }
5914
5915 no_proc:
5916 /* Round starting address down to longword boundary. */
5917 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5918 /* Round ending address up; get number of longwords that makes. */
5919 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5920 / sizeof (PTRACE_XFER_TYPE));
5921 /* Allocate buffer of that many longwords. */
5922 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5923
5924 /* Read all the longwords. */
5925 errno = 0;
5926 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5927 {
5928 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5929 about coercing an 8 byte integer to a 4 byte pointer. */
5930 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5931 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5932 (PTRACE_TYPE_ARG4) 0);
5933 if (errno)
5934 break;
5935 }
5936 ret = errno;
5937
5938 /* Copy appropriate bytes out of the buffer. */
5939 if (i > 0)
5940 {
5941 i *= sizeof (PTRACE_XFER_TYPE);
5942 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5943 memcpy (myaddr,
5944 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5945 i < len ? i : len);
5946 }
5947
5948 return ret;
5949 }
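/* Illustration: the rounding arithmetic above in numbers.  With
   sizeof (PTRACE_XFER_TYPE) == 8, a read of LEN == 5 bytes at
   MEMADDR == 0x1003 rounds ADDR down to 0x1000, needs
   COUNT == ((0x1008 - 0x1000) + 7) / 8 == 1 longword, and the wanted
   bytes start at offset MEMADDR & 7 == 3 inside the buffer.  A
   hypothetical standalone check of the same math:  */
#if 0
#include <assert.h>

static void
example_check_rounding (void)
{
  unsigned long memaddr = 0x1003, len = 5, xfer = 8;
  unsigned long addr = memaddr & -xfer;
  unsigned long count = (((memaddr + len) - addr) + xfer - 1) / xfer;

  assert (addr == 0x1000 && count == 1 && (memaddr & (xfer - 1)) == 3);
}
#endif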
5950
5951 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5952 memory at MEMADDR. On failure (cannot write to the inferior)
5953 returns the value of errno. Always succeeds if LEN is zero. */
5954
5955 static int
5956 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5957 {
5958 register int i;
5959 /* Round starting address down to longword boundary. */
5960 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5961 /* Round ending address up; get number of longwords that makes. */
5962 register int count
5963 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5964 / sizeof (PTRACE_XFER_TYPE);
5965
5966 /* Allocate buffer of that many longwords. */
5967 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5968
5969 int pid = lwpid_of (current_thread);
5970
5971 if (len == 0)
5972 {
5973 /* Zero length write always succeeds. */
5974 return 0;
5975 }
5976
5977 if (debug_threads)
5978 {
5979 /* Dump up to four bytes. */
5980 char str[4 * 2 + 1];
5981 char *p = str;
5982 int dump = len < 4 ? len : 4;
5983
5984 for (i = 0; i < dump; i++)
5985 {
5986 sprintf (p, "%02x", myaddr[i]);
5987 p += 2;
5988 }
5989 *p = '\0';
5990
5991 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5992 str, (long) memaddr, pid);
5993 }
5994
5995 /* Fill start and end extra bytes of buffer with existing memory data. */
5996
5997 errno = 0;
5998 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5999 about coercing an 8 byte integer to a 4 byte pointer. */
6000 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
6001 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
6002 (PTRACE_TYPE_ARG4) 0);
6003 if (errno)
6004 return errno;
6005
6006 if (count > 1)
6007 {
6008 errno = 0;
6009 buffer[count - 1]
6010 = ptrace (PTRACE_PEEKTEXT, pid,
6011 /* Coerce to a uintptr_t first to avoid potential gcc warning
6012 about coercing an 8 byte integer to a 4 byte pointer. */
6013 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
6014 * sizeof (PTRACE_XFER_TYPE)),
6015 (PTRACE_TYPE_ARG4) 0);
6016 if (errno)
6017 return errno;
6018 }
6019
6020 /* Copy data to be written over corresponding part of buffer. */
6021
6022 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
6023 myaddr, len);
6024
6025 /* Write the entire buffer. */
6026
6027 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
6028 {
6029 errno = 0;
6030 ptrace (PTRACE_POKETEXT, pid,
6031 /* Coerce to a uintptr_t first to avoid potential gcc warning
6032 about coercing an 8 byte integer to a 4 byte pointer. */
6033 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
6034 (PTRACE_TYPE_ARG4) buffer[i]);
6035 if (errno)
6036 return errno;
6037 }
6038
6039 return 0;
6040 }
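/* Illustration: the function above is a word-granular read-modify-write:
   the first and last longwords are fetched first so that bytes outside
   [MEMADDR, MEMADDR + LEN) survive the word-sized pokes.  The overlay
   step in isolation, as a hypothetical host-only sketch:  */
#if 0
#include <string.h>
#include <assert.h>

static void
example_overlay (void)
{
  unsigned char word[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; /* Existing memory.  */
  unsigned char payload[3] = { 0xaa, 0xbb, 0xcc };
  unsigned long memaddr = 0x1003;                     /* Unaligned target.  */

  /* Copy the data over the corresponding part of the buffer, keeping
     the surrounding bytes intact - the same memcpy linux_write_memory
     performs before poking the whole buffer back.  */
  memcpy (word + (memaddr & 7), payload, sizeof payload);
  assert (word[2] == 2 && word[3] == 0xaa && word[6] == 6);
}
#endif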
6041
6042 static void
6043 linux_look_up_symbols (void)
6044 {
6045 #ifdef USE_THREAD_DB
6046 struct process_info *proc = current_process ();
6047
6048 if (proc->priv->thread_db != NULL)
6049 return;
6050
6051 thread_db_init ();
6052 #endif
6053 }
6054
6055 static void
6056 linux_request_interrupt (void)
6057 {
6058 extern unsigned long signal_pid;
6059
6060 /* Send a SIGINT to the process group. This acts just like the user
6061 typed a ^C on the controlling terminal. */
6062 kill (-signal_pid, SIGINT);
6063 }
6064
6065 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
6066 to debugger memory starting at MYADDR. */
6067
6068 static int
6069 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
6070 {
6071 char filename[PATH_MAX];
6072 int fd, n;
6073 int pid = lwpid_of (current_thread);
6074
6075 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6076
6077 fd = open (filename, O_RDONLY);
6078 if (fd < 0)
6079 return -1;
6080
6081 if (offset != (CORE_ADDR) 0
6082 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6083 n = -1;
6084 else
6085 n = read (fd, myaddr, len);
6086
6087 close (fd);
6088
6089 return n;
6090 }
6091
6092 /* These breakpoint- and watchpoint-related wrapper functions simply
6093 pass on the function call if the target has registered a
6094 corresponding function. */
6095
6096 static int
6097 linux_supports_z_point_type (char z_type)
6098 {
6099 return (the_low_target.supports_z_point_type != NULL
6100 && the_low_target.supports_z_point_type (z_type));
6101 }
6102
6103 static int
6104 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6105 int size, struct raw_breakpoint *bp)
6106 {
6107 if (type == raw_bkpt_type_sw)
6108 return insert_memory_breakpoint (bp);
6109 else if (the_low_target.insert_point != NULL)
6110 return the_low_target.insert_point (type, addr, size, bp);
6111 else
6112 /* Unsupported (see target.h). */
6113 return 1;
6114 }
6115
6116 static int
6117 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6118 int size, struct raw_breakpoint *bp)
6119 {
6120 if (type == raw_bkpt_type_sw)
6121 return remove_memory_breakpoint (bp);
6122 else if (the_low_target.remove_point != NULL)
6123 return the_low_target.remove_point (type, addr, size, bp);
6124 else
6125 /* Unsupported (see target.h). */
6126 return 1;
6127 }
6128
6129 /* Implement the to_stopped_by_sw_breakpoint target_ops
6130 method. */
6131
6132 static int
6133 linux_stopped_by_sw_breakpoint (void)
6134 {
6135 struct lwp_info *lwp = get_thread_lwp (current_thread);
6136
6137 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6138 }
6139
6140 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6141 method. */
6142
6143 static int
6144 linux_supports_stopped_by_sw_breakpoint (void)
6145 {
6146 return USE_SIGTRAP_SIGINFO;
6147 }
6148
6149 /* Implement the to_stopped_by_hw_breakpoint target_ops
6150 method. */
6151
6152 static int
6153 linux_stopped_by_hw_breakpoint (void)
6154 {
6155 struct lwp_info *lwp = get_thread_lwp (current_thread);
6156
6157 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6158 }
6159
6160 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6161 method. */
6162
6163 static int
6164 linux_supports_stopped_by_hw_breakpoint (void)
6165 {
6166 return USE_SIGTRAP_SIGINFO;
6167 }
6168
6169 /* Implement the supports_hardware_single_step target_ops method. */
6170
6171 static int
6172 linux_supports_hardware_single_step (void)
6173 {
6174 return can_hardware_single_step ();
6175 }
6176
6177 static int
6178 linux_supports_software_single_step (void)
6179 {
6180 return can_software_single_step ();
6181 }
6182
6183 static int
6184 linux_stopped_by_watchpoint (void)
6185 {
6186 struct lwp_info *lwp = get_thread_lwp (current_thread);
6187
6188 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6189 }
6190
6191 static CORE_ADDR
6192 linux_stopped_data_address (void)
6193 {
6194 struct lwp_info *lwp = get_thread_lwp (current_thread);
6195
6196 return lwp->stopped_data_address;
6197 }
6198
6199 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6200 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6201 && defined(PT_TEXT_END_ADDR)
6202
6203 /* This is only used for targets that define PT_TEXT_ADDR,
6204 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
6205 target presumably has other ways of acquiring this information, such
6206 as loadmaps. */
6207
6208 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6209 to tell gdb about. */
6210
6211 static int
6212 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6213 {
6214 unsigned long text, text_end, data;
6215 int pid = lwpid_of (current_thread);
6216
6217 errno = 0;
6218
6219 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6220 (PTRACE_TYPE_ARG4) 0);
6221 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6222 (PTRACE_TYPE_ARG4) 0);
6223 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6224 (PTRACE_TYPE_ARG4) 0);
6225
6226 if (errno == 0)
6227 {
6228 /* Both text and data offsets produced at compile-time (and so
6229 used by gdb) are relative to the beginning of the program,
6230 with the data segment immediately following the text segment.
6231 However, the actual runtime layout in memory may put the data
6232 somewhere else, so when we send gdb a data base-address, we
6233 use the real data base address and subtract the compile-time
6234 data base-address from it (which is just the length of the
6235 text segment). BSS immediately follows data in both
6236 cases. */
6237 *text_p = text;
6238 *data_p = data - (text_end - text);
6239
6240 return 1;
6241 }
6242 return 0;
6243 }
6244 #endif
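/* Illustration: a numeric example of the offset computation in
   linux_read_offsets above.  Suppose ptrace reports TEXT == 0x8000,
   TEXT_END == 0x9000 and DATA == 0x20000.  The text segment is 0x1000
   bytes, so the data base address reported to GDB is
   0x20000 - 0x1000 == 0x1f000: the compile-time data offset (equal to
   the text length) is subtracted from the runtime address.  A
   hypothetical check:  */
#if 0
#include <assert.h>

static void
example_uclinux_offsets (void)
{
  unsigned long text = 0x8000, text_end = 0x9000, data = 0x20000;

  assert (data - (text_end - text) == 0x1f000);
}
#endif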
6245
6246 static int
6247 linux_qxfer_osdata (const char *annex,
6248 unsigned char *readbuf, unsigned const char *writebuf,
6249 CORE_ADDR offset, int len)
6250 {
6251 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6252 }
6253
6254 /* Convert a native/host siginfo object, into/from the siginfo in the
6255 layout of the inferiors' architecture. */
6256
6257 static void
6258 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6259 {
6260 int done = 0;
6261
6262 if (the_low_target.siginfo_fixup != NULL)
6263 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6264
6265 /* If there was no callback, or the callback didn't do anything,
6266 then just do a straight memcpy. */
6267 if (!done)
6268 {
6269 if (direction == 1)
6270 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6271 else
6272 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6273 }
6274 }
6275
6276 static int
6277 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6278 unsigned const char *writebuf, CORE_ADDR offset, int len)
6279 {
6280 int pid;
6281 siginfo_t siginfo;
6282 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6283
6284 if (current_thread == NULL)
6285 return -1;
6286
6287 pid = lwpid_of (current_thread);
6288
6289 if (debug_threads)
6290 debug_printf ("%s siginfo for lwp %d.\n",
6291 readbuf != NULL ? "Reading" : "Writing",
6292 pid);
6293
6294 if (offset >= sizeof (siginfo))
6295 return -1;
6296
6297 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6298 return -1;
6299
6300 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6301 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6302 inferior with a 64-bit GDBSERVER should look the same as debugging it
6303 with a 32-bit GDBSERVER, we need to convert it. */
6304 siginfo_fixup (&siginfo, inf_siginfo, 0);
6305
6306 if (offset + len > sizeof (siginfo))
6307 len = sizeof (siginfo) - offset;
6308
6309 if (readbuf != NULL)
6310 memcpy (readbuf, inf_siginfo + offset, len);
6311 else
6312 {
6313 memcpy (inf_siginfo + offset, writebuf, len);
6314
6315 /* Convert back to ptrace layout before flushing it out. */
6316 siginfo_fixup (&siginfo, inf_siginfo, 1);
6317
6318 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6319 return -1;
6320 }
6321
6322 return len;
6323 }
6324
6325 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6326 it lets us notice when children change state; and it acts as the
6327 handler for the sigsuspend in my_waitpid. */
6328
6329 static void
6330 sigchld_handler (int signo)
6331 {
6332 int old_errno = errno;
6333
6334 if (debug_threads)
6335 {
6336 do
6337 {
6338 /* fprintf is not async-signal-safe, so call write
6339 directly. */
6340 if (write (2, "sigchld_handler\n",
6341 sizeof ("sigchld_handler\n") - 1) < 0)
6342 break; /* just ignore */
6343 } while (0);
6344 }
6345
6346 if (target_is_async_p ())
6347 async_file_mark (); /* trigger a linux_wait */
6348
6349 errno = old_errno;
6350 }
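/* Illustration: a sketch of how such a handler is typically installed
   with sigaction; SA_RESTART keeps most interrupted syscalls from
   failing with EINTR.  This uses only standard POSIX calls and is
   illustrative, not gdbserver's actual installation code.  */
#if 0
#include <signal.h>
#include <string.h>

static void
example_install_sigchld (void (*handler) (int))
{
  struct sigaction sa;

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);
}
#endif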
6351
6352 static int
6353 linux_supports_non_stop (void)
6354 {
6355 return 1;
6356 }
6357
6358 static int
6359 linux_async (int enable)
6360 {
6361 int previous = target_is_async_p ();
6362
6363 if (debug_threads)
6364 debug_printf ("linux_async (%d), previous=%d\n",
6365 enable, previous);
6366
6367 if (previous != enable)
6368 {
6369 sigset_t mask;
6370 sigemptyset (&mask);
6371 sigaddset (&mask, SIGCHLD);
6372
6373 sigprocmask (SIG_BLOCK, &mask, NULL);
6374
6375 if (enable)
6376 {
6377 if (pipe (linux_event_pipe) == -1)
6378 {
6379 linux_event_pipe[0] = -1;
6380 linux_event_pipe[1] = -1;
6381 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6382
6383 warning ("creating event pipe failed.");
6384 return previous;
6385 }
6386
6387 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6388 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6389
6390 /* Register the event loop handler. */
6391 add_file_handler (linux_event_pipe[0],
6392 handle_target_event, NULL);
6393
6394 /* Always trigger a linux_wait. */
6395 async_file_mark ();
6396 }
6397 else
6398 {
6399 delete_file_handler (linux_event_pipe[0]);
6400
6401 close (linux_event_pipe[0]);
6402 close (linux_event_pipe[1]);
6403 linux_event_pipe[0] = -1;
6404 linux_event_pipe[1] = -1;
6405 }
6406
6407 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6408 }
6409
6410 return previous;
6411 }
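/* Illustration: the event pipe above is an instance of the classic
   self-pipe pattern - an async context (such as sigchld_handler via
   async_file_mark) writes a byte to the pipe's write end, and the
   event loop polls the read end.  A minimal sketch assuming only
   POSIX pipe/fcntl; example_* names are hypothetical.  Marking both
   ends O_NONBLOCK means a full pipe drops the wakeup byte rather than
   blocking, which is fine: any byte already in the pipe makes the
   event loop run.  */
#if 0
#include <unistd.h>
#include <fcntl.h>

static int example_pipe[2];

static int
example_self_pipe_init (void)
{
  if (pipe (example_pipe) == -1)
    return -1;
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}

static void
example_mark (void)
{
  /* Async-signal-safe wakeup; one byte is enough.  */
  (void) write (example_pipe[1], "+", 1);
}
#endif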
6412
6413 static int
6414 linux_start_non_stop (int nonstop)
6415 {
6416 /* Register or unregister from event-loop accordingly. */
6417 linux_async (nonstop);
6418
6419 if (target_is_async_p () != (nonstop != 0))
6420 return -1;
6421
6422 return 0;
6423 }
6424
6425 static int
6426 linux_supports_multi_process (void)
6427 {
6428 return 1;
6429 }
6430
6431 /* Check if fork events are supported. */
6432
6433 static int
6434 linux_supports_fork_events (void)
6435 {
6436 return linux_supports_tracefork ();
6437 }
6438
6439 /* Check if vfork events are supported. */
6440
6441 static int
6442 linux_supports_vfork_events (void)
6443 {
6444 return linux_supports_tracefork ();
6445 }
6446
6447 /* Check if exec events are supported. */
6448
6449 static int
6450 linux_supports_exec_events (void)
6451 {
6452 return linux_supports_traceexec ();
6453 }
6454
6455 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6456 options for the specified lwp. */
6457
6458 static int
6459 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6460 void *args)
6461 {
6462 struct thread_info *thread = (struct thread_info *) entry;
6463 struct lwp_info *lwp = get_thread_lwp (thread);
6464
6465 if (!lwp->stopped)
6466 {
6467 /* Stop the lwp so we can modify its ptrace options. */
6468 lwp->must_set_ptrace_flags = 1;
6469 linux_stop_lwp (lwp);
6470 }
6471 else
6472 {
6473 /* Already stopped; go ahead and set the ptrace options. */
6474 struct process_info *proc = find_process_pid (pid_of (thread));
6475 int options = linux_low_ptrace_options (proc->attached);
6476
6477 linux_enable_event_reporting (lwpid_of (thread), options);
6478 lwp->must_set_ptrace_flags = 0;
6479 }
6480
6481 return 0;
6482 }
6483
6484 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6485 ptrace flags for all inferiors. This is in case the new GDB connection
6486 doesn't support the same set of events that the previous one did. */
6487
6488 static void
6489 linux_handle_new_gdb_connection (void)
6490 {
6491 pid_t pid;
6492
6493 /* Request that all the lwps reset their ptrace options. */
6494 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6495 }
6496
6497 static int
6498 linux_supports_disable_randomization (void)
6499 {
6500 #ifdef HAVE_PERSONALITY
6501 return 1;
6502 #else
6503 return 0;
6504 #endif
6505 }
6506
6507 static int
6508 linux_supports_agent (void)
6509 {
6510 return 1;
6511 }
6512
6513 static int
6514 linux_supports_range_stepping (void)
6515 {
6516 if (can_software_single_step ())
6517 return 1;
6518 if (*the_low_target.supports_range_stepping == NULL)
6519 return 0;
6520
6521 return (*the_low_target.supports_range_stepping) ();
6522 }
6523
6524 /* Enumerate spufs IDs for process PID. */
6525 static int
6526 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6527 {
6528 int pos = 0;
6529 int written = 0;
6530 char path[128];
6531 DIR *dir;
6532 struct dirent *entry;
6533
6534 sprintf (path, "/proc/%ld/fd", pid);
6535 dir = opendir (path);
6536 if (!dir)
6537 return -1;
6538
6539 rewinddir (dir);
6540 while ((entry = readdir (dir)) != NULL)
6541 {
6542 struct stat st;
6543 struct statfs stfs;
6544 int fd;
6545
6546 fd = atoi (entry->d_name);
6547 if (!fd)
6548 continue;
6549
6550 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6551 if (stat (path, &st) != 0)
6552 continue;
6553 if (!S_ISDIR (st.st_mode))
6554 continue;
6555
6556 if (statfs (path, &stfs) != 0)
6557 continue;
6558 if (stfs.f_type != SPUFS_MAGIC)
6559 continue;
6560
6561 if (pos >= offset && pos + 4 <= offset + len)
6562 {
6563 *(unsigned int *)(buf + pos - offset) = fd;
6564 written += 4;
6565 }
6566 pos += 4;
6567 }
6568
6569 closedir (dir);
6570 return written;
6571 }
6572
6573 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6574 object type, using the /proc file system. */
6575 static int
6576 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6577 unsigned const char *writebuf,
6578 CORE_ADDR offset, int len)
6579 {
6580 long pid = lwpid_of (current_thread);
6581 char buf[128];
6582 int fd = 0;
6583 int ret = 0;
6584
6585 if (!writebuf && !readbuf)
6586 return -1;
6587
6588 if (!*annex)
6589 {
6590 if (!readbuf)
6591 return -1;
6592 else
6593 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6594 }
6595
6596 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6597 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6598 if (fd <= 0)
6599 return -1;
6600
6601 if (offset != 0
6602 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6603 {
6604 close (fd);
6605 return 0;
6606 }
6607
6608 if (writebuf)
6609 ret = write (fd, writebuf, (size_t) len);
6610 else
6611 ret = read (fd, readbuf, (size_t) len);
6612
6613 close (fd);
6614 return ret;
6615 }
6616
6617 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6618 struct target_loadseg
6619 {
6620 /* Core address to which the segment is mapped. */
6621 Elf32_Addr addr;
6622 /* VMA recorded in the program header. */
6623 Elf32_Addr p_vaddr;
6624 /* Size of this segment in memory. */
6625 Elf32_Word p_memsz;
6626 };
6627
6628 # if defined PT_GETDSBT
6629 struct target_loadmap
6630 {
6631 /* Protocol version number, must be zero. */
6632 Elf32_Word version;
6633 /* Pointer to the DSBT table, its size, and the DSBT index. */
6634 unsigned *dsbt_table;
6635 unsigned dsbt_size, dsbt_index;
6636 /* Number of segments in this map. */
6637 Elf32_Word nsegs;
6638 /* The actual memory map. */
6639 struct target_loadseg segs[/*nsegs*/];
6640 };
6641 # define LINUX_LOADMAP PT_GETDSBT
6642 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6643 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6644 # else
6645 struct target_loadmap
6646 {
6647 /* Protocol version number, must be zero. */
6648 Elf32_Half version;
6649 /* Number of segments in this map. */
6650 Elf32_Half nsegs;
6651 /* The actual memory map. */
6652 struct target_loadseg segs[/*nsegs*/];
6653 };
6654 # define LINUX_LOADMAP PTRACE_GETFDPIC
6655 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6656 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6657 # endif
6658
6659 static int
6660 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6661 unsigned char *myaddr, unsigned int len)
6662 {
6663 int pid = lwpid_of (current_thread);
6664 int addr = -1;
6665 struct target_loadmap *data = NULL;
6666 unsigned int actual_length, copy_length;
6667
6668 if (strcmp (annex, "exec") == 0)
6669 addr = (int) LINUX_LOADMAP_EXEC;
6670 else if (strcmp (annex, "interp") == 0)
6671 addr = (int) LINUX_LOADMAP_INTERP;
6672 else
6673 return -1;
6674
6675 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6676 return -1;
6677
6678 if (data == NULL)
6679 return -1;
6680
6681 actual_length = sizeof (struct target_loadmap)
6682 + sizeof (struct target_loadseg) * data->nsegs;
6683
6684 if (offset < 0 || offset > actual_length)
6685 return -1;
6686
6687 copy_length = actual_length - offset < len ? actual_length - offset : len;
6688 memcpy (myaddr, (char *) data + offset, copy_length);
6689 return copy_length;
6690 }
6691 #else
6692 # define linux_read_loadmap NULL
6693 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6694
6695 static void
6696 linux_process_qsupported (char **features, int count)
6697 {
6698 if (the_low_target.process_qsupported != NULL)
6699 the_low_target.process_qsupported (features, count);
6700 }
6701
6702 static int
6703 linux_supports_catch_syscall (void)
6704 {
6705 return (the_low_target.get_syscall_trapinfo != NULL
6706 && linux_supports_tracesysgood ());
6707 }
6708
6709 static int
6710 linux_get_ipa_tdesc_idx (void)
6711 {
6712 if (the_low_target.get_ipa_tdesc_idx == NULL)
6713 return 0;
6714
6715 return (*the_low_target.get_ipa_tdesc_idx) ();
6716 }
6717
6718 static int
6719 linux_supports_tracepoints (void)
6720 {
6721 if (*the_low_target.supports_tracepoints == NULL)
6722 return 0;
6723
6724 return (*the_low_target.supports_tracepoints) ();
6725 }
6726
6727 static CORE_ADDR
6728 linux_read_pc (struct regcache *regcache)
6729 {
6730 if (the_low_target.get_pc == NULL)
6731 return 0;
6732
6733 return (*the_low_target.get_pc) (regcache);
6734 }
6735
6736 static void
6737 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6738 {
6739 gdb_assert (the_low_target.set_pc != NULL);
6740
6741 (*the_low_target.set_pc) (regcache, pc);
6742 }
6743
6744 static int
6745 linux_thread_stopped (struct thread_info *thread)
6746 {
6747 return get_thread_lwp (thread)->stopped;
6748 }
6749
6750 /* This exposes stop-all-threads functionality to other modules. */
6751
6752 static void
6753 linux_pause_all (int freeze)
6754 {
6755 stop_all_lwps (freeze, NULL);
6756 }
6757
6758 /* This exposes unstop-all-threads functionality to other gdbserver
6759 modules. */
6760
6761 static void
6762 linux_unpause_all (int unfreeze)
6763 {
6764 unstop_all_lwps (unfreeze, NULL);
6765 }
6766
6767 static int
6768 linux_prepare_to_access_memory (void)
6769 {
6770 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6771 running LWP. */
6772 if (non_stop)
6773 linux_pause_all (1);
6774 return 0;
6775 }
6776
6777 static void
6778 linux_done_accessing_memory (void)
6779 {
6780 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6781 running LWP. */
6782 if (non_stop)
6783 linux_unpause_all (1);
6784 }
6785
6786 static int
6787 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6788 CORE_ADDR collector,
6789 CORE_ADDR lockaddr,
6790 ULONGEST orig_size,
6791 CORE_ADDR *jump_entry,
6792 CORE_ADDR *trampoline,
6793 ULONGEST *trampoline_size,
6794 unsigned char *jjump_pad_insn,
6795 ULONGEST *jjump_pad_insn_size,
6796 CORE_ADDR *adjusted_insn_addr,
6797 CORE_ADDR *adjusted_insn_addr_end,
6798 char *err)
6799 {
6800 return (*the_low_target.install_fast_tracepoint_jump_pad)
6801 (tpoint, tpaddr, collector, lockaddr, orig_size,
6802 jump_entry, trampoline, trampoline_size,
6803 jjump_pad_insn, jjump_pad_insn_size,
6804 adjusted_insn_addr, adjusted_insn_addr_end,
6805 err);
6806 }
6807
6808 static struct emit_ops *
6809 linux_emit_ops (void)
6810 {
6811 if (the_low_target.emit_ops != NULL)
6812 return (*the_low_target.emit_ops) ();
6813 else
6814 return NULL;
6815 }
6816
6817 static int
6818 linux_get_min_fast_tracepoint_insn_len (void)
6819 {
6820 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6821 }
6822
6823 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6824
6825 static int
6826 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6827 CORE_ADDR *phdr_memaddr, int *num_phdr)
6828 {
6829 char filename[PATH_MAX];
6830 int fd;
6831 const int auxv_size = is_elf64
6832 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6833 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6834
6835 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6836
6837 fd = open (filename, O_RDONLY);
6838 if (fd < 0)
6839 return 1;
6840
6841 *phdr_memaddr = 0;
6842 *num_phdr = 0;
6843 while (read (fd, buf, auxv_size) == auxv_size
6844 && (*phdr_memaddr == 0 || *num_phdr == 0))
6845 {
6846 if (is_elf64)
6847 {
6848 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6849
6850 switch (aux->a_type)
6851 {
6852 case AT_PHDR:
6853 *phdr_memaddr = aux->a_un.a_val;
6854 break;
6855 case AT_PHNUM:
6856 *num_phdr = aux->a_un.a_val;
6857 break;
6858 }
6859 }
6860 else
6861 {
6862 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6863
6864 switch (aux->a_type)
6865 {
6866 case AT_PHDR:
6867 *phdr_memaddr = aux->a_un.a_val;
6868 break;
6869 case AT_PHNUM:
6870 *num_phdr = aux->a_un.a_val;
6871 break;
6872 }
6873 }
6874 }
6875
6876 close (fd);
6877
6878 if (*phdr_memaddr == 0 || *num_phdr == 0)
6879 {
6880 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6881 "phdr_memaddr = %ld, phdr_num = %d",
6882 (long) *phdr_memaddr, *num_phdr);
6883 return 2;
6884 }
6885
6886 return 0;
6887 }
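/* Illustration: the same /proc/PID/auxv scanning technique works for
   any auxv tag.  A hypothetical 64-bit-only helper fetching AT_ENTRY
   (the program's entry point), as a sketch under the assumption that
   Elf64_auxv_t and AT_ENTRY come from <elf.h>:  */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static unsigned long
example_at_entry (int pid)
{
  char filename[64];
  Elf64_auxv_t aux;
  unsigned long entry = 0;
  int fd;

  snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return 0;
  /* Walk fixed-size entries until AT_NULL terminates the vector.  */
  while (read (fd, &aux, sizeof aux) == sizeof aux && aux.a_type != AT_NULL)
    if (aux.a_type == AT_ENTRY)
      {
	entry = aux.a_un.a_val;
	break;
      }
  close (fd);
  return entry;
}
#endif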
6888
6889 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6890
6891 static CORE_ADDR
6892 get_dynamic (const int pid, const int is_elf64)
6893 {
6894 CORE_ADDR phdr_memaddr, relocation;
6895 int num_phdr, i;
6896 unsigned char *phdr_buf;
6897 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6898
6899 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6900 return 0;
6901
6902 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6903 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6904
6905 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6906 return 0;
6907
6908 /* Compute relocation: it is expected to be 0 for "regular" executables,
6909 non-zero for PIE ones. */
6910 relocation = -1;
6911 for (i = 0; relocation == -1 && i < num_phdr; i++)
6912 if (is_elf64)
6913 {
6914 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6915
6916 if (p->p_type == PT_PHDR)
6917 relocation = phdr_memaddr - p->p_vaddr;
6918 }
6919 else
6920 {
6921 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6922
6923 if (p->p_type == PT_PHDR)
6924 relocation = phdr_memaddr - p->p_vaddr;
6925 }
6926
6927 if (relocation == -1)
6928 {
6929 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
6930 all real-world executables, including PIE executables, always have
6931 PT_PHDR present. PT_PHDR is absent from some shared libraries and
6932 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6933 provides DT_DEBUG anyway (fpc binaries are statically linked).
6934
6935 Therefore, wherever DT_DEBUG exists, PT_PHDR also exists.
6936
6937 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6938
6939 return 0;
6940 }
6941
6942 for (i = 0; i < num_phdr; i++)
6943 {
6944 if (is_elf64)
6945 {
6946 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6947
6948 if (p->p_type == PT_DYNAMIC)
6949 return p->p_vaddr + relocation;
6950 }
6951 else
6952 {
6953 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6954
6955 if (p->p_type == PT_DYNAMIC)
6956 return p->p_vaddr + relocation;
6957 }
6958 }
6959
6960 return 0;
6961 }
6962
6963 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6964 can be 0 if the inferior does not yet have the library list initialized.
6965 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6966 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6967
6968 static CORE_ADDR
6969 get_r_debug (const int pid, const int is_elf64)
6970 {
6971 CORE_ADDR dynamic_memaddr;
6972 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6973 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6974 CORE_ADDR map = -1;
6975
6976 dynamic_memaddr = get_dynamic (pid, is_elf64);
6977 if (dynamic_memaddr == 0)
6978 return map;
6979
6980 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6981 {
6982 if (is_elf64)
6983 {
6984 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6985 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6986 union
6987 {
6988 Elf64_Xword map;
6989 unsigned char buf[sizeof (Elf64_Xword)];
6990 }
6991 rld_map;
6992 #endif
6993 #ifdef DT_MIPS_RLD_MAP
6994 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6995 {
6996 if (linux_read_memory (dyn->d_un.d_val,
6997 rld_map.buf, sizeof (rld_map.buf)) == 0)
6998 return rld_map.map;
6999 else
7000 break;
7001 }
7002 #endif /* DT_MIPS_RLD_MAP */
7003 #ifdef DT_MIPS_RLD_MAP_REL
7004 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
7005 {
7006 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
7007 rld_map.buf, sizeof (rld_map.buf)) == 0)
7008 return rld_map.map;
7009 else
7010 break;
7011 }
7012 #endif /* DT_MIPS_RLD_MAP_REL */
7013
7014 if (dyn->d_tag == DT_DEBUG && map == -1)
7015 map = dyn->d_un.d_val;
7016
7017 if (dyn->d_tag == DT_NULL)
7018 break;
7019 }
7020 else
7021 {
7022 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
7023 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
7024 union
7025 {
7026 Elf32_Word map;
7027 unsigned char buf[sizeof (Elf32_Word)];
7028 }
7029 rld_map;
7030 #endif
7031 #ifdef DT_MIPS_RLD_MAP
7032 if (dyn->d_tag == DT_MIPS_RLD_MAP)
7033 {
7034 if (linux_read_memory (dyn->d_un.d_val,
7035 rld_map.buf, sizeof (rld_map.buf)) == 0)
7036 return rld_map.map;
7037 else
7038 break;
7039 }
7040 #endif /* DT_MIPS_RLD_MAP */
7041 #ifdef DT_MIPS_RLD_MAP_REL
7042 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
7043 {
7044 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
7045 rld_map.buf, sizeof (rld_map.buf)) == 0)
7046 return rld_map.map;
7047 else
7048 break;
7049 }
7050 #endif /* DT_MIPS_RLD_MAP_REL */
7051
7052 if (dyn->d_tag == DT_DEBUG && map == -1)
7053 map = dyn->d_un.d_val;
7054
7055 if (dyn->d_tag == DT_NULL)
7056 break;
7057 }
7058
7059 dynamic_memaddr += dyn_size;
7060 }
7061
7062 return map;
7063 }
7064
7065 /* Read one pointer from MEMADDR in the inferior. */
7066
7067 static int
7068 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
7069 {
7070 int ret;
7071
7072 /* Go through a union so this works on either big or little endian
7073 hosts, when the inferior's pointer size is smaller than the size
7074 of CORE_ADDR. It is assumed that the inferior's endianness is the
7075 same as the superior's. */
7076 union
7077 {
7078 CORE_ADDR core_addr;
7079 unsigned int ui;
7080 unsigned char uc;
7081 } addr;
7082
7083 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
7084 if (ret == 0)
7085 {
7086 if (ptr_size == sizeof (CORE_ADDR))
7087 *ptr = addr.core_addr;
7088 else if (ptr_size == sizeof (unsigned int))
7089 *ptr = addr.ui;
7090 else
7091 gdb_assert_not_reached ("unhandled pointer size");
7092 }
7093 return ret;
7094 }
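/* Illustration: why the union above works.  linux_read_memory stores
   PTR_SIZE bytes at the start of the union; reading them back through
   the member of exactly that size ("ui" for a 4-byte inferior pointer)
   is correct on both big- and little-endian hosts, whereas reading the
   full-width member would misplace the bytes on big-endian.  A
   hypothetical host-only check:  */
#if 0
#include <string.h>
#include <assert.h>

static void
example_union_pointer_read (void)
{
  union
  {
    unsigned long long core_addr;   /* Stand-in for a 64-bit CORE_ADDR.  */
    unsigned int ui;
    unsigned char uc;
  } addr;
  unsigned int inferior_ptr = 0x12345678;

  /* Copy exactly the inferior's pointer size, as read_one_ptr does.  */
  memcpy (&addr.uc, &inferior_ptr, sizeof inferior_ptr);
  assert (addr.ui == 0x12345678);
}
#endif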
7095
7096 struct link_map_offsets
7097 {
7098 /* Offset and size of r_debug.r_version. */
7099 int r_version_offset;
7100
7101 /* Offset and size of r_debug.r_map. */
7102 int r_map_offset;
7103
7104 /* Offset to l_addr field in struct link_map. */
7105 int l_addr_offset;
7106
7107 /* Offset to l_name field in struct link_map. */
7108 int l_name_offset;
7109
7110 /* Offset to l_ld field in struct link_map. */
7111 int l_ld_offset;
7112
7113 /* Offset to l_next field in struct link_map. */
7114 int l_next_offset;
7115
7116 /* Offset to l_prev field in struct link_map. */
7117 int l_prev_offset;
7118 };
7119
7120 /* Construct qXfer:libraries-svr4:read reply. */
7121
7122 static int
7123 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7124 unsigned const char *writebuf,
7125 CORE_ADDR offset, int len)
7126 {
7127 char *document;
7128 unsigned document_len;
7129 struct process_info_private *const priv = current_process ()->priv;
7130 char filename[PATH_MAX];
7131 int pid, is_elf64;
7132
7133 static const struct link_map_offsets lmo_32bit_offsets =
7134 {
7135 0, /* r_version offset. */
7136 4, /* r_debug.r_map offset. */
7137 0, /* l_addr offset in link_map. */
7138 4, /* l_name offset in link_map. */
7139 8, /* l_ld offset in link_map. */
7140 12, /* l_next offset in link_map. */
7141 16 /* l_prev offset in link_map. */
7142 };
7143
7144 static const struct link_map_offsets lmo_64bit_offsets =
7145 {
7146 0, /* r_version offset. */
7147 8, /* r_debug.r_map offset. */
7148 0, /* l_addr offset in link_map. */
7149 8, /* l_name offset in link_map. */
7150 16, /* l_ld offset in link_map. */
7151 24, /* l_next offset in link_map. */
7152 32 /* l_prev offset in link_map. */
7153 };
7154 const struct link_map_offsets *lmo;
7155 unsigned int machine;
7156 int ptr_size;
7157 CORE_ADDR lm_addr = 0, lm_prev = 0;
7158 int allocated = 1024;
7159 char *p;
7160 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7161 int header_done = 0;
7162
7163 if (writebuf != NULL)
7164 return -2;
7165 if (readbuf == NULL)
7166 return -1;
7167
7168 pid = lwpid_of (current_thread);
7169 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7170 is_elf64 = elf_64_file_p (filename, &machine);
7171 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7172 ptr_size = is_elf64 ? 8 : 4;
7173
7174 while (annex[0] != '\0')
7175 {
7176 const char *sep;
7177 CORE_ADDR *addrp;
7178 int len;
7179
7180 sep = strchr (annex, '=');
7181 if (sep == NULL)
7182 break;
7183
7184 len = sep - annex;
7185 if (len == 5 && startswith (annex, "start"))
7186 addrp = &lm_addr;
7187 else if (len == 4 && startswith (annex, "prev"))
7188 addrp = &lm_prev;
7189 else
7190 {
7191 annex = strchr (sep, ';');
7192 if (annex == NULL)
7193 break;
7194 annex++;
7195 continue;
7196 }
7197
7198 annex = decode_address_to_semicolon (addrp, sep + 1);
7199 }
7200
7201 if (lm_addr == 0)
7202 {
7203 int r_version = 0;
7204
7205 if (priv->r_debug == 0)
7206 priv->r_debug = get_r_debug (pid, is_elf64);
7207
7208 /* We failed to find DT_DEBUG. Such a situation will not change
7209 for this inferior - do not retry it. Report it to GDB as E01;
7210 see the GDB solib-svr4.c side for the reasons. */
7211 if (priv->r_debug == (CORE_ADDR) -1)
7212 return -1;
7213
7214 if (priv->r_debug != 0)
7215 {
7216 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7217 (unsigned char *) &r_version,
7218 sizeof (r_version)) != 0
7219 || r_version != 1)
7220 {
7221 warning ("unexpected r_debug version %d", r_version);
7222 }
7223 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7224 &lm_addr, ptr_size) != 0)
7225 {
7226 warning ("unable to read r_map from 0x%lx",
7227 (long) priv->r_debug + lmo->r_map_offset);
7228 }
7229 }
7230 }
7231
7232 document = (char *) xmalloc (allocated);
7233 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7234 p = document + strlen (document);
7235
7236 while (lm_addr
7237 && read_one_ptr (lm_addr + lmo->l_name_offset,
7238 &l_name, ptr_size) == 0
7239 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7240 &l_addr, ptr_size) == 0
7241 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7242 &l_ld, ptr_size) == 0
7243 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7244 &l_prev, ptr_size) == 0
7245 && read_one_ptr (lm_addr + lmo->l_next_offset,
7246 &l_next, ptr_size) == 0)
7247 {
7248 unsigned char libname[PATH_MAX];
7249
7250 if (lm_prev != l_prev)
7251 {
7252 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7253 (long) lm_prev, (long) l_prev);
7254 break;
7255 }
7256
7257 /* Ignore the first entry even if it has a valid name, as the first
7258 entry corresponds to the main executable. The first entry should not
7259 be skipped if the dynamic loader was loaded late by a static executable
7260 (see solib-svr4.c parameter ignore_first). But in such a case the main
7261 executable does not have PT_DYNAMIC present, and this function has
7262 already exited above due to a failed get_r_debug. */
7263 if (lm_prev == 0)
7264 {
7265 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7266 p = p + strlen (p);
7267 }
7268 else
7269 {
7270 /* Not checking for error because reading may stop before
7271 we've got PATH_MAX worth of characters. */
7272 libname[0] = '\0';
7273 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7274 libname[sizeof (libname) - 1] = '\0';
7275 if (libname[0] != '\0')
7276 {
7277 /* 6x the size for xml_escape_text below. */
7278 size_t len = 6 * strlen ((char *) libname);
7279 char *name;
7280
7281 if (!header_done)
7282 {
7283 /* Terminate `<library-list-svr4'. */
7284 *p++ = '>';
7285 header_done = 1;
7286 }
7287
7288 while (allocated < p - document + len + 200)
7289 {
7290 /* Expand to guarantee sufficient storage. */
7291 uintptr_t document_len = p - document;
7292
7293 document = (char *) xrealloc (document, 2 * allocated);
7294 allocated *= 2;
7295 p = document + document_len;
7296 }
7297
7298 name = xml_escape_text ((char *) libname);
7299 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7300 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7301 name, (unsigned long) lm_addr,
7302 (unsigned long) l_addr, (unsigned long) l_ld);
7303 free (name);
7304 }
7305 }
7306
7307 lm_prev = lm_addr;
7308 lm_addr = l_next;
7309 }
7310
7311 if (!header_done)
7312 {
7313 /* Empty list; terminate `<library-list-svr4'. */
7314 strcpy (p, "/>");
7315 }
7316 else
7317 strcpy (p, "</library-list-svr4>");
7318
7319 document_len = strlen (document);
7320 if (offset < document_len)
7321 document_len -= offset;
7322 else
7323 document_len = 0;
7324 if (len > document_len)
7325 len = document_len;
7326
7327 memcpy (readbuf, document + offset, len);
7328 xfree (document);
7329
7330 return len;
7331 }
7332
7333 #ifdef HAVE_LINUX_BTRACE
7334
7335 /* See to_disable_btrace target method. */
7336
7337 static int
7338 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7339 {
7340 enum btrace_error err;
7341
7342 err = linux_disable_btrace (tinfo);
7343 return (err == BTRACE_ERR_NONE ? 0 : -1);
7344 }
7345
7346 /* Encode an Intel Processor Trace configuration. */
7347
7348 static void
7349 linux_low_encode_pt_config (struct buffer *buffer,
7350 const struct btrace_data_pt_config *config)
7351 {
7352 buffer_grow_str (buffer, "<pt-config>\n");
7353
7354 switch (config->cpu.vendor)
7355 {
7356 case CV_INTEL:
7357 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7358 "model=\"%u\" stepping=\"%u\"/>\n",
7359 config->cpu.family, config->cpu.model,
7360 config->cpu.stepping);
7361 break;
7362
7363 default:
7364 break;
7365 }
7366
7367 buffer_grow_str (buffer, "</pt-config>\n");
7368 }
7369
7370 /* Encode a raw buffer. */
7371
7372 static void
7373 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7374 unsigned int size)
7375 {
7376 if (size == 0)
7377 return;
7378
7379 /* We use hex encoding - see common/rsp-low.h. */
7380 buffer_grow_str (buffer, "<raw>\n");
7381
7382 while (size-- > 0)
7383 {
7384 char elem[2];
7385
7386 elem[0] = tohex ((*data >> 4) & 0xf);
7387 elem[1] = tohex (*data++ & 0xf);
7388
7389 buffer_grow (buffer, elem, 2);
7390 }
7391
7392 buffer_grow_str (buffer, "</raw>\n");
7393 }
7394
7395 /* See to_read_btrace target method. */
7396
7397 static int
7398 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7399 enum btrace_read_type type)
7400 {
7401 struct btrace_data btrace;
7402 struct btrace_block *block;
7403 enum btrace_error err;
7404 int i;
7405
7406 btrace_data_init (&btrace);
7407
7408 err = linux_read_btrace (&btrace, tinfo, type);
7409 if (err != BTRACE_ERR_NONE)
7410 {
7411 if (err == BTRACE_ERR_OVERFLOW)
7412 buffer_grow_str0 (buffer, "E.Overflow.");
7413 else
7414 buffer_grow_str0 (buffer, "E.Generic Error.");
7415
7416 goto err;
7417 }
7418
7419 switch (btrace.format)
7420 {
7421 case BTRACE_FORMAT_NONE:
7422 buffer_grow_str0 (buffer, "E.No Trace.");
7423 goto err;
7424
7425 case BTRACE_FORMAT_BTS:
7426 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7427 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7428
7429 for (i = 0;
7430 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7431 i++)
7432 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7433 paddress (block->begin), paddress (block->end));
7434
7435 buffer_grow_str0 (buffer, "</btrace>\n");
7436 break;
7437
7438 case BTRACE_FORMAT_PT:
7439 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7440 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7441 buffer_grow_str (buffer, "<pt>\n");
7442
7443 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7444
7445 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7446 btrace.variant.pt.size);
7447
7448 buffer_grow_str (buffer, "</pt>\n");
7449 buffer_grow_str0 (buffer, "</btrace>\n");
7450 break;
7451
7452 default:
7453 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7454 goto err;
7455 }
7456
7457 btrace_data_fini (&btrace);
7458 return 0;
7459
7460 err:
7461 btrace_data_fini (&btrace);
7462 return -1;
7463 }
7464
7465 /* See to_btrace_conf target method. */
7466
7467 static int
7468 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7469 struct buffer *buffer)
7470 {
7471 const struct btrace_config *conf;
7472
7473 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7474 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7475
7476 conf = linux_btrace_conf (tinfo);
7477 if (conf != NULL)
7478 {
7479 switch (conf->format)
7480 {
7481 case BTRACE_FORMAT_NONE:
7482 break;
7483
7484 case BTRACE_FORMAT_BTS:
7485 buffer_xml_printf (buffer, "<bts");
7486 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7487 buffer_xml_printf (buffer, " />\n");
7488 break;
7489
7490 case BTRACE_FORMAT_PT:
7491 buffer_xml_printf (buffer, "<pt");
7492 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7493 buffer_xml_printf (buffer, "/>\n");
7494 break;
7495 }
7496 }
7497
7498 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7499 return 0;
7500 }
7501 #endif /* HAVE_LINUX_BTRACE */
7502
7503 /* See nat/linux-nat.h. */
7504
7505 ptid_t
7506 current_lwp_ptid (void)
7507 {
7508 return ptid_of (current_thread);
7509 }
7510
7511 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7512
7513 static int
7514 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7515 {
7516 if (the_low_target.breakpoint_kind_from_pc != NULL)
7517 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7518 else
7519 return default_breakpoint_kind_from_pc (pcptr);
7520 }
7521
7522 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7523
7524 static const gdb_byte *
7525 linux_sw_breakpoint_from_kind (int kind, int *size)
7526 {
7527 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7528
7529 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7530 }

/* Implementation of the target_ops method
   "breakpoint_kind_from_current_state".  */

static int
linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (the_low_target.breakpoint_kind_from_current_state != NULL)
    return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
  else
    return linux_breakpoint_kind_from_pc (pcptr);
}

/* Default implementation of the linux_target_ops method "set_pc" for
   targets whose program counter is a 32-bit register literally named
   "pc".  */

void
linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint32_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}

/* Default implementation of the linux_target_ops method "get_pc" for
   targets whose program counter is a 32-bit register literally named
   "pc".  */

CORE_ADDR
linux_get_pc_32bit (struct regcache *regcache)
{
  uint32_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  if (debug_threads)
    debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
  return pc;
}

/* Default implementation of the linux_target_ops method "set_pc" for
   targets whose program counter is a 64-bit register literally named
   "pc".  */

void
linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
{
  uint64_t newpc = pc;

  supply_register_by_name (regcache, "pc", &newpc);
}

/* Default implementation of the linux_target_ops method "get_pc" for
   targets whose program counter is a 64-bit register literally named
   "pc".  */

CORE_ADDR
linux_get_pc_64bit (struct regcache *regcache)
{
  uint64_t pc;

  collect_register_by_name (regcache, "pc", &pc);
  if (debug_threads)
    debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
  return pc;
}
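
/* An architecture port whose program counter matches these defaults
   can plug them straight into its low-target vector.  A hypothetical
   excerpt (the surrounding fields are elided):

     struct linux_target_ops the_low_target = {
       ...
       linux_get_pc_64bit,	// get_pc
       linux_set_pc_64bit,	// set_pc
       ...
     };  */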

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_post_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_supports_z_point_type,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_sw_breakpoint,
  linux_supports_stopped_by_sw_breakpoint,
  linux_stopped_by_hw_breakpoint,
  linux_supports_stopped_by_hw_breakpoint,
  linux_supports_hardware_single_step,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)	       \
    && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
    && defined(PT_TEXT_END_ADDR)
  linux_read_offsets,
#else
  NULL,	/* read_offsets */
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,	/* get_tls_address */
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
  linux_supports_fork_events,
  linux_supports_vfork_events,
  linux_supports_exec_events,
  linux_handle_new_gdb_connection,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,	/* handle_monitor_command */
#endif
  linux_common_core_of_thread,
  linux_read_loadmap,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,	/* get_tib_address (Windows-only) */
  linux_pause_all,
  linux_unpause_all,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops,
  linux_supports_disable_randomization,
  linux_get_min_fast_tracepoint_insn_len,
  linux_qxfer_libraries_svr4,
  linux_supports_agent,
#ifdef HAVE_LINUX_BTRACE
  linux_supports_btrace,
  linux_enable_btrace,
  linux_low_disable_btrace,
  linux_low_read_btrace,
  linux_low_btrace_conf,
#else
  NULL,	/* supports_btrace */
  NULL,	/* enable_btrace */
  NULL,	/* disable_btrace */
  NULL,	/* read_btrace */
  NULL,	/* read_btrace_conf */
#endif
  linux_supports_range_stepping,
  linux_proc_pid_to_exec_file,
  linux_mntns_open_cloexec,
  linux_mntns_unlink,
  linux_mntns_readlink,
  linux_breakpoint_kind_from_pc,
  linux_sw_breakpoint_from_kind,
  linux_proc_tid_get_name,
  linux_breakpoint_kind_from_current_state,
  linux_supports_software_single_step,
  linux_supports_catch_syscall,
  linux_get_ipa_tdesc_idx,
};
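
/* NULL entries above mark operations this target does not implement;
   generic gdbserver code is expected to check the corresponding
   target_ops field for NULL (e.g. via the convenience macros in
   target.h) before calling through it.  */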

#ifdef HAVE_LINUX_REGSETS
/* Count the regsets in INFO by walking the array until the sentinel
   entry (whose size is negative) is reached, recording the count in
   INFO->num_regsets.  */

void
initialize_regsets_info (struct regsets_info *info)
{
  for (info->num_regsets = 0;
       info->regsets[info->num_regsets].size >= 0;
       info->num_regsets++)
    ;
}
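
/* The loop above assumes the regsets array is terminated by a sentinel
   entry whose size field is negative.  A hypothetical example of such
   an array (the names are illustrative, not from any real port):

     static struct regset_info my_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
         GENERAL_REGS, my_fill_gregset, my_store_gregset },
       NULL_REGSET	// Sentinel; its size field is -1.
     };  */
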
#endif

void
initialize_low (void)
{
  struct sigaction sigchld_action;

  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);

  linux_ptrace_init_warnings ();

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  /* Restart interrupted system calls rather than having them fail
     with EINTR when SIGCHLD arrives.  */
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);

  initialize_low_arch ();

  linux_check_ptrace_features ();
}