More assert checks on reinsert breakpoint
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN already defines these since at least 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
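
/* Concretely: a 64-bit gdbserver reading the auxv of a 32-bit
   inferior must parse packed 4-byte entries, and vice versa.  A
   pointer member in the unions above would take on the host's size
   and alignment and break that fixed layout, which is why both
   fallback definitions use fixed-width integers only.  */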
132
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
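
/* Note on usage: when a stop arrives for a PID we don't know about
   yet, its status is stashed (elsewhere in this file) with
   add_to_pid_list; when the parent's fork/vfork/clone event later
   arrives, handle_extended_wait (below) claims the child's initial
   stop via pull_pid_from_list, or waits for it if it hasn't been
   seen yet.  */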
227
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
256 int *wstat, int options);
257 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
258 static struct lwp_info *add_lwp (ptid_t ptid);
259 static void linux_mourn (struct process_info *process);
260 static int linux_stopped_by_watchpoint (void);
261 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
262 static int lwp_is_marked_dead (struct lwp_info *lwp);
263 static void proceed_all_lwps (void);
264 static int finish_step_over (struct lwp_info *lwp);
265 static int kill_lwp (unsigned long lwpid, int signo);
266 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
267 static void complete_ongoing_step_over (void);
268 static int linux_low_ptrace_options (int attached);
269
270 /* When the event-loop is doing a step-over, this points at the thread
271 being stepped. */
272 ptid_t step_over_bkpt;
273
274 /* True if the low target can hardware single-step. */
275
276 static int
277 can_hardware_single_step (void)
278 {
279 if (the_low_target.supports_hardware_single_step != NULL)
280 return the_low_target.supports_hardware_single_step ();
281 else
282 return 0;
283 }
284
285 /* True if the low target can software single-step. Such targets
286 implement the GET_NEXT_PCS callback. */
287
288 static int
289 can_software_single_step (void)
290 {
291 return (the_low_target.get_next_pcs != NULL);
292 }
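
/* Roughly, software single-step works by asking GET_NEXT_PCS for
   every address the next instruction could transfer control to, and
   planting "reinsert" (single-step) breakpoints at those addresses
   before resuming; hardware single-step instead relies on the CPU's
   trap flag or equivalent.  */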
293
294 /* True if the low target supports memory breakpoints. If so, we'll
295 have a GET_PC implementation. */
296
297 static int
298 supports_breakpoints (void)
299 {
300 return (the_low_target.get_pc != NULL);
301 }
302
303 /* Returns true if this target can support fast tracepoints. This
304 does not mean that the in-process agent has been loaded in the
305 inferior. */
306
307 static int
308 supports_fast_tracepoints (void)
309 {
310 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
311 }
312
313 /* True if LWP is stopped in its stepping range. */
314
315 static int
316 lwp_in_step_range (struct lwp_info *lwp)
317 {
318 CORE_ADDR pc = lwp->stop_pc;
319
320 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
321 }
322
323 struct pending_signals
324 {
325 int signal;
326 siginfo_t info;
327 struct pending_signals *prev;
328 };
329
330 /* The read/write ends of the pipe registered as waitable file in the
331 event loop. */
332 static int linux_event_pipe[2] = { -1, -1 };
333
334 /* True if we're currently in async mode. */
335 #define target_is_async_p() (linux_event_pipe[0] != -1)
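
/* This is the classic self-pipe pattern: when an event is left
   pending, async_file_mark (declared below) writes to
   linux_event_pipe[1], making linux_event_pipe[0] readable and
   waking up the main event loop.  */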
336
337 static void send_sigstop (struct lwp_info *lwp);
338 static void wait_for_sigstop (void);
339
340 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF file, and -1 if it is not an ELF file at all. Set *MACHINE to the ELF machine field, or to EM_NONE if HEADER is not ELF. */
341
342 static int
343 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
344 {
345 if (header->e_ident[EI_MAG0] == ELFMAG0
346 && header->e_ident[EI_MAG1] == ELFMAG1
347 && header->e_ident[EI_MAG2] == ELFMAG2
348 && header->e_ident[EI_MAG3] == ELFMAG3)
349 {
350 *machine = header->e_machine;
351 return header->e_ident[EI_CLASS] == ELFCLASS64;
352
353 }
354 *machine = EM_NONE;
355 return -1;
356 }
357
358 /* Return non-zero if FILE is a 64-bit ELF file,
359 zero if the file is not a 64-bit ELF file,
360 and -1 if the file is not accessible, doesn't exist, or isn't an ELF file. */
361
362 static int
363 elf_64_file_p (const char *file, unsigned int *machine)
364 {
365 Elf64_Ehdr header;
366 int fd;
367
368 fd = open (file, O_RDONLY);
369 if (fd < 0)
370 return -1;
371
372 if (read (fd, &header, sizeof (header)) != sizeof (header))
373 {
374 close (fd);
375 return 0;
376 }
377 close (fd);
378
379 return elf_64_header_p (&header, machine);
380 }
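
/* Note that a short read above yields 0 rather than -1, so a
   truncated file is reported the same way as a 32-bit ELF file;
   -1 means the open failed or the magic bytes were not ELF.  */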
381
382 /* Accepts an integer PID; returns true if the executable that PID
383 is running is a 64-bit ELF file. */
384
385 int
386 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
387 {
388 char file[PATH_MAX];
389
390 sprintf (file, "/proc/%d/exe", pid);
391 return elf_64_file_p (file, machine);
392 }
393
394 static void
395 delete_lwp (struct lwp_info *lwp)
396 {
397 struct thread_info *thr = get_lwp_thread (lwp);
398
399 if (debug_threads)
400 debug_printf ("deleting %ld\n", lwpid_of (thr));
401
402 remove_thread (thr);
403 free (lwp->arch_private);
404 free (lwp);
405 }
406
407 /* Add a process to the common process list, and set its private
408 data. */
409
410 static struct process_info *
411 linux_add_process (int pid, int attached)
412 {
413 struct process_info *proc;
414
415 proc = add_process (pid, attached);
416 proc->priv = XCNEW (struct process_info_private);
417
418 if (the_low_target.new_process != NULL)
419 proc->priv->arch_private = the_low_target.new_process ();
420
421 return proc;
422 }
423
424 static CORE_ADDR get_pc (struct lwp_info *lwp);
425
426 /* Call the target arch_setup function on the current thread. */
427
428 static void
429 linux_arch_setup (void)
430 {
431 the_low_target.arch_setup ();
432 }
433
434 /* Call the target arch_setup function on THREAD. */
435
436 static void
437 linux_arch_setup_thread (struct thread_info *thread)
438 {
439 struct thread_info *saved_thread;
440
441 saved_thread = current_thread;
442 current_thread = thread;
443
444 linux_arch_setup ();
445
446 current_thread = saved_thread;
447 }
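
/* The save/switch/restore of current_thread above is a recurring
   idiom in this file: the low target callbacks and the regcache
   operate implicitly on current_thread, so helpers that act on an
   arbitrary thread temporarily make it current (see e.g. get_pc and
   save_stop_reason below).  */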
448
449 /* Handle a GNU/Linux extended wait response. If we see a clone,
450 fork, or vfork event, we need to add the new LWP to our list
451 (and return 0 so as not to report the trap to higher layers).
452 If we see an exec event, we will modify ORIG_EVENT_LWP to point
453 to a new LWP representing the new program. */
454
455 static int
456 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
457 {
458 struct lwp_info *event_lwp = *orig_event_lwp;
459 int event = linux_ptrace_get_extended_event (wstat);
460 struct thread_info *event_thr = get_lwp_thread (event_lwp);
461 struct lwp_info *new_lwp;
462
463 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
464
465 /* All extended events we currently use are mid-syscall. Only
466 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
467 you have to be using PTRACE_SEIZE to get that. */
468 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
469
470 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
471 || (event == PTRACE_EVENT_CLONE))
472 {
473 ptid_t ptid;
474 unsigned long new_pid;
475 int ret, status;
476
477 /* Get the pid of the new lwp. */
478 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
479 &new_pid);
480
481 /* If we haven't already seen the new PID stop, wait for it now. */
482 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
483 {
484 /* The new child has a pending SIGSTOP. We can't affect it until it
485 hits the SIGSTOP, but we're already attached. */
486
487 ret = my_waitpid (new_pid, &status, __WALL);
488
489 if (ret == -1)
490 perror_with_name ("waiting for new child");
491 else if (ret != new_pid)
492 warning ("wait returned unexpected PID %d", ret);
493 else if (!WIFSTOPPED (status))
494 warning ("wait returned unexpected status 0x%x", status);
495 }
496
497 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
498 {
499 struct process_info *parent_proc;
500 struct process_info *child_proc;
501 struct lwp_info *child_lwp;
502 struct thread_info *child_thr;
503 struct target_desc *tdesc;
504
505 ptid = ptid_build (new_pid, new_pid, 0);
506
507 if (debug_threads)
508 {
509 debug_printf ("HEW: Got fork event from LWP %ld, "
510 "new child is %d\n",
511 ptid_get_lwp (ptid_of (event_thr)),
512 ptid_get_pid (ptid));
513 }
514
515 /* Add the new process to the tables and clone the breakpoint
516 lists of the parent. We need to do this even if the new process
517 will be detached, since we will need the process object and the
518 breakpoints to remove any breakpoints from memory when we
519 detach, and the client side will access registers. */
520 child_proc = linux_add_process (new_pid, 0);
521 gdb_assert (child_proc != NULL);
522 child_lwp = add_lwp (ptid);
523 gdb_assert (child_lwp != NULL);
524 child_lwp->stopped = 1;
525 child_lwp->must_set_ptrace_flags = 1;
526 child_lwp->status_pending_p = 0;
527 child_thr = get_lwp_thread (child_lwp);
528 child_thr->last_resume_kind = resume_stop;
529 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
530
531 /* If we're suspending all threads, leave this one suspended
532 too. If the fork/clone parent is stepping over a breakpoint,
533 all other threads have been suspended already. Leave the
534 child suspended too. */
535 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
536 || event_lwp->bp_reinsert != 0)
537 {
538 if (debug_threads)
539 debug_printf ("HEW: leaving child suspended\n");
540 child_lwp->suspended = 1;
541 }
542
543 parent_proc = get_thread_process (event_thr);
544 child_proc->attached = parent_proc->attached;
545 clone_all_breakpoints (&child_proc->breakpoints,
546 &child_proc->raw_breakpoints,
547 parent_proc->breakpoints);
548
549 tdesc = XNEW (struct target_desc);
550 copy_target_description (tdesc, parent_proc->tdesc);
551 child_proc->tdesc = tdesc;
552
553 /* Clone arch-specific process data. */
554 if (the_low_target.new_fork != NULL)
555 the_low_target.new_fork (parent_proc, child_proc);
556
557 /* Save fork info in the parent thread. */
558 if (event == PTRACE_EVENT_FORK)
559 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
560 else if (event == PTRACE_EVENT_VFORK)
561 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
562
563 event_lwp->waitstatus.value.related_pid = ptid;
564
565 /* The status_pending field contains bits denoting the
566 extended event, so when the pending event is handled,
567 the handler will look at lwp->waitstatus. */
568 event_lwp->status_pending_p = 1;
569 event_lwp->status_pending = wstat;
570
571 /* Report the event. */
572 return 0;
573 }
574
575 if (debug_threads)
576 debug_printf ("HEW: Got clone event "
577 "from LWP %ld, new child is LWP %ld\n",
578 lwpid_of (event_thr), new_pid);
579
580 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
581 new_lwp = add_lwp (ptid);
582
583 /* Either we're going to immediately resume the new thread
584 or leave it stopped. linux_resume_one_lwp is a nop if it
585 thinks the thread is currently running, so set this first
586 before calling linux_resume_one_lwp. */
587 new_lwp->stopped = 1;
588
589 /* If we're suspending all threads, leave this one suspended
590 too. If the fork/clone parent is stepping over a breakpoint,
591 all other threads have been suspended already. Leave the
592 child suspended too. */
593 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
594 || event_lwp->bp_reinsert != 0)
595 new_lwp->suspended = 1;
596
597 /* Normally we will get the pending SIGSTOP. But in some cases
598 we might get another signal delivered to the group first.
599 If we do get another signal, be sure not to lose it. */
600 if (WSTOPSIG (status) != SIGSTOP)
601 {
602 new_lwp->stop_expected = 1;
603 new_lwp->status_pending_p = 1;
604 new_lwp->status_pending = status;
605 }
606 else if (report_thread_events)
607 {
608 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
609 new_lwp->status_pending_p = 1;
610 new_lwp->status_pending = status;
611 }
612
613 /* Don't report the event. */
614 return 1;
615 }
616 else if (event == PTRACE_EVENT_VFORK_DONE)
617 {
618 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
619
620 /* Report the event. */
621 return 0;
622 }
623 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
624 {
625 struct process_info *proc;
626 VEC (int) *syscalls_to_catch;
627 ptid_t event_ptid;
628 pid_t event_pid;
629
630 if (debug_threads)
631 {
632 debug_printf ("HEW: Got exec event from LWP %ld\n",
633 lwpid_of (event_thr));
634 }
635
636 /* Get the event ptid. */
637 event_ptid = ptid_of (event_thr);
638 event_pid = ptid_get_pid (event_ptid);
639
640 /* Save the syscall list from the execing process. */
641 proc = get_thread_process (event_thr);
642 syscalls_to_catch = proc->syscalls_to_catch;
643 proc->syscalls_to_catch = NULL;
644
645 /* Delete the execing process and all its threads. */
646 linux_mourn (proc);
647 current_thread = NULL;
648
649 /* Create a new process/lwp/thread. */
650 proc = linux_add_process (event_pid, 0);
651 event_lwp = add_lwp (event_ptid);
652 event_thr = get_lwp_thread (event_lwp);
653 gdb_assert (current_thread == event_thr);
654 linux_arch_setup_thread (event_thr);
655
656 /* Set the event status. */
657 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
658 event_lwp->waitstatus.value.execd_pathname
659 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
660
661 /* Mark the exec status as pending. */
662 event_lwp->stopped = 1;
663 event_lwp->status_pending_p = 1;
664 event_lwp->status_pending = wstat;
665 event_thr->last_resume_kind = resume_continue;
666 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
667
668 /* Update syscall state in the new lwp, effectively mid-syscall too. */
669 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
670
671 /* Restore the list to catch. Don't rely on the client, which is free
672 to avoid sending a new list when the architecture doesn't change.
673 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
674 proc->syscalls_to_catch = syscalls_to_catch;
675
676 /* Report the event. */
677 *orig_event_lwp = event_lwp;
678 return 0;
679 }
680
681 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
682 }
683
684 /* Return the PC as read from the regcache of LWP, without any
685 adjustment. */
686
687 static CORE_ADDR
688 get_pc (struct lwp_info *lwp)
689 {
690 struct thread_info *saved_thread;
691 struct regcache *regcache;
692 CORE_ADDR pc;
693
694 if (the_low_target.get_pc == NULL)
695 return 0;
696
697 saved_thread = current_thread;
698 current_thread = get_lwp_thread (lwp);
699
700 regcache = get_thread_regcache (current_thread, 1);
701 pc = (*the_low_target.get_pc) (regcache);
702
703 if (debug_threads)
704 debug_printf ("pc is 0x%lx\n", (long) pc);
705
706 current_thread = saved_thread;
707 return pc;
708 }
709
710 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
711 Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
712 return code. */
713
714 static void
715 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
716 {
717 struct thread_info *saved_thread;
718 struct regcache *regcache;
719
720 if (the_low_target.get_syscall_trapinfo == NULL)
721 {
722 /* If we cannot get the syscall trapinfo, report an unknown
723 system call number and -ENOSYS return value. */
724 *sysno = UNKNOWN_SYSCALL;
725 *sysret = -ENOSYS;
726 return;
727 }
728
729 saved_thread = current_thread;
730 current_thread = get_lwp_thread (lwp);
731
732 regcache = get_thread_regcache (current_thread, 1);
733 (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
734
735 if (debug_threads)
736 {
737 debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
738 *sysno, *sysret);
739 }
740
741 current_thread = saved_thread;
742 }
743
744 static int check_stopped_by_watchpoint (struct lwp_info *child);
745
746 /* Called when the LWP stopped for a signal/trap. If it stopped for a
747 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
748 and save the result in the LWP's stop_reason field. If it stopped
749 for a breakpoint, decrement the PC if necessary on the lwp's
750 architecture. Returns true if we now have the LWP's stop PC. */
751
752 static int
753 save_stop_reason (struct lwp_info *lwp)
754 {
755 CORE_ADDR pc;
756 CORE_ADDR sw_breakpoint_pc;
757 struct thread_info *saved_thread;
758 #if USE_SIGTRAP_SIGINFO
759 siginfo_t siginfo;
760 #endif
761
762 if (the_low_target.get_pc == NULL)
763 return 0;
764
765 pc = get_pc (lwp);
766 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
767
768 /* breakpoint_at reads from the current thread. */
769 saved_thread = current_thread;
770 current_thread = get_lwp_thread (lwp);
771
772 #if USE_SIGTRAP_SIGINFO
773 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
774 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
775 {
776 if (siginfo.si_signo == SIGTRAP)
777 {
778 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
779 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
780 {
781 /* The si_code is ambiguous on this arch -- check debug
782 registers. */
783 if (!check_stopped_by_watchpoint (lwp))
784 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
785 }
786 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
787 {
788 /* If we determine the LWP stopped for a SW breakpoint,
789 trust it. Particularly don't check watchpoint
790 registers, because at least on s390, we'd find
791 stopped-by-watchpoint as long as there's a watchpoint
792 set. */
793 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
794 }
795 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
796 {
797 /* This can indicate either a hardware breakpoint or
798 hardware watchpoint. Check debug registers. */
799 if (!check_stopped_by_watchpoint (lwp))
800 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
801 }
802 else if (siginfo.si_code == TRAP_TRACE)
803 {
804 /* We may have single stepped an instruction that
805 triggered a watchpoint. In that case, on some
806 architectures (such as x86), instead of TRAP_HWBKPT,
807 si_code indicates TRAP_TRACE, and we need to check
808 the debug registers separately. */
809 if (!check_stopped_by_watchpoint (lwp))
810 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
811 }
812 }
813 }
814 #else
815 /* We may have just stepped a breakpoint instruction. E.g., in
816 non-stop mode, GDB first tells the thread A to step a range, and
817 then the user inserts a breakpoint inside the range. In that
818 case we need to report the breakpoint PC. */
819 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
820 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
821 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
822
823 if (hardware_breakpoint_inserted_here (pc))
824 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
825
826 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
827 check_stopped_by_watchpoint (lwp);
828 #endif
829
830 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
831 {
832 if (debug_threads)
833 {
834 struct thread_info *thr = get_lwp_thread (lwp);
835
836 debug_printf ("CSBB: %s stopped by software breakpoint\n",
837 target_pid_to_str (ptid_of (thr)));
838 }
839
840 /* Back up the PC if necessary. */
841 if (pc != sw_breakpoint_pc)
842 {
843 struct regcache *regcache
844 = get_thread_regcache (current_thread, 1);
845 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
846 }
847
848 /* Update this so we record the correct stop PC below. */
849 pc = sw_breakpoint_pc;
850 }
851 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
852 {
853 if (debug_threads)
854 {
855 struct thread_info *thr = get_lwp_thread (lwp);
856
857 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
858 target_pid_to_str (ptid_of (thr)));
859 }
860 }
861 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
862 {
863 if (debug_threads)
864 {
865 struct thread_info *thr = get_lwp_thread (lwp);
866
867 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
868 target_pid_to_str (ptid_of (thr)));
869 }
870 }
871 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
872 {
873 if (debug_threads)
874 {
875 struct thread_info *thr = get_lwp_thread (lwp);
876
877 debug_printf ("CSBB: %s stopped by trace\n",
878 target_pid_to_str (ptid_of (thr)));
879 }
880 }
881
882 lwp->stop_pc = pc;
883 current_thread = saved_thread;
884 return 1;
885 }
886
887 static struct lwp_info *
888 add_lwp (ptid_t ptid)
889 {
890 struct lwp_info *lwp;
891
892 lwp = XCNEW (struct lwp_info);
893
894 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
895
896 if (the_low_target.new_thread != NULL)
897 the_low_target.new_thread (lwp);
898
899 lwp->thread = add_thread (ptid, lwp);
900
901 return lwp;
902 }
903
904 /* Start an inferior process and return its pid.
905 ALLARGS is a vector of program-name and args. */
906
907 static int
908 linux_create_inferior (char *program, char **allargs)
909 {
910 struct lwp_info *new_lwp;
911 int pid;
912 ptid_t ptid;
913 struct cleanup *restore_personality
914 = maybe_disable_address_space_randomization (disable_randomization);
915
916 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
917 pid = vfork ();
918 #else
919 pid = fork ();
920 #endif
921 if (pid < 0)
922 perror_with_name ("fork");
923
924 if (pid == 0)
925 {
926 close_most_fds ();
927 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
928
929 setpgid (0, 0);
930
931 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
932 stdout to stderr so that inferior i/o doesn't corrupt the connection.
933 Also, redirect stdin to /dev/null. */
934 if (remote_connection_is_stdio ())
935 {
936 close (0);
937 open ("/dev/null", O_RDONLY);
938 dup2 (2, 1);
939 if (write (2, "stdin/stdout redirected\n",
940 sizeof ("stdin/stdout redirected\n") - 1) < 0)
941 {
942 /* Errors ignored. */;
943 }
944 }
945
946 execv (program, allargs);
947 if (errno == ENOENT)
948 execvp (program, allargs);
949
950 fprintf (stderr, "Cannot exec %s: %s.\n", program,
951 strerror (errno));
952 fflush (stderr);
953 _exit (0177);
954 }
955
956 do_cleanups (restore_personality);
957
958 linux_add_process (pid, 0);
959
960 ptid = ptid_build (pid, pid, 0);
961 new_lwp = add_lwp (ptid);
962 new_lwp->must_set_ptrace_flags = 1;
963
964 return pid;
965 }
966
967 /* Implement the post_create_inferior target_ops method. */
968
969 static void
970 linux_post_create_inferior (void)
971 {
972 struct lwp_info *lwp = get_thread_lwp (current_thread);
973
974 linux_arch_setup ();
975
976 if (lwp->must_set_ptrace_flags)
977 {
978 struct process_info *proc = current_process ();
979 int options = linux_low_ptrace_options (proc->attached);
980
981 linux_enable_event_reporting (lwpid_of (current_thread), options);
982 lwp->must_set_ptrace_flags = 0;
983 }
984 }
985
986 /* Attach to an inferior process. Returns 0 on success, ERRNO on
987 error. */
988
989 int
990 linux_attach_lwp (ptid_t ptid)
991 {
992 struct lwp_info *new_lwp;
993 int lwpid = ptid_get_lwp (ptid);
994
995 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
996 != 0)
997 return errno;
998
999 new_lwp = add_lwp (ptid);
1000
1001 /* We need to wait for SIGSTOP before being able to make the next
1002 ptrace call on this LWP. */
1003 new_lwp->must_set_ptrace_flags = 1;
1004
1005 if (linux_proc_pid_is_stopped (lwpid))
1006 {
1007 if (debug_threads)
1008 debug_printf ("Attached to a stopped process\n");
1009
1010 /* The process is definitely stopped. It is in a job control
1011 stop, unless the kernel predates the TASK_STOPPED /
1012 TASK_TRACED distinction, in which case it might be in a
1013 ptrace stop. Make sure it is in a ptrace stop; from there we
1014 can kill it, signal it, et cetera.
1015
1016 First make sure there is a pending SIGSTOP. Since we are
1017 already attached, the process can not transition from stopped
1018 to running without a PTRACE_CONT; so we know this signal will
1019 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1020 probably already in the queue (unless this kernel is old
1021 enough to use TASK_STOPPED for ptrace stops); but since
1022 SIGSTOP is not an RT signal, it can only be queued once. */
1023 kill_lwp (lwpid, SIGSTOP);
1024
1025 /* Finally, resume the stopped process. This will deliver the
1026 SIGSTOP (or a higher priority signal, just like normal
1027 PTRACE_ATTACH), which we'll catch later on. */
1028 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1029 }
1030
1031 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1032 brings it to a halt.
1033
1034 There are several cases to consider here:
1035
1036 1) gdbserver has already attached to the process and is being notified
1037 of a new thread that is being created.
1038 In this case we should ignore that SIGSTOP and resume the
1039 process. This is handled below by setting stop_expected = 1,
1040 and the fact that add_thread sets last_resume_kind ==
1041 resume_continue.
1042
1043 2) This is the first thread (the process thread), and we're attaching
1044 to it via attach_inferior.
1045 In this case we want the process thread to stop.
1046 This is handled by having linux_attach set last_resume_kind ==
1047 resume_stop after we return.
1048
1049 If the pid we are attaching to is also the tgid, we attach to and
1050 stop all the existing threads. Otherwise, we attach to pid and
1051 ignore any other threads in the same group as this pid.
1052
1053 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1054 existing threads.
1055 In this case we want the thread to stop.
1056 FIXME: This case is currently not properly handled.
1057 We should wait for the SIGSTOP but don't. Things apparently work
1058 because enough time passes between when we ptrace (ATTACH) and when
1059 gdb makes the next ptrace call on the thread.
1060
1061 On the other hand, if we are currently trying to stop all threads, we
1062 should treat the new thread as if we had sent it a SIGSTOP. This works
1063 because we are guaranteed that the add_lwp call above added us to the
1064 end of the list, and so the new thread has not yet reached
1065 wait_for_sigstop (but will). */
1066 new_lwp->stop_expected = 1;
1067
1068 return 0;
1069 }
1070
1071 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1072 already attached. Returns true if a new LWP is found, false
1073 otherwise. */
1074
1075 static int
1076 attach_proc_task_lwp_callback (ptid_t ptid)
1077 {
1078 /* Is this a new thread? */
1079 if (find_thread_ptid (ptid) == NULL)
1080 {
1081 int lwpid = ptid_get_lwp (ptid);
1082 int err;
1083
1084 if (debug_threads)
1085 debug_printf ("Found new lwp %d\n", lwpid);
1086
1087 err = linux_attach_lwp (ptid);
1088
1089 /* Be quiet if we simply raced with the thread exiting. EPERM
1090 is returned if the thread's task still exists, and is marked
1091 as exited or zombie, as well as other conditions, so in that
1092 case, confirm the status in /proc/PID/status. */
1093 if (err == ESRCH
1094 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1095 {
1096 if (debug_threads)
1097 {
1098 debug_printf ("Cannot attach to lwp %d: "
1099 "thread is gone (%d: %s)\n",
1100 lwpid, err, strerror (err));
1101 }
1102 }
1103 else if (err != 0)
1104 {
1105 warning (_("Cannot attach to lwp %d: %s"),
1106 lwpid,
1107 linux_ptrace_attach_fail_reason_string (ptid, err));
1108 }
1109
1110 return 1;
1111 }
1112 return 0;
1113 }
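
/* Returning 1 above tells linux_proc_attach_tgid_threads that a new
   LWP was found, causing it to rescan /proc/PID/task until no new
   threads turn up; this way threads spawned while we are attaching
   are not missed.  */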
1114
1115 static void async_file_mark (void);
1116
1117 /* Attach to PID. If PID is the tgid, attach to it and all
1118 of its threads. */
1119
1120 static int
1121 linux_attach (unsigned long pid)
1122 {
1123 struct process_info *proc;
1124 struct thread_info *initial_thread;
1125 ptid_t ptid = ptid_build (pid, pid, 0);
1126 int err;
1127
1128 /* Attach to PID. We will check for other threads
1129 soon. */
1130 err = linux_attach_lwp (ptid);
1131 if (err != 0)
1132 error ("Cannot attach to process %ld: %s",
1133 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1134
1135 proc = linux_add_process (pid, 1);
1136
1137 /* Don't ignore the initial SIGSTOP if we just attached to this
1138 process. It will be collected by wait shortly. */
1139 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1140 initial_thread->last_resume_kind = resume_stop;
1141
1142 /* We must attach to every LWP. If /proc is mounted, use that to
1143 find them now. On the one hand, the inferior may be using raw
1144 clone instead of using pthreads. On the other hand, even if it
1145 is using pthreads, GDB may not be connected yet (thread_db needs
1146 to do symbol lookups, through qSymbol). Also, thread_db walks
1147 structures in the inferior's address space to find the list of
1148 threads/LWPs, and those structures may well be corrupted. Note
1149 that once thread_db is loaded, we'll still use it to list threads
1150 and associate pthread info with each LWP. */
1151 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1152
1153 /* GDB will shortly read the xml target description for this
1154 process, to figure out the process' architecture. But the target
1155 description is only filled in when the first process/thread in
1156 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1157 that now, otherwise, if GDB is fast enough, it could read the
1158 target description _before_ that initial stop. */
1159 if (non_stop)
1160 {
1161 struct lwp_info *lwp;
1162 int wstat, lwpid;
1163 ptid_t pid_ptid = pid_to_ptid (pid);
1164
1165 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1166 &wstat, __WALL);
1167 gdb_assert (lwpid > 0);
1168
1169 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1170
1171 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1172 {
1173 lwp->status_pending_p = 1;
1174 lwp->status_pending = wstat;
1175 }
1176
1177 initial_thread->last_resume_kind = resume_continue;
1178
1179 async_file_mark ();
1180
1181 gdb_assert (proc->tdesc != NULL);
1182 }
1183
1184 return 0;
1185 }
1186
1187 struct counter
1188 {
1189 int pid;
1190 int count;
1191 };
1192
1193 static int
1194 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1195 {
1196 struct counter *counter = (struct counter *) args;
1197
1198 if (ptid_get_pid (entry->id) == counter->pid)
1199 {
1200 if (++counter->count > 1)
1201 return 1;
1202 }
1203
1204 return 0;
1205 }
1206
1207 static int
1208 last_thread_of_process_p (int pid)
1209 {
1210 struct counter counter = { pid, 0 };
1211
1212 return (find_inferior (&all_threads,
1213 second_thread_of_pid_p, &counter) == NULL);
1214 }
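
/* This works because find_inferior stops iterating as soon as the
   callback returns nonzero, i.e., on the second thread with the
   given PID; a NULL result therefore means at most one such thread
   exists.  */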
1215
1216 /* Kill LWP. */
1217
1218 static void
1219 linux_kill_one_lwp (struct lwp_info *lwp)
1220 {
1221 struct thread_info *thr = get_lwp_thread (lwp);
1222 int pid = lwpid_of (thr);
1223
1224 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1225 there is no signal context, and ptrace(PTRACE_KILL) (or
1226 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1227 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1228 alternative is to kill with SIGKILL. We only need one SIGKILL
1229 per process, not one for each thread. But since we still
1230 support debugging programs using raw clone without CLONE_THREAD,
1231 we send one for each thread. For years, we used PTRACE_KILL
1232 only, so we're being a bit paranoid about some old kernels where
1233 PTRACE_KILL might work better (dubious if there are any such, but
1234 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1235 second, and so we're fine everywhere. */
1236
1237 errno = 0;
1238 kill_lwp (pid, SIGKILL);
1239 if (debug_threads)
1240 {
1241 int save_errno = errno;
1242
1243 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1244 target_pid_to_str (ptid_of (thr)),
1245 save_errno ? strerror (save_errno) : "OK");
1246 }
1247
1248 errno = 0;
1249 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1250 if (debug_threads)
1251 {
1252 int save_errno = errno;
1253
1254 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1255 target_pid_to_str (ptid_of (thr)),
1256 save_errno ? strerror (save_errno) : "OK");
1257 }
1258 }
1259
1260 /* Kill LWP and wait for it to die. */
1261
1262 static void
1263 kill_wait_lwp (struct lwp_info *lwp)
1264 {
1265 struct thread_info *thr = get_lwp_thread (lwp);
1266 int pid = ptid_get_pid (ptid_of (thr));
1267 int lwpid = ptid_get_lwp (ptid_of (thr));
1268 int wstat;
1269 int res;
1270
1271 if (debug_threads)
1272 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1273
1274 do
1275 {
1276 linux_kill_one_lwp (lwp);
1277
1278 /* Make sure it died. Notes:
1279
1280 - The loop is most likely unnecessary.
1281
1282 - We don't use linux_wait_for_event as that could delete lwps
1283 while we're iterating over them. We're not interested in
1284 any pending status at this point, only in making sure all
1285 wait status on the kernel side are collected until the
1286 process is reaped.
1287
1288 - We don't use __WALL here as the __WALL emulation relies on
1289 SIGCHLD, and killing a stopped process doesn't generate
1290 one, nor an exit status.
1291 */
1292 res = my_waitpid (lwpid, &wstat, 0);
1293 if (res == -1 && errno == ECHILD)
1294 res = my_waitpid (lwpid, &wstat, __WCLONE);
1295 } while (res > 0 && WIFSTOPPED (wstat));
1296
1297 /* Even if it was stopped, the child may have already disappeared.
1298 E.g., if it was killed by SIGKILL. */
1299 if (res < 0 && errno != ECHILD)
1300 perror_with_name ("kill_wait_lwp");
1301 }
1302
1303 /* Callback for `find_inferior'. Kills an lwp of a given process,
1304 except the leader. */
1305
1306 static int
1307 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1308 {
1309 struct thread_info *thread = (struct thread_info *) entry;
1310 struct lwp_info *lwp = get_thread_lwp (thread);
1311 int pid = * (int *) args;
1312
1313 if (ptid_get_pid (entry->id) != pid)
1314 return 0;
1315
1316 /* We avoid killing the first thread here, because of a Linux kernel (at
1317 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1318 the children get a chance to be reaped, it will remain a zombie
1319 forever. */
1320
1321 if (lwpid_of (thread) == pid)
1322 {
1323 if (debug_threads)
1324 debug_printf ("lkop: is last of process %s\n",
1325 target_pid_to_str (entry->id));
1326 return 0;
1327 }
1328
1329 kill_wait_lwp (lwp);
1330 return 0;
1331 }
1332
1333 static int
1334 linux_kill (int pid)
1335 {
1336 struct process_info *process;
1337 struct lwp_info *lwp;
1338
1339 process = find_process_pid (pid);
1340 if (process == NULL)
1341 return -1;
1342
1343 /* If we're killing a running inferior, make sure it is stopped
1344 first, as PTRACE_KILL will not work otherwise. */
1345 stop_all_lwps (0, NULL);
1346
1347 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1348
1349 /* See the comment in kill_one_lwp_callback. We did not kill the first
1350 thread in the list, so do so now. */
1351 lwp = find_lwp_pid (pid_to_ptid (pid));
1352
1353 if (lwp == NULL)
1354 {
1355 if (debug_threads)
1356 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1357 pid);
1358 }
1359 else
1360 kill_wait_lwp (lwp);
1361
1362 the_target->mourn (process);
1363
1364 /* Since we presently can only stop all lwps of all processes, we
1365 need to unstop lwps of other processes. */
1366 unstop_all_lwps (0, NULL);
1367 return 0;
1368 }
1369
1370 /* Get pending signal of THREAD, for detaching purposes. This is the
1371 signal the thread last stopped for, which we need to deliver to the
1372 thread when detaching; otherwise, it'd be suppressed/lost. */
1373
1374 static int
1375 get_detach_signal (struct thread_info *thread)
1376 {
1377 enum gdb_signal signo = GDB_SIGNAL_0;
1378 int status;
1379 struct lwp_info *lp = get_thread_lwp (thread);
1380
1381 if (lp->status_pending_p)
1382 status = lp->status_pending;
1383 else
1384 {
1385 /* If the thread had been suspended by gdbserver, and it stopped
1386 cleanly, then it'll have stopped with SIGSTOP. But we don't
1387 want to deliver that SIGSTOP. */
1388 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1389 || thread->last_status.value.sig == GDB_SIGNAL_0)
1390 return 0;
1391
1392 /* Otherwise, we may need to deliver the signal we
1393 intercepted. */
1394 status = lp->last_status;
1395 }
1396
1397 if (!WIFSTOPPED (status))
1398 {
1399 if (debug_threads)
1400 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1401 target_pid_to_str (ptid_of (thread)));
1402 return 0;
1403 }
1404
1405 /* Extended wait statuses aren't real SIGTRAPs. */
1406 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1407 {
1408 if (debug_threads)
1409 debug_printf ("GPS: lwp %s had stopped with extended "
1410 "status: no pending signal\n",
1411 target_pid_to_str (ptid_of (thread)));
1412 return 0;
1413 }
1414
1415 signo = gdb_signal_from_host (WSTOPSIG (status));
1416
1417 if (program_signals_p && !program_signals[signo])
1418 {
1419 if (debug_threads)
1420 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1421 target_pid_to_str (ptid_of (thread)),
1422 gdb_signal_to_string (signo));
1423 return 0;
1424 }
1425 else if (!program_signals_p
1426 /* If we have no way to know which signals GDB does not
1427 want to have passed to the program, assume
1428 SIGTRAP/SIGINT, which is GDB's default. */
1429 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1430 {
1431 if (debug_threads)
1432 debug_printf ("GPS: lwp %s had signal %s, "
1433 "but we don't know if we should pass it. "
1434 "Default to not.\n",
1435 target_pid_to_str (ptid_of (thread)),
1436 gdb_signal_to_string (signo));
1437 return 0;
1438 }
1439 else
1440 {
1441 if (debug_threads)
1442 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1443 target_pid_to_str (ptid_of (thread)),
1444 gdb_signal_to_string (signo));
1445
1446 return WSTOPSIG (status);
1447 }
1448 }
1449
1450 static int
1451 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1452 {
1453 struct thread_info *thread = (struct thread_info *) entry;
1454 struct lwp_info *lwp = get_thread_lwp (thread);
1455 int pid = * (int *) args;
1456 int sig;
1457
1458 if (ptid_get_pid (entry->id) != pid)
1459 return 0;
1460
1461 /* If there is a pending SIGSTOP, get rid of it. */
1462 if (lwp->stop_expected)
1463 {
1464 if (debug_threads)
1465 debug_printf ("Sending SIGCONT to %s\n",
1466 target_pid_to_str (ptid_of (thread)));
1467
1468 kill_lwp (lwpid_of (thread), SIGCONT);
1469 lwp->stop_expected = 0;
1470 }
1471
1472 /* Flush any pending changes to the process's registers. */
1473 regcache_invalidate_thread (thread);
1474
1475 /* Pass on any pending signal for this thread. */
1476 sig = get_detach_signal (thread);
1477
1478 /* Finally, let it resume. */
1479 if (the_low_target.prepare_to_resume != NULL)
1480 the_low_target.prepare_to_resume (lwp);
1481 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1482 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1483 error (_("Can't detach %s: %s"),
1484 target_pid_to_str (ptid_of (thread)),
1485 strerror (errno));
1486
1487 delete_lwp (lwp);
1488 return 0;
1489 }
1490
1491 static int
1492 linux_detach (int pid)
1493 {
1494 struct process_info *process;
1495
1496 process = find_process_pid (pid);
1497 if (process == NULL)
1498 return -1;
1499
1500 /* If there's a step over in progress, let it finish first,
1501 otherwise nesting a stabilize_threads operation on top gets real
1502 messy. */
1503 complete_ongoing_step_over ();
1504
1505 /* Stop all threads before detaching. First, ptrace requires that
1506 the thread is stopped to successfully detach. Second, thread_db
1507 may need to uninstall thread event breakpoints from memory, which
1508 only works with a stopped process anyway. */
1509 stop_all_lwps (0, NULL);
1510
1511 #ifdef USE_THREAD_DB
1512 thread_db_detach (process);
1513 #endif
1514
1515 /* Stabilize threads (move out of jump pads). */
1516 stabilize_threads ();
1517
1518 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1519
1520 the_target->mourn (process);
1521
1522 /* Since we presently can only stop all lwps of all processes, we
1523 need to unstop lwps of other processes. */
1524 unstop_all_lwps (0, NULL);
1525 return 0;
1526 }
1527
1528 /* Remove all LWPs that belong to process PROC from the lwp list. */
1529
1530 static int
1531 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1532 {
1533 struct thread_info *thread = (struct thread_info *) entry;
1534 struct lwp_info *lwp = get_thread_lwp (thread);
1535 struct process_info *process = (struct process_info *) proc;
1536
1537 if (pid_of (thread) == pid_of (process))
1538 delete_lwp (lwp);
1539
1540 return 0;
1541 }
1542
1543 static void
1544 linux_mourn (struct process_info *process)
1545 {
1546 struct process_info_private *priv;
1547
1548 #ifdef USE_THREAD_DB
1549 thread_db_mourn (process);
1550 #endif
1551
1552 find_inferior (&all_threads, delete_lwp_callback, process);
1553
1554 /* Free all private data. */
1555 priv = process->priv;
1556 free (priv->arch_private);
1557 free (priv);
1558 process->priv = NULL;
1559
1560 remove_process (process);
1561 }
1562
1563 static void
1564 linux_join (int pid)
1565 {
1566 int status, ret;
1567
1568 do {
1569 ret = my_waitpid (pid, &status, 0);
1570 if (WIFEXITED (status) || WIFSIGNALED (status))
1571 break;
1572 } while (ret != -1 || errno != ECHILD);
1573 }
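
/* The loop above keeps reaping wait statuses until the process
   reports a real exit (WIFEXITED/WIFSIGNALED), or until waitpid
   fails with ECHILD, meaning there is nothing left to wait for.  */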
1574
1575 /* Return nonzero if the given thread is still alive. */
1576 static int
1577 linux_thread_alive (ptid_t ptid)
1578 {
1579 struct lwp_info *lwp = find_lwp_pid (ptid);
1580
1581 /* We assume we always know if a thread exits. If a whole process
1582 exited but we still haven't been able to report it to GDB, we'll
1583 hold on to the last lwp of the dead process. */
1584 if (lwp != NULL)
1585 return !lwp_is_marked_dead (lwp);
1586 else
1587 return 0;
1588 }
1589
1590 /* Return 1 if this lwp still has an interesting status pending. If
1591 not (e.g., it had stopped for a breakpoint that is gone), return
1592 false. */
1593
1594 static int
1595 thread_still_has_status_pending_p (struct thread_info *thread)
1596 {
1597 struct lwp_info *lp = get_thread_lwp (thread);
1598
1599 if (!lp->status_pending_p)
1600 return 0;
1601
1602 if (thread->last_resume_kind != resume_stop
1603 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1604 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1605 {
1606 struct thread_info *saved_thread;
1607 CORE_ADDR pc;
1608 int discard = 0;
1609
1610 gdb_assert (lp->last_status != 0);
1611
1612 pc = get_pc (lp);
1613
1614 saved_thread = current_thread;
1615 current_thread = thread;
1616
1617 if (pc != lp->stop_pc)
1618 {
1619 if (debug_threads)
1620 debug_printf ("PC of %ld changed\n",
1621 lwpid_of (thread));
1622 discard = 1;
1623 }
1624
1625 #if !USE_SIGTRAP_SIGINFO
1626 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1627 && !(*the_low_target.breakpoint_at) (pc))
1628 {
1629 if (debug_threads)
1630 debug_printf ("previous SW breakpoint of %ld gone\n",
1631 lwpid_of (thread));
1632 discard = 1;
1633 }
1634 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1635 && !hardware_breakpoint_inserted_here (pc))
1636 {
1637 if (debug_threads)
1638 debug_printf ("previous HW breakpoint of %ld gone\n",
1639 lwpid_of (thread));
1640 discard = 1;
1641 }
1642 #endif
1643
1644 current_thread = saved_thread;
1645
1646 if (discard)
1647 {
1648 if (debug_threads)
1649 debug_printf ("discarding pending breakpoint status\n");
1650 lp->status_pending_p = 0;
1651 return 0;
1652 }
1653 }
1654
1655 return 1;
1656 }
1657
1658 /* Returns true if LWP is resumed from the client's perspective. */
1659
1660 static int
1661 lwp_resumed (struct lwp_info *lwp)
1662 {
1663 struct thread_info *thread = get_lwp_thread (lwp);
1664
1665 if (thread->last_resume_kind != resume_stop)
1666 return 1;
1667
1668 /* Did gdb send us a `vCont;t', but we haven't reported the
1669 corresponding stop to gdb yet? If so, the thread is still
1670 resumed/running from gdb's perspective. */
1671 if (thread->last_resume_kind == resume_stop
1672 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1673 return 1;
1674
1675 return 0;
1676 }
1677
1678 /* Return 1 if this lwp has an interesting status pending. */
1679 static int
1680 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1681 {
1682 struct thread_info *thread = (struct thread_info *) entry;
1683 struct lwp_info *lp = get_thread_lwp (thread);
1684 ptid_t ptid = * (ptid_t *) arg;
1685
1686 /* Check if we're only interested in events from a specific process
1687 or a specific LWP. */
1688 if (!ptid_match (ptid_of (thread), ptid))
1689 return 0;
1690
1691 if (!lwp_resumed (lp))
1692 return 0;
1693
1694 if (lp->status_pending_p
1695 && !thread_still_has_status_pending_p (thread))
1696 {
1697 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1698 return 0;
1699 }
1700
1701 return lp->status_pending_p;
1702 }
1703
1704 static int
1705 same_lwp (struct inferior_list_entry *entry, void *data)
1706 {
1707 ptid_t ptid = *(ptid_t *) data;
1708 int lwp;
1709
1710 if (ptid_get_lwp (ptid) != 0)
1711 lwp = ptid_get_lwp (ptid);
1712 else
1713 lwp = ptid_get_pid (ptid);
1714
1715 if (ptid_get_lwp (entry->id) == lwp)
1716 return 1;
1717
1718 return 0;
1719 }
1720
1721 struct lwp_info *
1722 find_lwp_pid (ptid_t ptid)
1723 {
1724 struct inferior_list_entry *thread
1725 = find_inferior (&all_threads, same_lwp, &ptid);
1726
1727 if (thread == NULL)
1728 return NULL;
1729
1730 return get_thread_lwp ((struct thread_info *) thread);
1731 }
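
/* Per same_lwp above, a ptid whose lwp field is zero (a bare process
   ptid) matches on the process id instead, which finds the LWP whose
   id equals the pid, i.e., the thread group leader.  */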
1732
1733 /* Return the number of known LWPs in the tgid given by PID. */
1734
1735 static int
1736 num_lwps (int pid)
1737 {
1738 struct inferior_list_entry *inf, *tmp;
1739 int count = 0;
1740
1741 ALL_INFERIORS (&all_threads, inf, tmp)
1742 {
1743 if (ptid_get_pid (inf->id) == pid)
1744 count++;
1745 }
1746
1747 return count;
1748 }
1749
1750 /* The arguments passed to iterate_over_lwps. */
1751
1752 struct iterate_over_lwps_args
1753 {
1754 /* The FILTER argument passed to iterate_over_lwps. */
1755 ptid_t filter;
1756
1757 /* The CALLBACK argument passed to iterate_over_lwps. */
1758 iterate_over_lwps_ftype *callback;
1759
1760 /* The DATA argument passed to iterate_over_lwps. */
1761 void *data;
1762 };
1763
1764 /* Callback for find_inferior used by iterate_over_lwps to filter
1765 calls to the callback supplied to that function. Returning a
1766 nonzero value causes find_inferior to stop iterating and return
1767 the current inferior_list_entry. Returning zero indicates that
1768 find_inferiors should continue iterating. */
1769
1770 static int
1771 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1772 {
1773 struct iterate_over_lwps_args *args
1774 = (struct iterate_over_lwps_args *) args_p;
1775
1776 if (ptid_match (entry->id, args->filter))
1777 {
1778 struct thread_info *thr = (struct thread_info *) entry;
1779 struct lwp_info *lwp = get_thread_lwp (thr);
1780
1781 return (*args->callback) (lwp, args->data);
1782 }
1783
1784 return 0;
1785 }
1786
1787 /* See nat/linux-nat.h. */
1788
1789 struct lwp_info *
1790 iterate_over_lwps (ptid_t filter,
1791 iterate_over_lwps_ftype callback,
1792 void *data)
1793 {
1794 struct iterate_over_lwps_args args = {filter, callback, data};
1795 struct inferior_list_entry *entry;
1796
1797 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1798 if (entry == NULL)
1799 return NULL;
1800
1801 return get_thread_lwp ((struct thread_info *) entry);
1802 }
1803
1804 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1805 their exits until all other threads in the group have exited. */
1806
1807 static void
1808 check_zombie_leaders (void)
1809 {
1810 struct process_info *proc, *tmp;
1811
1812 ALL_PROCESSES (proc, tmp)
1813 {
1814 pid_t leader_pid = pid_of (proc);
1815 struct lwp_info *leader_lp;
1816
1817 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1818
1819 if (debug_threads)
1820 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1821 "num_lwps=%d, zombie=%d\n",
1822 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1823 linux_proc_pid_is_zombie (leader_pid));
1824
1825 if (leader_lp != NULL && !leader_lp->stopped
1826 /* Check if there are other threads in the group, as we may
1827 have raced with the inferior simply exiting. */
1828 && !last_thread_of_process_p (leader_pid)
1829 && linux_proc_pid_is_zombie (leader_pid))
1830 {
1831 /* A leader zombie can mean one of two things:
1832
1833 - It exited, and there's an exit status pending
1834 available, or only the leader exited (not the whole
1835 program). In the latter case, we can't waitpid the
1836 leader's exit status until all other threads are gone.
1837
1838 - There are 3 or more threads in the group, and a thread
1839 other than the leader exec'd. On an exec, the Linux
1840 kernel destroys all other threads (except the execing
1841 one) in the thread group, and resets the execing thread's
1842 tid to the tgid. No exit notification is sent for the
1843 execing thread -- from the ptracer's perspective, it
1844 appears as though the execing thread just vanishes.
1845 Until we reap all other threads except the leader and the
1846 execing thread, the leader will be zombie, and the
1847 execing thread will be in `D (disc sleep)'. As soon as
1848 all other threads are reaped, the execing thread changes
1849 its tid to the tgid, and the previous (zombie) leader
1850 vanishes, giving place to the "new" leader. We could try
1851 distinguishing the exit and exec cases, by waiting once
1852 more, and seeing if something comes out, but it doesn't
1853 sound useful. The previous leader _does_ go away, and
1854 we'll re-add the new one once we see the exec event
1855 (which is just the same as what would happen if the
1856 previous leader did exit voluntarily before some other
1857 thread execs). */
1858
1859 if (debug_threads)
1860 fprintf (stderr,
1861 "CZL: Thread group leader %d zombie "
1862 "(it exited, or another thread execd).\n",
1863 leader_pid);
1864
1865 delete_lwp (leader_lp);
1866 }
1867 }
1868 }
1869
1870 /* Callback for `find_inferior'. Returns the first LWP that is not
1871 stopped. ARG is a PTID filter. */
1872
1873 static int
1874 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1875 {
1876 struct thread_info *thr = (struct thread_info *) entry;
1877 struct lwp_info *lwp;
1878 ptid_t filter = *(ptid_t *) arg;
1879
1880 if (!ptid_match (ptid_of (thr), filter))
1881 return 0;
1882
1883 lwp = get_thread_lwp (thr);
1884 if (!lwp->stopped)
1885 return 1;
1886
1887 return 0;
1888 }
1889
1890 /* Increment LWP's suspend count. */
1891
1892 static void
1893 lwp_suspended_inc (struct lwp_info *lwp)
1894 {
1895 lwp->suspended++;
1896
1897 if (debug_threads && lwp->suspended > 4)
1898 {
1899 struct thread_info *thread = get_lwp_thread (lwp);
1900
1901 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1902 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1903 }
1904 }
1905
1906 /* Decrement LWP's suspend count. */
1907
1908 static void
1909 lwp_suspended_decr (struct lwp_info *lwp)
1910 {
1911 lwp->suspended--;
1912
1913 if (lwp->suspended < 0)
1914 {
1915 struct thread_info *thread = get_lwp_thread (lwp);
1916
1917 internal_error (__FILE__, __LINE__,
1918 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1919 lwp->suspended);
1920 }
1921 }
1922
1923 /* This function should only be called if the LWP got a SIGTRAP.
1924
1925    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1926    event was handled, 0 otherwise.  */
1927
1928 static int
1929 handle_tracepoints (struct lwp_info *lwp)
1930 {
1931 struct thread_info *tinfo = get_lwp_thread (lwp);
1932 int tpoint_related_event = 0;
1933
1934 gdb_assert (lwp->suspended == 0);
1935
1936 /* If this tracepoint hit causes a tracing stop, we'll immediately
1937 uninsert tracepoints. To do this, we temporarily pause all
1938 threads, unpatch away, and then unpause threads. We need to make
1939 sure the unpausing doesn't resume LWP too. */
1940 lwp_suspended_inc (lwp);
1941
1942 /* And we need to be sure that any all-threads-stopping doesn't try
1943 to move threads out of the jump pads, as it could deadlock the
1944 inferior (LWP could be in the jump pad, maybe even holding the
1945 lock.) */
1946
1947 /* Do any necessary step collect actions. */
1948 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1949
1950 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1951
1952 /* See if we just hit a tracepoint and do its main collect
1953 actions. */
1954 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1955
1956 lwp_suspended_decr (lwp);
1957
1958 gdb_assert (lwp->suspended == 0);
1959 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1960
1961 if (tpoint_related_event)
1962 {
1963 if (debug_threads)
1964 debug_printf ("got a tracepoint event\n");
1965 return 1;
1966 }
1967
1968 return 0;
1969 }
1970
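/* Editor's note: an illustrative sketch (not part of gdbserver) of the
   suspend-count bracketing pattern that handle_tracepoints uses above:
   bump the count so that a stop-all/unstop-all cycle performed by the
   bracketed work cannot resume this LWP, then drop it again.  The
   DO_WORK callback is hypothetical.  */

static void
sketch_suspend_bracket (struct lwp_info *lwp, void (*do_work) (void))
{
  lwp_suspended_inc (lwp);	/* Keep LWP stopped across any unpause.  */
  do_work ();			/* May stop and unstop all other LWPs.  */
  lwp_suspended_decr (lwp);	/* Errors out if the count goes negative.  */
}
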
1971 /* Convenience wrapper.  Returns nonzero if LWP is presently collecting a
1972 fast tracepoint. */
1973
1974 static int
1975 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1976 struct fast_tpoint_collect_status *status)
1977 {
1978 CORE_ADDR thread_area;
1979 struct thread_info *thread = get_lwp_thread (lwp);
1980
1981 if (the_low_target.get_thread_area == NULL)
1982 return 0;
1983
1984 /* Get the thread area address. This is used to recognize which
1985 thread is which when tracing with the in-process agent library.
1986 We don't read anything from the address, and treat it as opaque;
1987 it's the address itself that we assume is unique per-thread. */
1988 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1989 return 0;
1990
1991 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1992 }
1993
1994 /* The reason we resume in the caller is that we want to be able
1995    to pass lwp->status_pending as WSTAT, and we need to clear
1996    status_pending_p before resuming; otherwise, linux_resume_one_lwp
1997    refuses to resume.  */
1998
1999 static int
2000 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2001 {
2002 struct thread_info *saved_thread;
2003
2004 saved_thread = current_thread;
2005 current_thread = get_lwp_thread (lwp);
2006
2007 if ((wstat == NULL
2008 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2009 && supports_fast_tracepoints ()
2010 && agent_loaded_p ())
2011 {
2012 struct fast_tpoint_collect_status status;
2013 int r;
2014
2015 if (debug_threads)
2016 debug_printf ("Checking whether LWP %ld needs to move out of the "
2017 "jump pad.\n",
2018 lwpid_of (current_thread));
2019
2020 r = linux_fast_tracepoint_collecting (lwp, &status);
2021
2022 if (wstat == NULL
2023 || (WSTOPSIG (*wstat) != SIGILL
2024 && WSTOPSIG (*wstat) != SIGFPE
2025 && WSTOPSIG (*wstat) != SIGSEGV
2026 && WSTOPSIG (*wstat) != SIGBUS))
2027 {
2028 lwp->collecting_fast_tracepoint = r;
2029
2030 if (r != 0)
2031 {
2032 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2033 {
2034 /* Haven't executed the original instruction yet.
2035 Set breakpoint there, and wait till it's hit,
2036 then single-step until exiting the jump pad. */
2037 lwp->exit_jump_pad_bkpt
2038 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2039 }
2040
2041 if (debug_threads)
2042 debug_printf ("Checking whether LWP %ld needs to move out of "
2043 "the jump pad...it does\n",
2044 lwpid_of (current_thread));
2045 current_thread = saved_thread;
2046
2047 return 1;
2048 }
2049 }
2050 else
2051 {
2052 /* If we get a synchronous signal while collecting, *and*
2053 while executing the (relocated) original instruction,
2054 reset the PC to point at the tpoint address, before
2055 reporting to GDB. Otherwise, it's an IPA lib bug: just
2056 report the signal to GDB, and pray for the best. */
2057
2058 lwp->collecting_fast_tracepoint = 0;
2059
2060 if (r != 0
2061 && (status.adjusted_insn_addr <= lwp->stop_pc
2062 && lwp->stop_pc < status.adjusted_insn_addr_end))
2063 {
2064 siginfo_t info;
2065 struct regcache *regcache;
2066
2067 /* The si_addr on a few signals references the address
2068 of the faulting instruction. Adjust that as
2069 well. */
2070 if ((WSTOPSIG (*wstat) == SIGILL
2071 || WSTOPSIG (*wstat) == SIGFPE
2072 || WSTOPSIG (*wstat) == SIGBUS
2073 || WSTOPSIG (*wstat) == SIGSEGV)
2074 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2075 (PTRACE_TYPE_ARG3) 0, &info) == 0
2076 /* Final check just to make sure we don't clobber
2077 the siginfo of non-kernel-sent signals. */
2078 && (uintptr_t) info.si_addr == lwp->stop_pc)
2079 {
2080 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2081 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2082 (PTRACE_TYPE_ARG3) 0, &info);
2083 }
2084
2085 regcache = get_thread_regcache (current_thread, 1);
2086 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2087 lwp->stop_pc = status.tpoint_addr;
2088
2089 /* Cancel any fast tracepoint lock this thread was
2090 holding. */
2091 force_unlock_trace_buffer ();
2092 }
2093
2094 if (lwp->exit_jump_pad_bkpt != NULL)
2095 {
2096 if (debug_threads)
2097 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2098 "stopping all threads momentarily.\n");
2099
2100 stop_all_lwps (1, lwp);
2101
2102 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2103 lwp->exit_jump_pad_bkpt = NULL;
2104
2105 unstop_all_lwps (1, lwp);
2106
2107 gdb_assert (lwp->suspended >= 0);
2108 }
2109 }
2110 }
2111
2112 if (debug_threads)
2113 debug_printf ("Checking whether LWP %ld needs to move out of the "
2114 "jump pad...no\n",
2115 lwpid_of (current_thread));
2116
2117 current_thread = saved_thread;
2118 return 0;
2119 }
2120
2121 /* Enqueue one signal in the "signals to report later when out of the
2122 jump pad" list. */
2123
2124 static void
2125 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2126 {
2127 struct pending_signals *p_sig;
2128 struct thread_info *thread = get_lwp_thread (lwp);
2129
2130 if (debug_threads)
2131 debug_printf ("Deferring signal %d for LWP %ld.\n",
2132 WSTOPSIG (*wstat), lwpid_of (thread));
2133
2134 if (debug_threads)
2135 {
2136 struct pending_signals *sig;
2137
2138 for (sig = lwp->pending_signals_to_report;
2139 sig != NULL;
2140 sig = sig->prev)
2141 debug_printf (" Already queued %d\n",
2142 sig->signal);
2143
2144 debug_printf (" (no more currently queued signals)\n");
2145 }
2146
2147   /* Don't enqueue non-RT signals if they are already in the deferred
2148      queue.  (SIGSTOP is the easiest signal to see ending up here
2149      twice.)  */
2150 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2151 {
2152 struct pending_signals *sig;
2153
2154 for (sig = lwp->pending_signals_to_report;
2155 sig != NULL;
2156 sig = sig->prev)
2157 {
2158 if (sig->signal == WSTOPSIG (*wstat))
2159 {
2160 if (debug_threads)
2161 debug_printf ("Not requeuing already queued non-RT signal %d"
2162 " for LWP %ld\n",
2163 sig->signal,
2164 lwpid_of (thread));
2165 return;
2166 }
2167 }
2168 }
2169
2170 p_sig = XCNEW (struct pending_signals);
2171 p_sig->prev = lwp->pending_signals_to_report;
2172 p_sig->signal = WSTOPSIG (*wstat);
2173
2174 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2175 &p_sig->info);
2176
2177 lwp->pending_signals_to_report = p_sig;
2178 }
2179
2180 /* Dequeue one signal from the "signals to report later when out of
2181 the jump pad" list. */
2182
2183 static int
2184 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2185 {
2186 struct thread_info *thread = get_lwp_thread (lwp);
2187
2188 if (lwp->pending_signals_to_report != NULL)
2189 {
2190 struct pending_signals **p_sig;
2191
2192 p_sig = &lwp->pending_signals_to_report;
2193 while ((*p_sig)->prev != NULL)
2194 p_sig = &(*p_sig)->prev;
2195
2196 *wstat = W_STOPCODE ((*p_sig)->signal);
2197 if ((*p_sig)->info.si_signo != 0)
2198 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2199 &(*p_sig)->info);
2200 free (*p_sig);
2201 *p_sig = NULL;
2202
2203 if (debug_threads)
2204 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2205 WSTOPSIG (*wstat), lwpid_of (thread));
2206
2207 if (debug_threads)
2208 {
2209 struct pending_signals *sig;
2210
2211 for (sig = lwp->pending_signals_to_report;
2212 sig != NULL;
2213 sig = sig->prev)
2214 debug_printf (" Still queued %d\n",
2215 sig->signal);
2216
2217 debug_printf (" (no more queued signals)\n");
2218 }
2219
2220 return 1;
2221 }
2222
2223 return 0;
2224 }
2225
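/* Editor's note: an illustrative sketch (not gdbserver code) of the
   list shape used by the two functions above.  Signals are pushed at
   the head (linked through PREV, as in enqueue_one_deferred_signal),
   while dequeue_one_deferred_signal walks to the tail, so delivery is
   FIFO.  The siginfo payload is omitted here.  */

#include <stdlib.h>

struct sketch_pending_sig
{
  struct sketch_pending_sig *prev;
  int signal;
};

/* Push SIGNAL on the head of *LIST.  */
static void
sketch_enqueue (struct sketch_pending_sig **list, int signal)
{
  struct sketch_pending_sig *p
    = (struct sketch_pending_sig *) calloc (1, sizeof (*p));

  p->prev = *list;
  p->signal = signal;
  *list = p;
}

/* Pop the oldest entry of *LIST; return 0 if the list is empty.  */
static int
sketch_dequeue_oldest (struct sketch_pending_sig **list)
{
  struct sketch_pending_sig **p = list;
  int signal;

  if (*p == NULL)
    return 0;
  while ((*p)->prev != NULL)
    p = &(*p)->prev;
  signal = (*p)->signal;
  free (*p);
  *p = NULL;
  return signal;
}
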
2226 /* Fetch the possibly triggered data watchpoint info and store it in
2227 CHILD.
2228
2229    On some archs, like x86, that use debug registers to set
2230    watchpoints, the way to know which watched address trapped is
2231    to check the register that is used to select which address to
2232    watch.  The problem is that, between setting the watchpoint and
2233    reading back which data address trapped, the user may change the
2234    set of watchpoints, and, as a consequence, GDB changes the
2235    debug registers in the inferior.  To avoid reading back a stale
2236    stopped-data-address when that happens, we cache in CHILD the
2237    fact that a watchpoint trapped, and the corresponding data
2238    address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
2239    changes the debug registers meanwhile, we still have the cached data.  */
2240
2241 static int
2242 check_stopped_by_watchpoint (struct lwp_info *child)
2243 {
2244 if (the_low_target.stopped_by_watchpoint != NULL)
2245 {
2246 struct thread_info *saved_thread;
2247
2248 saved_thread = current_thread;
2249 current_thread = get_lwp_thread (child);
2250
2251 if (the_low_target.stopped_by_watchpoint ())
2252 {
2253 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2254
2255 if (the_low_target.stopped_data_address != NULL)
2256 child->stopped_data_address
2257 = the_low_target.stopped_data_address ();
2258 else
2259 child->stopped_data_address = 0;
2260 }
2261
2262 current_thread = saved_thread;
2263 }
2264
2265 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2266 }
2267
2268 /* Return the ptrace options that we want to try to enable. */
2269
2270 static int
2271 linux_low_ptrace_options (int attached)
2272 {
2273 int options = 0;
2274
2275 if (!attached)
2276 options |= PTRACE_O_EXITKILL;
2277
2278 if (report_fork_events)
2279 options |= PTRACE_O_TRACEFORK;
2280
2281 if (report_vfork_events)
2282 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2283
2284 if (report_exec_events)
2285 options |= PTRACE_O_TRACEEXEC;
2286
2287 options |= PTRACE_O_TRACESYSGOOD;
2288
2289 return options;
2290 }
2291
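/* Editor's note: for illustration only.  A mask like the one built by
   linux_low_ptrace_options is applied to a stopped tracee with
   PTRACE_SETOPTIONS; in gdbserver the actual call is made by
   linux_enable_event_reporting (see linux_low_filter_event below),
   so this sketch is not how the file really does it.  */

#include <sys/ptrace.h>
#include <sys/types.h>

static long
sketch_apply_ptrace_options (pid_t pid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  return ptrace (PTRACE_SETOPTIONS, pid, (void *) 0,
		 (void *) (long) options);
}
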
2292 /* Do low-level handling of the event, and check if we should go on
2293 and pass it to caller code. Return the affected lwp if we are, or
2294 NULL otherwise. */
2295
2296 static struct lwp_info *
2297 linux_low_filter_event (int lwpid, int wstat)
2298 {
2299 struct lwp_info *child;
2300 struct thread_info *thread;
2301 int have_stop_pc = 0;
2302
2303 child = find_lwp_pid (pid_to_ptid (lwpid));
2304
2305 /* Check for stop events reported by a process we didn't already
2306 know about - anything not already in our LWP list.
2307
2308 If we're expecting to receive stopped processes after
2309 fork, vfork, and clone events, then we'll just add the
2310 new one to our list and go back to waiting for the event
2311 to be reported - the stopped process might be returned
2312 from waitpid before or after the event is.
2313
2314 But note the case of a non-leader thread exec'ing after the
2315      leader has exited and gone from our lists (because
2316 check_zombie_leaders deleted it). The non-leader thread
2317 changes its tid to the tgid. */
2318
2319 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2320 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2321 {
2322 ptid_t child_ptid;
2323
2324 /* A multi-thread exec after we had seen the leader exiting. */
2325 if (debug_threads)
2326 {
2327 	  debug_printf ("LLW: Re-adding thread group leader LWP %d "
2328 			"after exec.\n", lwpid);
2329 }
2330
2331 child_ptid = ptid_build (lwpid, lwpid, 0);
2332 child = add_lwp (child_ptid);
2333 child->stopped = 1;
2334 current_thread = child->thread;
2335 }
2336
2337 /* If we didn't find a process, one of two things presumably happened:
2338 - A process we started and then detached from has exited. Ignore it.
2339 - A process we are controlling has forked and the new child's stop
2340 was reported to us by the kernel. Save its PID. */
2341 if (child == NULL && WIFSTOPPED (wstat))
2342 {
2343 add_to_pid_list (&stopped_pids, lwpid, wstat);
2344 return NULL;
2345 }
2346 else if (child == NULL)
2347 return NULL;
2348
2349 thread = get_lwp_thread (child);
2350
2351 child->stopped = 1;
2352
2353 child->last_status = wstat;
2354
2355 /* Check if the thread has exited. */
2356   if (WIFEXITED (wstat) || WIFSIGNALED (wstat))
2357 {
2358 if (debug_threads)
2359 debug_printf ("LLFE: %d exited.\n", lwpid);
2360 /* If there is at least one more LWP, then the exit signal was
2361 not the end of the debugged application and should be
2362 ignored, unless GDB wants to hear about thread exits. */
2363 if (report_thread_events
2364 || last_thread_of_process_p (pid_of (thread)))
2365 {
2366 	  /* Since events are serialized to the GDB core, we can't
2367 	     report this one right now.  Leave the status pending for
2368 	     the next time we're able to report it.  */
2369 mark_lwp_dead (child, wstat);
2370 return child;
2371 }
2372 else
2373 {
2374 delete_lwp (child);
2375 return NULL;
2376 }
2377 }
2378
2379 gdb_assert (WIFSTOPPED (wstat));
2380
2381 if (WIFSTOPPED (wstat))
2382 {
2383 struct process_info *proc;
2384
2385 /* Architecture-specific setup after inferior is running. */
2386 proc = find_process_pid (pid_of (thread));
2387 if (proc->tdesc == NULL)
2388 {
2389 if (proc->attached)
2390 {
2391 /* This needs to happen after we have attached to the
2392 inferior and it is stopped for the first time, but
2393 before we access any inferior registers. */
2394 linux_arch_setup_thread (thread);
2395 }
2396 else
2397 {
2398 /* The process is started, but GDBserver will do
2399 architecture-specific setup after the program stops at
2400 the first instruction. */
2401 child->status_pending_p = 1;
2402 child->status_pending = wstat;
2403 return child;
2404 }
2405 }
2406 }
2407
2408 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2409 {
2410 struct process_info *proc = find_process_pid (pid_of (thread));
2411 int options = linux_low_ptrace_options (proc->attached);
2412
2413 linux_enable_event_reporting (lwpid, options);
2414 child->must_set_ptrace_flags = 0;
2415 }
2416
2417 /* Always update syscall_state, even if it will be filtered later. */
2418 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2419 {
2420 child->syscall_state
2421 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2422 ? TARGET_WAITKIND_SYSCALL_RETURN
2423 : TARGET_WAITKIND_SYSCALL_ENTRY);
2424 }
2425 else
2426 {
2427 /* Almost all other ptrace-stops are known to be outside of system
2428 calls, with further exceptions in handle_extended_wait. */
2429 child->syscall_state = TARGET_WAITKIND_IGNORE;
2430 }
2431
2432 /* Be careful to not overwrite stop_pc until save_stop_reason is
2433 called. */
2434 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2435 && linux_is_extended_waitstatus (wstat))
2436 {
2437 child->stop_pc = get_pc (child);
2438 if (handle_extended_wait (&child, wstat))
2439 {
2440 /* The event has been handled, so just return without
2441 reporting it. */
2442 return NULL;
2443 }
2444 }
2445
2446 if (linux_wstatus_maybe_breakpoint (wstat))
2447 {
2448 if (save_stop_reason (child))
2449 have_stop_pc = 1;
2450 }
2451
2452 if (!have_stop_pc)
2453 child->stop_pc = get_pc (child);
2454
2455 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2456 && child->stop_expected)
2457 {
2458 if (debug_threads)
2459 debug_printf ("Expected stop.\n");
2460 child->stop_expected = 0;
2461
2462 if (thread->last_resume_kind == resume_stop)
2463 {
2464 /* We want to report the stop to the core. Treat the
2465 SIGSTOP as a normal event. */
2466 if (debug_threads)
2467 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2468 target_pid_to_str (ptid_of (thread)));
2469 }
2470 else if (stopping_threads != NOT_STOPPING_THREADS)
2471 {
2472 /* Stopping threads. We don't want this SIGSTOP to end up
2473 pending. */
2474 if (debug_threads)
2475 debug_printf ("LLW: SIGSTOP caught for %s "
2476 "while stopping threads.\n",
2477 target_pid_to_str (ptid_of (thread)));
2478 return NULL;
2479 }
2480 else
2481 {
2482 /* This is a delayed SIGSTOP. Filter out the event. */
2483 if (debug_threads)
2484 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2485 child->stepping ? "step" : "continue",
2486 target_pid_to_str (ptid_of (thread)));
2487
2488 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2489 return NULL;
2490 }
2491 }
2492
2493 child->status_pending_p = 1;
2494 child->status_pending = wstat;
2495 return child;
2496 }
2497
2498 /* Return 1 if THREAD is doing hardware single step, 0 otherwise.  */
2499
2500 static int
2501 maybe_hw_step (struct thread_info *thread)
2502 {
2503 if (can_hardware_single_step ())
2504 return 1;
2505 else
2506 {
2507 struct process_info *proc = get_thread_process (thread);
2508
2509       /* GDBserver must insert a reinsert breakpoint for software
2510 	 single step.  */
2511 gdb_assert (has_reinsert_breakpoints (proc));
2512 return 0;
2513 }
2514 }
2515
2516 /* Resume LWPs that are currently stopped without any pending status
2517 to report, but are resumed from the core's perspective. */
2518
2519 static void
2520 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2521 {
2522 struct thread_info *thread = (struct thread_info *) entry;
2523 struct lwp_info *lp = get_thread_lwp (thread);
2524
2525 if (lp->stopped
2526 && !lp->suspended
2527 && !lp->status_pending_p
2528 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2529 {
2530 int step = thread->last_resume_kind == resume_step;
2531
2532 if (debug_threads)
2533 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2534 target_pid_to_str (ptid_of (thread)),
2535 paddress (lp->stop_pc),
2536 step);
2537
2538 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2539 }
2540 }
2541
2542 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2543 match FILTER_PTID (leaving others pending). The PTIDs can be:
2544 minus_one_ptid, to specify any child; a pid PTID, specifying all
2545 lwps of a thread group; or a PTID representing a single lwp. Store
2546 the stop status through the status pointer WSTAT. OPTIONS is
2547 passed to the waitpid call. Return 0 if no event was found and
2548    OPTIONS contains WNOHANG.  Return -1 if no unwaited-for child
2549    was found.  Return the PID of the stopped child otherwise.  */
2550
2551 static int
2552 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2553 int *wstatp, int options)
2554 {
2555 struct thread_info *event_thread;
2556 struct lwp_info *event_child, *requested_child;
2557 sigset_t block_mask, prev_mask;
2558
2559 retry:
2560 /* N.B. event_thread points to the thread_info struct that contains
2561 event_child. Keep them in sync. */
2562 event_thread = NULL;
2563 event_child = NULL;
2564 requested_child = NULL;
2565
2566 /* Check for a lwp with a pending status. */
2567
2568 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2569 {
2570 event_thread = (struct thread_info *)
2571 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2572 if (event_thread != NULL)
2573 event_child = get_thread_lwp (event_thread);
2574 if (debug_threads && event_thread)
2575 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2576 }
2577 else if (!ptid_equal (filter_ptid, null_ptid))
2578 {
2579 requested_child = find_lwp_pid (filter_ptid);
2580
2581 if (stopping_threads == NOT_STOPPING_THREADS
2582 && requested_child->status_pending_p
2583 && requested_child->collecting_fast_tracepoint)
2584 {
2585 enqueue_one_deferred_signal (requested_child,
2586 &requested_child->status_pending);
2587 requested_child->status_pending_p = 0;
2588 requested_child->status_pending = 0;
2589 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2590 }
2591
2592 if (requested_child->suspended
2593 && requested_child->status_pending_p)
2594 {
2595 internal_error (__FILE__, __LINE__,
2596 "requesting an event out of a"
2597 " suspended child?");
2598 }
2599
2600 if (requested_child->status_pending_p)
2601 {
2602 event_child = requested_child;
2603 event_thread = get_lwp_thread (event_child);
2604 }
2605 }
2606
2607 if (event_child != NULL)
2608 {
2609 if (debug_threads)
2610 debug_printf ("Got an event from pending child %ld (%04x)\n",
2611 lwpid_of (event_thread), event_child->status_pending);
2612 *wstatp = event_child->status_pending;
2613 event_child->status_pending_p = 0;
2614 event_child->status_pending = 0;
2615 current_thread = event_thread;
2616 return lwpid_of (event_thread);
2617 }
2618
2619 /* But if we don't find a pending event, we'll have to wait.
2620
2621 We only enter this loop if no process has a pending wait status.
2622 Thus any action taken in response to a wait status inside this
2623 loop is responding as soon as we detect the status, not after any
2624 pending events. */
2625
2626 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2627 all signals while here. */
2628 sigfillset (&block_mask);
2629 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2630
2631 /* Always pull all events out of the kernel. We'll randomly select
2632 an event LWP out of all that have events, to prevent
2633 starvation. */
2634 while (event_child == NULL)
2635 {
2636 pid_t ret = 0;
2637
2638       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2639 	 quirks:
2640
2641 - If the thread group leader exits while other threads in the
2642 thread group still exist, waitpid(TGID, ...) hangs. That
2643 waitpid won't return an exit status until the other threads
2644 in the group are reaped.
2645
2646 - When a non-leader thread execs, that thread just vanishes
2647 without reporting an exit (so we'd hang if we waited for it
2648 explicitly in that case). The exec event is reported to
2649 the TGID pid. */
2650 errno = 0;
2651 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2652
2653 if (debug_threads)
2654 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2655 ret, errno ? strerror (errno) : "ERRNO-OK");
2656
2657 if (ret > 0)
2658 {
2659 if (debug_threads)
2660 {
2661 debug_printf ("LLW: waitpid %ld received %s\n",
2662 (long) ret, status_to_str (*wstatp));
2663 }
2664
2665 /* Filter all events. IOW, leave all events pending. We'll
2666 randomly select an event LWP out of all that have events
2667 below. */
2668 linux_low_filter_event (ret, *wstatp);
2669 /* Retry until nothing comes out of waitpid. A single
2670 SIGCHLD can indicate more than one child stopped. */
2671 continue;
2672 }
2673
2674 /* Now that we've pulled all events out of the kernel, resume
2675 LWPs that don't have an interesting event to report. */
2676 if (stopping_threads == NOT_STOPPING_THREADS)
2677 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2678
2679 /* ... and find an LWP with a status to report to the core, if
2680 any. */
2681 event_thread = (struct thread_info *)
2682 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2683 if (event_thread != NULL)
2684 {
2685 event_child = get_thread_lwp (event_thread);
2686 *wstatp = event_child->status_pending;
2687 event_child->status_pending_p = 0;
2688 event_child->status_pending = 0;
2689 break;
2690 }
2691
2692 /* Check for zombie thread group leaders. Those can't be reaped
2693 until all other threads in the thread group are. */
2694 check_zombie_leaders ();
2695
2696 /* If there are no resumed children left in the set of LWPs we
2697 want to wait for, bail. We can't just block in
2698 waitpid/sigsuspend, because lwps might have been left stopped
2699 in trace-stop state, and we'd be stuck forever waiting for
2700 their status to change (which would only happen if we resumed
2701 them). Even if WNOHANG is set, this return code is preferred
2702 over 0 (below), as it is more detailed. */
2703 if ((find_inferior (&all_threads,
2704 not_stopped_callback,
2705 &wait_ptid) == NULL))
2706 {
2707 if (debug_threads)
2708 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2709 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2710 return -1;
2711 }
2712
2713 /* No interesting event to report to the caller. */
2714 if ((options & WNOHANG))
2715 {
2716 if (debug_threads)
2717 debug_printf ("WNOHANG set, no event found\n");
2718
2719 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2720 return 0;
2721 }
2722
2723 /* Block until we get an event reported with SIGCHLD. */
2724 if (debug_threads)
2725 debug_printf ("sigsuspend'ing\n");
2726
2727 sigsuspend (&prev_mask);
2728 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2729 goto retry;
2730 }
2731
2732 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2733
2734 current_thread = event_thread;
2735
2736 return lwpid_of (event_thread);
2737 }
2738
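/* Editor's note: an isolated sketch (not gdbserver code) of the wait
   pattern implemented above: drain every pending status with
   waitpid (-1, ..., WNOHANG), then block in sigsuspend until SIGCHLD.
   __WALL is Linux-specific; the real code also distributes each
   drained status via linux_low_filter_event, which this sketch
   skips.  */

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static pid_t
sketch_wait_any (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  /* Block all signals so SIGCHLD can't slip in between the failed
     waitpid and the sigsuspend below.  */
  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      ret = waitpid (-1, wstatp, __WALL | WNOHANG);
      if (ret != 0)
	break;			/* A child status, or an error.  */
      sigsuspend (&prev_mask);	/* Sleep until SIGCHLD arrives.  */
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}
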
2739 /* Wait for an event from child(ren) PTID. PTIDs can be:
2740 minus_one_ptid, to specify any child; a pid PTID, specifying all
2741 lwps of a thread group; or a PTID representing a single lwp. Store
2742 the stop status through the status pointer WSTAT. OPTIONS is
2743 passed to the waitpid call. Return 0 if no event was found and
2744    OPTIONS contains WNOHANG.  Return -1 if no unwaited-for child
2745    was found.  Return the PID of the stopped child otherwise.  */
2746
2747 static int
2748 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2749 {
2750 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2751 }
2752
2753 /* Count the LWPs that have had events.  */
2754
2755 static int
2756 count_events_callback (struct inferior_list_entry *entry, void *data)
2757 {
2758 struct thread_info *thread = (struct thread_info *) entry;
2759 struct lwp_info *lp = get_thread_lwp (thread);
2760 int *count = (int *) data;
2761
2762 gdb_assert (count != NULL);
2763
2764 /* Count only resumed LWPs that have an event pending. */
2765 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2766 && lp->status_pending_p)
2767 (*count)++;
2768
2769 return 0;
2770 }
2771
2772 /* Select the LWP (if any) that is currently being single-stepped. */
2773
2774 static int
2775 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2776 {
2777 struct thread_info *thread = (struct thread_info *) entry;
2778 struct lwp_info *lp = get_thread_lwp (thread);
2779
2780 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2781 && thread->last_resume_kind == resume_step
2782 && lp->status_pending_p)
2783 return 1;
2784 else
2785 return 0;
2786 }
2787
2788 /* Select the Nth LWP that has had an event. */
2789
2790 static int
2791 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2792 {
2793 struct thread_info *thread = (struct thread_info *) entry;
2794 struct lwp_info *lp = get_thread_lwp (thread);
2795 int *selector = (int *) data;
2796
2797 gdb_assert (selector != NULL);
2798
2799 /* Select only resumed LWPs that have an event pending. */
2800 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2801 && lp->status_pending_p)
2802 if ((*selector)-- == 0)
2803 return 1;
2804
2805 return 0;
2806 }
2807
2808 /* Select one LWP out of those that have events pending. */
2809
2810 static void
2811 select_event_lwp (struct lwp_info **orig_lp)
2812 {
2813 int num_events = 0;
2814 int random_selector;
2815 struct thread_info *event_thread = NULL;
2816
2817 /* In all-stop, give preference to the LWP that is being
2818 single-stepped. There will be at most one, and it's the LWP that
2819 the core is most interested in. If we didn't do this, then we'd
2820 have to handle pending step SIGTRAPs somehow in case the core
2821 later continues the previously-stepped thread, otherwise we'd
2822 report the pending SIGTRAP, and the core, not having stepped the
2823 thread, wouldn't understand what the trap was for, and therefore
2824 would report it to the user as a random signal. */
2825 if (!non_stop)
2826 {
2827 event_thread
2828 = (struct thread_info *) find_inferior (&all_threads,
2829 select_singlestep_lwp_callback,
2830 NULL);
2831 if (event_thread != NULL)
2832 {
2833 if (debug_threads)
2834 debug_printf ("SEL: Select single-step %s\n",
2835 target_pid_to_str (ptid_of (event_thread)));
2836 }
2837 }
2838 if (event_thread == NULL)
2839 {
2840 /* No single-stepping LWP. Select one at random, out of those
2841 which have had events. */
2842
2843 /* First see how many events we have. */
2844 find_inferior (&all_threads, count_events_callback, &num_events);
2845 gdb_assert (num_events > 0);
2846
2847 /* Now randomly pick a LWP out of those that have had
2848 events. */
2849 random_selector = (int)
2850 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2851
2852 if (debug_threads && num_events > 1)
2853 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2854 num_events, random_selector);
2855
2856 event_thread
2857 = (struct thread_info *) find_inferior (&all_threads,
2858 select_event_lwp_callback,
2859 &random_selector);
2860 }
2861
2862 if (event_thread != NULL)
2863 {
2864 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2865
2866 /* Switch the event LWP. */
2867 *orig_lp = event_lp;
2868 }
2869 }
2870
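/* Editor's note: the selector computed above maps rand () uniformly
   onto 0 .. num_events-1; e.g. with num_events == 3, values of
   rand () in [0, RAND_MAX] land in thirds, yielding 0, 1 or 2.  A
   standalone rendering of the same expression: */

#include <stdlib.h>

static int
sketch_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
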
2871 /* Decrement the suspend count of an LWP. */
2872
2873 static int
2874 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2875 {
2876 struct thread_info *thread = (struct thread_info *) entry;
2877 struct lwp_info *lwp = get_thread_lwp (thread);
2878
2879 /* Ignore EXCEPT. */
2880 if (lwp == except)
2881 return 0;
2882
2883 lwp_suspended_decr (lwp);
2884 return 0;
2885 }
2886
2887 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2888    non-NULL.  */
2889
2890 static void
2891 unsuspend_all_lwps (struct lwp_info *except)
2892 {
2893 find_inferior (&all_threads, unsuspend_one_lwp, except);
2894 }
2895
2896 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2897 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2898 void *data);
2899 static int lwp_running (struct inferior_list_entry *entry, void *data);
2900 static ptid_t linux_wait_1 (ptid_t ptid,
2901 struct target_waitstatus *ourstatus,
2902 int target_options);
2903
2904 /* Stabilize threads (move out of jump pads).
2905
2906 If a thread is midway collecting a fast tracepoint, we need to
2907 finish the collection and move it out of the jump pad before
2908 reporting the signal.
2909
2910 This avoids recursion while collecting (when a signal arrives
2911 midway, and the signal handler itself collects), which would trash
2912 the trace buffer. In case the user set a breakpoint in a signal
2913    handler, this avoids the backtrace showing the jump pad, etc.
2914    Most importantly, there are certain things we can't do safely if
2915    threads are stopped in a jump pad (or in its callees).  For
2916 example:
2917
2918    - starting a new trace run.  A thread still collecting the
2919    previous run could trash the trace buffer when resumed.  The trace
2920    buffer control structures would have been reset but the thread had
2921    no way to tell.  The thread could even be midway through memcpy'ing
2922    into the buffer, which would mean that when resumed, it would
2923    clobber the trace buffer that had been set up for the new run.
2924
2925    - we can't rewrite/reuse the jump pads for new tracepoints
2926    safely.  Say you do tstart while a thread is stopped midway through
2927    collecting.  When the thread is later resumed, it finishes the
2928    collection, and returns to the jump pad, to execute the original
2929    instruction that was under the tracepoint jump at the time the
2930    older run had been started.  If the jump pad had since been
2931    rewritten for something else in the new run, the thread would now
2932    execute wrong or random instructions.  */
2933
2934 static void
2935 linux_stabilize_threads (void)
2936 {
2937 struct thread_info *saved_thread;
2938 struct thread_info *thread_stuck;
2939
2940 thread_stuck
2941 = (struct thread_info *) find_inferior (&all_threads,
2942 stuck_in_jump_pad_callback,
2943 NULL);
2944 if (thread_stuck != NULL)
2945 {
2946 if (debug_threads)
2947 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2948 lwpid_of (thread_stuck));
2949 return;
2950 }
2951
2952 saved_thread = current_thread;
2953
2954 stabilizing_threads = 1;
2955
2956 /* Kick 'em all. */
2957 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2958
2959 /* Loop until all are stopped out of the jump pads. */
2960 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2961 {
2962 struct target_waitstatus ourstatus;
2963 struct lwp_info *lwp;
2964 int wstat;
2965
2966       /* Note that we go through the full wait event loop.  While
2967 	 moving threads out of the jump pad, we need to be able to step
2968 	 over internal breakpoints and such.  */
2969 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2970
2971 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2972 {
2973 lwp = get_thread_lwp (current_thread);
2974
2975 /* Lock it. */
2976 lwp_suspended_inc (lwp);
2977
2978 if (ourstatus.value.sig != GDB_SIGNAL_0
2979 || current_thread->last_resume_kind == resume_stop)
2980 {
2981 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2982 enqueue_one_deferred_signal (lwp, &wstat);
2983 }
2984 }
2985 }
2986
2987 unsuspend_all_lwps (NULL);
2988
2989 stabilizing_threads = 0;
2990
2991 current_thread = saved_thread;
2992
2993 if (debug_threads)
2994 {
2995 thread_stuck
2996 = (struct thread_info *) find_inferior (&all_threads,
2997 stuck_in_jump_pad_callback,
2998 NULL);
2999 if (thread_stuck != NULL)
3000 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3001 lwpid_of (thread_stuck));
3002 }
3003 }
3004
3005 /* Convenience function that is called when the kernel reports an
3006 event that is not passed out to GDB. */
3007
3008 static ptid_t
3009 ignore_event (struct target_waitstatus *ourstatus)
3010 {
3011 /* If we got an event, there may still be others, as a single
3012 SIGCHLD can indicate more than one child stopped. This forces
3013 another target_wait call. */
3014 async_file_mark ();
3015
3016 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3017 return null_ptid;
3018 }
3019
3020 /* Convenience function that is called when the kernel reports an exit
3021 event. This decides whether to report the event to GDB as a
3022 process exit event, a thread exit event, or to suppress the
3023 event. */
3024
3025 static ptid_t
3026 filter_exit_event (struct lwp_info *event_child,
3027 struct target_waitstatus *ourstatus)
3028 {
3029 struct thread_info *thread = get_lwp_thread (event_child);
3030 ptid_t ptid = ptid_of (thread);
3031
3032 if (!last_thread_of_process_p (pid_of (thread)))
3033 {
3034 if (report_thread_events)
3035 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3036 else
3037 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3038
3039 delete_lwp (event_child);
3040 }
3041 return ptid;
3042 }
3043
3044 /* Returns 1 if GDB is interested in any event_child syscalls. */
3045
3046 static int
3047 gdb_catching_syscalls_p (struct lwp_info *event_child)
3048 {
3049 struct thread_info *thread = get_lwp_thread (event_child);
3050 struct process_info *proc = get_thread_process (thread);
3051
3052 return !VEC_empty (int, proc->syscalls_to_catch);
3053 }
3054
3055 /* Returns 1 if GDB is interested in the event_child syscall.
3056    Only to be called when the stop reason is SYSCALL_SIGTRAP.  */
3057
3058 static int
3059 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3060 {
3061 int i, iter;
3062 int sysno, sysret;
3063 struct thread_info *thread = get_lwp_thread (event_child);
3064 struct process_info *proc = get_thread_process (thread);
3065
3066 if (VEC_empty (int, proc->syscalls_to_catch))
3067 return 0;
3068
3069 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3070 return 1;
3071
3072 get_syscall_trapinfo (event_child, &sysno, &sysret);
3073 for (i = 0;
3074 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3075 i++)
3076 if (iter == sysno)
3077 return 1;
3078
3079 return 0;
3080 }
3081
3082 /* Wait for the process, returning its status.  */
3083
3084 static ptid_t
3085 linux_wait_1 (ptid_t ptid,
3086 struct target_waitstatus *ourstatus, int target_options)
3087 {
3088 int w;
3089 struct lwp_info *event_child;
3090 int options;
3091 int pid;
3092 int step_over_finished;
3093 int bp_explains_trap;
3094 int maybe_internal_trap;
3095 int report_to_gdb;
3096 int trace_event;
3097 int in_step_range;
3098 int any_resumed;
3099
3100 if (debug_threads)
3101 {
3102 debug_enter ();
3103 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3104 }
3105
3106 /* Translate generic target options into linux options. */
3107 options = __WALL;
3108 if (target_options & TARGET_WNOHANG)
3109 options |= WNOHANG;
3110
3111 bp_explains_trap = 0;
3112 trace_event = 0;
3113 in_step_range = 0;
3114 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3115
3116 /* Find a resumed LWP, if any. */
3117 if (find_inferior (&all_threads,
3118 status_pending_p_callback,
3119 &minus_one_ptid) != NULL)
3120 any_resumed = 1;
3121 else if ((find_inferior (&all_threads,
3122 not_stopped_callback,
3123 &minus_one_ptid) != NULL))
3124 any_resumed = 1;
3125 else
3126 any_resumed = 0;
3127
3128 if (ptid_equal (step_over_bkpt, null_ptid))
3129 pid = linux_wait_for_event (ptid, &w, options);
3130 else
3131 {
3132 if (debug_threads)
3133 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3134 target_pid_to_str (step_over_bkpt));
3135 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3136 }
3137
3138 if (pid == 0 || (pid == -1 && !any_resumed))
3139 {
3140 gdb_assert (target_options & TARGET_WNOHANG);
3141
3142 if (debug_threads)
3143 {
3144 debug_printf ("linux_wait_1 ret = null_ptid, "
3145 "TARGET_WAITKIND_IGNORE\n");
3146 debug_exit ();
3147 }
3148
3149 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3150 return null_ptid;
3151 }
3152 else if (pid == -1)
3153 {
3154 if (debug_threads)
3155 {
3156 debug_printf ("linux_wait_1 ret = null_ptid, "
3157 "TARGET_WAITKIND_NO_RESUMED\n");
3158 debug_exit ();
3159 }
3160
3161 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3162 return null_ptid;
3163 }
3164
3165 event_child = get_thread_lwp (current_thread);
3166
3167 /* linux_wait_for_event only returns an exit status for the last
3168 child of a process. Report it. */
3169 if (WIFEXITED (w) || WIFSIGNALED (w))
3170 {
3171 if (WIFEXITED (w))
3172 {
3173 ourstatus->kind = TARGET_WAITKIND_EXITED;
3174 ourstatus->value.integer = WEXITSTATUS (w);
3175
3176 if (debug_threads)
3177 {
3178 debug_printf ("linux_wait_1 ret = %s, exited with "
3179 "retcode %d\n",
3180 target_pid_to_str (ptid_of (current_thread)),
3181 WEXITSTATUS (w));
3182 debug_exit ();
3183 }
3184 }
3185 else
3186 {
3187 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3188 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3189
3190 if (debug_threads)
3191 {
3192 debug_printf ("linux_wait_1 ret = %s, terminated with "
3193 "signal %d\n",
3194 target_pid_to_str (ptid_of (current_thread)),
3195 WTERMSIG (w));
3196 debug_exit ();
3197 }
3198 }
3199
3200 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3201 return filter_exit_event (event_child, ourstatus);
3202
3203 return ptid_of (current_thread);
3204 }
3205
3206   /* If a step-over executes a breakpoint instruction, then in the case
3207      of a hardware single step it means a gdb/gdbserver breakpoint had
3208      been planted on top of a permanent breakpoint, while in the case of
3209      a software single step it may just mean that gdbserver hit the
3210      reinsert breakpoint.  The PC has been adjusted by save_stop_reason
3211      to point at the breakpoint address.
3212      So, in the case of a hardware single step, advance the PC manually
3213      past the breakpoint, and in the case of a software single step,
3214      advance only if it's not the reinsert breakpoint we are hitting.
3215      This prevents a program from trapping on a permanent breakpoint
3216      forever.  */
3217 if (!ptid_equal (step_over_bkpt, null_ptid)
3218 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3219 && (event_child->stepping
3220 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3221 {
3222 int increment_pc = 0;
3223 int breakpoint_kind = 0;
3224 CORE_ADDR stop_pc = event_child->stop_pc;
3225
3226       breakpoint_kind
3227 	= the_target->breakpoint_kind_from_current_state (&stop_pc);
3228 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3229
3230 if (debug_threads)
3231 {
3232 debug_printf ("step-over for %s executed software breakpoint\n",
3233 target_pid_to_str (ptid_of (current_thread)));
3234 }
3235
3236 if (increment_pc != 0)
3237 {
3238 struct regcache *regcache
3239 = get_thread_regcache (current_thread, 1);
3240
3241 event_child->stop_pc += increment_pc;
3242 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3243
3244 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3245 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3246 }
3247 }
3248
3249 /* If this event was not handled before, and is not a SIGTRAP, we
3250 report it. SIGILL and SIGSEGV are also treated as traps in case
3251 a breakpoint is inserted at the current PC. If this target does
3252 not support internal breakpoints at all, we also report the
3253 SIGTRAP without further processing; it's of no concern to us. */
3254 maybe_internal_trap
3255 = (supports_breakpoints ()
3256 && (WSTOPSIG (w) == SIGTRAP
3257 || ((WSTOPSIG (w) == SIGILL
3258 || WSTOPSIG (w) == SIGSEGV)
3259 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3260
3261 if (maybe_internal_trap)
3262 {
3263 /* Handle anything that requires bookkeeping before deciding to
3264 report the event or continue waiting. */
3265
3266 /* First check if we can explain the SIGTRAP with an internal
3267 breakpoint, or if we should possibly report the event to GDB.
3268 Do this before anything that may remove or insert a
3269 breakpoint. */
3270 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3271
3272 /* We have a SIGTRAP, possibly a step-over dance has just
3273 finished. If so, tweak the state machine accordingly,
3274 reinsert breakpoints and delete any reinsert (software
3275 single-step) breakpoints. */
3276 step_over_finished = finish_step_over (event_child);
3277
3278 /* Now invoke the callbacks of any internal breakpoints there. */
3279 check_breakpoints (event_child->stop_pc);
3280
3281 /* Handle tracepoint data collecting. This may overflow the
3282 trace buffer, and cause a tracing stop, removing
3283 breakpoints. */
3284 trace_event = handle_tracepoints (event_child);
3285
3286 if (bp_explains_trap)
3287 {
3288 if (debug_threads)
3289 debug_printf ("Hit a gdbserver breakpoint.\n");
3290 }
3291 }
3292 else
3293 {
3294 /* We have some other signal, possibly a step-over dance was in
3295 progress, and it should be cancelled too. */
3296 step_over_finished = finish_step_over (event_child);
3297 }
3298
3299 /* We have all the data we need. Either report the event to GDB, or
3300 resume threads and keep waiting for more. */
3301
3302 /* If we're collecting a fast tracepoint, finish the collection and
3303 move out of the jump pad before delivering a signal. See
3304 linux_stabilize_threads. */
3305
3306 if (WIFSTOPPED (w)
3307 && WSTOPSIG (w) != SIGTRAP
3308 && supports_fast_tracepoints ()
3309 && agent_loaded_p ())
3310 {
3311 if (debug_threads)
3312 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3313 "to defer or adjust it.\n",
3314 WSTOPSIG (w), lwpid_of (current_thread));
3315
3316 /* Allow debugging the jump pad itself. */
3317 if (current_thread->last_resume_kind != resume_step
3318 && maybe_move_out_of_jump_pad (event_child, &w))
3319 {
3320 enqueue_one_deferred_signal (event_child, &w);
3321
3322 if (debug_threads)
3323 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3324 WSTOPSIG (w), lwpid_of (current_thread));
3325
3326 linux_resume_one_lwp (event_child, 0, 0, NULL);
3327
3328 return ignore_event (ourstatus);
3329 }
3330 }
3331
3332 if (event_child->collecting_fast_tracepoint)
3333 {
3334 if (debug_threads)
3335 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3336 "Check if we're already there.\n",
3337 lwpid_of (current_thread),
3338 event_child->collecting_fast_tracepoint);
3339
3340 trace_event = 1;
3341
3342 event_child->collecting_fast_tracepoint
3343 = linux_fast_tracepoint_collecting (event_child, NULL);
3344
3345 if (event_child->collecting_fast_tracepoint != 1)
3346 {
3347 /* No longer need this breakpoint. */
3348 if (event_child->exit_jump_pad_bkpt != NULL)
3349 {
3350 if (debug_threads)
3351 	    debug_printf ("No longer need exit-jump-pad bkpt; removing it. "
3352 			  "Stopping all threads momentarily.\n");
3353
3354 /* Other running threads could hit this breakpoint.
3355 We don't handle moribund locations like GDB does,
3356 instead we always pause all threads when removing
3357 breakpoints, so that any step-over or
3358 decr_pc_after_break adjustment is always taken
3359 care of while the breakpoint is still
3360 inserted. */
3361 stop_all_lwps (1, event_child);
3362
3363 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3364 event_child->exit_jump_pad_bkpt = NULL;
3365
3366 unstop_all_lwps (1, event_child);
3367
3368 gdb_assert (event_child->suspended >= 0);
3369 }
3370 }
3371
3372 if (event_child->collecting_fast_tracepoint == 0)
3373 {
3374 if (debug_threads)
3375 debug_printf ("fast tracepoint finished "
3376 "collecting successfully.\n");
3377
3378 /* We may have a deferred signal to report. */
3379 if (dequeue_one_deferred_signal (event_child, &w))
3380 {
3381 if (debug_threads)
3382 debug_printf ("dequeued one signal.\n");
3383 }
3384 else
3385 {
3386 if (debug_threads)
3387 debug_printf ("no deferred signals.\n");
3388
3389 if (stabilizing_threads)
3390 {
3391 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3392 ourstatus->value.sig = GDB_SIGNAL_0;
3393
3394 if (debug_threads)
3395 {
3396 debug_printf ("linux_wait_1 ret = %s, stopped "
3397 "while stabilizing threads\n",
3398 target_pid_to_str (ptid_of (current_thread)));
3399 debug_exit ();
3400 }
3401
3402 return ptid_of (current_thread);
3403 }
3404 }
3405 }
3406 }
3407
3408 /* Check whether GDB would be interested in this event. */
3409
3410 /* Check if GDB is interested in this syscall. */
3411 if (WIFSTOPPED (w)
3412 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3413 && !gdb_catch_this_syscall_p (event_child))
3414 {
3415 if (debug_threads)
3416 {
3417 debug_printf ("Ignored syscall for LWP %ld.\n",
3418 lwpid_of (current_thread));
3419 }
3420
3421 linux_resume_one_lwp (event_child, event_child->stepping,
3422 0, NULL);
3423 return ignore_event (ourstatus);
3424 }
3425
3426 /* If GDB is not interested in this signal, don't stop other
3427 threads, and don't report it to GDB. Just resume the inferior
3428 right away. We do this for threading-related signals as well as
3429 any that GDB specifically requested we ignore. But never ignore
3430 SIGSTOP if we sent it ourselves, and do not ignore signals when
3431 stepping - they may require special handling to skip the signal
3432 handler. Also never ignore signals that could be caused by a
3433 breakpoint. */
3434 if (WIFSTOPPED (w)
3435 && current_thread->last_resume_kind != resume_step
3436 && (
3437 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3438 (current_process ()->priv->thread_db != NULL
3439 && (WSTOPSIG (w) == __SIGRTMIN
3440 || WSTOPSIG (w) == __SIGRTMIN + 1))
3441 ||
3442 #endif
3443 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3444 && !(WSTOPSIG (w) == SIGSTOP
3445 && current_thread->last_resume_kind == resume_stop)
3446 && !linux_wstatus_maybe_breakpoint (w))))
3447 {
3448 siginfo_t info, *info_p;
3449
3450 if (debug_threads)
3451 debug_printf ("Ignored signal %d for LWP %ld.\n",
3452 WSTOPSIG (w), lwpid_of (current_thread));
3453
3454 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3455 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3456 info_p = &info;
3457 else
3458 info_p = NULL;
3459
3460 if (step_over_finished)
3461 {
3462 /* We cancelled this thread's step-over above. We still
3463 need to unsuspend all other LWPs, and set them back
3464 running again while the signal handler runs. */
3465 unsuspend_all_lwps (event_child);
3466
3467 /* Enqueue the pending signal info so that proceed_all_lwps
3468 doesn't lose it. */
3469 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3470
3471 proceed_all_lwps ();
3472 }
3473 else
3474 {
3475 linux_resume_one_lwp (event_child, event_child->stepping,
3476 WSTOPSIG (w), info_p);
3477 }
3478 return ignore_event (ourstatus);
3479 }
3480
3481 /* Note that all addresses are always "out of the step range" when
3482 there's no range to begin with. */
3483 in_step_range = lwp_in_step_range (event_child);
3484
3485 /* If GDB wanted this thread to single step, and the thread is out
3486 of the step range, we always want to report the SIGTRAP, and let
3487 GDB handle it. Watchpoints should always be reported. So should
3488 signals we can't explain. A SIGTRAP we can't explain could be a
3489      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3490      we do, we'll be able to handle GDB breakpoints on top of internal
3491      breakpoints, by handling the internal breakpoint and still
3492      reporting the event to GDB.  If we don't, we're out of luck; GDB
3493 won't see the breakpoint hit. If we see a single-step event but
3494 the thread should be continuing, don't pass the trap to gdb.
3495 That indicates that we had previously finished a single-step but
3496 left the single-step pending -- see
3497 complete_ongoing_step_over. */
3498 report_to_gdb = (!maybe_internal_trap
3499 || (current_thread->last_resume_kind == resume_step
3500 && !in_step_range)
3501 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3502 || (!in_step_range
3503 && !bp_explains_trap
3504 && !trace_event
3505 && !step_over_finished
3506 && !(current_thread->last_resume_kind == resume_continue
3507 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3508 || (gdb_breakpoint_here (event_child->stop_pc)
3509 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3510 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3511 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3512
3513 run_breakpoint_commands (event_child->stop_pc);
3514
3515 /* We found no reason GDB would want us to stop. We either hit one
3516 of our own breakpoints, or finished an internal step GDB
3517 shouldn't know about. */
3518 if (!report_to_gdb)
3519 {
3520 if (debug_threads)
3521 {
3522 if (bp_explains_trap)
3523 debug_printf ("Hit a gdbserver breakpoint.\n");
3524 if (step_over_finished)
3525 debug_printf ("Step-over finished.\n");
3526 if (trace_event)
3527 debug_printf ("Tracepoint event.\n");
3528 if (lwp_in_step_range (event_child))
3529 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3530 paddress (event_child->stop_pc),
3531 paddress (event_child->step_range_start),
3532 paddress (event_child->step_range_end));
3533 }
3534
3535 /* We're not reporting this breakpoint to GDB, so apply the
3536 decr_pc_after_break adjustment to the inferior's regcache
3537 ourselves. */
3538
3539 if (the_low_target.set_pc != NULL)
3540 {
3541 struct regcache *regcache
3542 = get_thread_regcache (current_thread, 1);
3543 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3544 }
3545
3546 /* We may have finished stepping over a breakpoint. If so,
3547 we've stopped and suspended all LWPs momentarily except the
3548 stepping one. This is where we resume them all again. We're
3549 going to keep waiting, so use proceed, which handles stepping
3550 over the next breakpoint. */
3551 if (debug_threads)
3552 debug_printf ("proceeding all threads.\n");
3553
3554 if (step_over_finished)
3555 unsuspend_all_lwps (event_child);
3556
3557 proceed_all_lwps ();
3558 return ignore_event (ourstatus);
3559 }
3560
3561 if (debug_threads)
3562 {
3563 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3564 {
3565 char *str;
3566
3567 str = target_waitstatus_to_string (&event_child->waitstatus);
3568 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3569 lwpid_of (get_lwp_thread (event_child)), str);
3570 xfree (str);
3571 }
3572 if (current_thread->last_resume_kind == resume_step)
3573 {
3574 if (event_child->step_range_start == event_child->step_range_end)
3575 debug_printf ("GDB wanted to single-step, reporting event.\n");
3576 else if (!lwp_in_step_range (event_child))
3577 debug_printf ("Out of step range, reporting event.\n");
3578 }
3579 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3580 debug_printf ("Stopped by watchpoint.\n");
3581 else if (gdb_breakpoint_here (event_child->stop_pc))
3582 debug_printf ("Stopped by GDB breakpoint.\n");
3583       debug_printf ("Hit a non-gdbserver trap event.\n");
3585 }
3586
3587 /* Alright, we're going to report a stop. */
3588
3589 if (!stabilizing_threads)
3590 {
3591 /* In all-stop, stop all threads. */
3592 if (!non_stop)
3593 stop_all_lwps (0, NULL);
3594
3595 /* If we're not waiting for a specific LWP, choose an event LWP
3596 from among those that have had events. Giving equal priority
3597 to all LWPs that have had events helps prevent
3598 starvation. */
3599 if (ptid_equal (ptid, minus_one_ptid))
3600 {
3601 event_child->status_pending_p = 1;
3602 event_child->status_pending = w;
3603
3604 select_event_lwp (&event_child);
3605
3606 /* current_thread and event_child must stay in sync. */
3607 current_thread = get_lwp_thread (event_child);
3608
3609 event_child->status_pending_p = 0;
3610 w = event_child->status_pending;
3611 }
3612
3613 if (step_over_finished)
3614 {
3615 if (!non_stop)
3616 {
3617 /* If we were doing a step-over, all other threads but
3618 the stepping one had been paused in start_step_over,
3619 with their suspend counts incremented. We don't want
3620 to do a full unstop/unpause, because we're in
3621 all-stop mode (so we want threads stopped), but we
3622 still need to unsuspend the other threads, to
3623 decrement their `suspended' count back. */
3624 unsuspend_all_lwps (event_child);
3625 }
3626 else
3627 {
3628 /* If we just finished a step-over, then all threads had
3629 been momentarily paused. In all-stop, that's fine,
3630 we want threads stopped by now anyway. In non-stop,
3631 we need to re-resume threads that GDB wanted to be
3632 running. */
3633 unstop_all_lwps (1, event_child);
3634 }
3635 }
3636
3637 /* Stabilize threads (move out of jump pads). */
3638 if (!non_stop)
3639 stabilize_threads ();
3640 }
3641 else
3642 {
3643 /* If we just finished a step-over, then all threads had been
3644 momentarily paused. In all-stop, that's fine, we want
3645 threads stopped by now anyway. In non-stop, we need to
3646 re-resume threads that GDB wanted to be running. */
3647 if (step_over_finished)
3648 unstop_all_lwps (1, event_child);
3649 }
3650
3651 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3652 {
3653 /* If the reported event is an exit, fork, vfork or exec, let
3654 GDB know. */
3655 *ourstatus = event_child->waitstatus;
3656 /* Clear the event lwp's waitstatus since we handled it already. */
3657 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3658 }
3659 else
3660 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3661
3662 /* Now that we've selected our final event LWP, un-adjust its PC if
3663 it was a software breakpoint, and the client doesn't know we can
3664 adjust the breakpoint ourselves. */
3665 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3666 && !swbreak_feature)
3667 {
3668 int decr_pc = the_low_target.decr_pc_after_break;
3669
3670 if (decr_pc != 0)
3671 {
3672 struct regcache *regcache
3673 = get_thread_regcache (current_thread, 1);
3674 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3675 }
3676 }
3677
3678 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3679 {
3680 int sysret;
3681
3682 get_syscall_trapinfo (event_child,
3683 &ourstatus->value.syscall_number, &sysret);
3684 ourstatus->kind = event_child->syscall_state;
3685 }
3686 else if (current_thread->last_resume_kind == resume_stop
3687 && WSTOPSIG (w) == SIGSTOP)
3688 {
3689 /* A thread that GDB requested to stop with vCont;t stopped
3690 cleanly, so report it as SIG0. The use of
3691 SIGSTOP is an implementation detail. */
3692 ourstatus->value.sig = GDB_SIGNAL_0;
3693 }
3694 else if (current_thread->last_resume_kind == resume_stop
3695 && WSTOPSIG (w) != SIGSTOP)
3696 {
3697 /* A thread that GDB requested to stop with vCont;t, but that
3698 stopped for other reasons. */
3699 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3700 }
3701 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3702 {
3703 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3704 }
3705
3706 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3707
3708 if (debug_threads)
3709 {
3710 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3711 target_pid_to_str (ptid_of (current_thread)),
3712 ourstatus->kind, ourstatus->value.sig);
3713 debug_exit ();
3714 }
3715
3716 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3717 return filter_exit_event (event_child, ourstatus);
3718
3719 return ptid_of (current_thread);
3720 }
3721
3722 /* Get rid of any pending event in the pipe. */
3723 static void
3724 async_file_flush (void)
3725 {
3726 int ret;
3727 char buf;
3728
3729 do
3730 ret = read (linux_event_pipe[0], &buf, 1);
3731 while (ret >= 0 || (ret == -1 && errno == EINTR));
3732 }
3733
3734 /* Put something in the pipe, so the event loop wakes up. */
3735 static void
3736 async_file_mark (void)
3737 {
3738 int ret;
3739
3740 async_file_flush ();
3741
3742 do
3743 ret = write (linux_event_pipe[1], "+", 1);
3744 while (ret == 0 || (ret == -1 && errno == EINTR));
3745
3746 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3747 be awakened anyway. */
3748 }
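/* A minimal, self-contained sketch (illustrative only, not part of
   gdbserver) of the self-pipe pattern the two helpers above rely on.
   It assumes both pipe ends are made non-blocking, which is what lets
   async_file_flush drain until read returns -1 with EAGAIN, and what
   makes a write to a full pipe fail with EAGAIN instead of blocking.
   The helper name is hypothetical.  */
#if 0
static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) == -1)
    return -1;

  /* Mark both ends non-blocking so flushing and marking never
     stall.  */
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) == -1)
    {
      close (fds[0]);
      close (fds[1]);
      return -1;
    }
  return 0;
}
#endif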
3749
3750 static ptid_t
3751 linux_wait (ptid_t ptid,
3752 struct target_waitstatus *ourstatus, int target_options)
3753 {
3754 ptid_t event_ptid;
3755
3756 /* Flush the async file first. */
3757 if (target_is_async_p ())
3758 async_file_flush ();
3759
3760 do
3761 {
3762 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3763 }
3764 while ((target_options & TARGET_WNOHANG) == 0
3765 && ptid_equal (event_ptid, null_ptid)
3766 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3767
3768 /* If at least one stop was reported, there may be more. A single
3769 SIGCHLD can signal more than one child stop. */
3770 if (target_is_async_p ()
3771 && (target_options & TARGET_WNOHANG) != 0
3772 && !ptid_equal (event_ptid, null_ptid))
3773 async_file_mark ();
3774
3775 return event_ptid;
3776 }
3777
3778 /* Send a signal to an LWP. */
3779
3780 static int
3781 kill_lwp (unsigned long lwpid, int signo)
3782 {
3783 int ret;
3784
3785 errno = 0;
3786 ret = syscall (__NR_tkill, lwpid, signo);
3787 if (errno == ENOSYS)
3788 {
3789 /* If tkill fails, then we are not using nptl threads, a
3790 configuration we no longer support. */
3791 perror_with_name (("tkill"));
3792 }
3793 return ret;
3794 }
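/* Illustrative sketch only, not used by gdbserver: tkill directs the
   signal at a single thread, where kill(2) would signal the whole
   thread group.  The newer tgkill syscall also takes the thread-group
   (process) id, which guards against the race where a thread id is
   recycled by an unrelated process.  Hypothetical helper: */
#if 0
static int
kill_lwp_tgkill (unsigned long pid, unsigned long lwpid, int signo)
{
  /* tgkill (tgid, tid, sig) fails with ESRCH if TID no longer
     belongs to thread group TGID.  */
  return syscall (__NR_tgkill, pid, lwpid, signo);
}
#endif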
3795
3796 void
3797 linux_stop_lwp (struct lwp_info *lwp)
3798 {
3799 send_sigstop (lwp);
3800 }
3801
3802 static void
3803 send_sigstop (struct lwp_info *lwp)
3804 {
3805 int pid;
3806
3807 pid = lwpid_of (get_lwp_thread (lwp));
3808
3809 /* If we already have a pending stop signal for this process, don't
3810 send another. */
3811 if (lwp->stop_expected)
3812 {
3813 if (debug_threads)
3814 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3815
3816 return;
3817 }
3818
3819 if (debug_threads)
3820 debug_printf ("Sending sigstop to lwp %d\n", pid);
3821
3822 lwp->stop_expected = 1;
3823 kill_lwp (pid, SIGSTOP);
3824 }
3825
3826 static int
3827 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3828 {
3829 struct thread_info *thread = (struct thread_info *) entry;
3830 struct lwp_info *lwp = get_thread_lwp (thread);
3831
3832 /* Ignore EXCEPT. */
3833 if (lwp == except)
3834 return 0;
3835
3836 if (lwp->stopped)
3837 return 0;
3838
3839 send_sigstop (lwp);
3840 return 0;
3841 }
3842
3843 /* Increment the suspend count of an LWP, and stop it, if not stopped
3844 yet. */
3845 static int
3846 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3847 void *except)
3848 {
3849 struct thread_info *thread = (struct thread_info *) entry;
3850 struct lwp_info *lwp = get_thread_lwp (thread);
3851
3852 /* Ignore EXCEPT. */
3853 if (lwp == except)
3854 return 0;
3855
3856 lwp_suspended_inc (lwp);
3857
3858 return send_sigstop_callback (entry, except);
3859 }
3860
3861 static void
3862 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3863 {
3864 /* Store the exit status for later. */
3865 lwp->status_pending_p = 1;
3866 lwp->status_pending = wstat;
3867
3868 /* Store in waitstatus as well, as there's nothing else to process
3869 for this event. */
3870 if (WIFEXITED (wstat))
3871 {
3872 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3873 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3874 }
3875 else if (WIFSIGNALED (wstat))
3876 {
3877 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3878 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3879 }
3880
3881 /* Prevent trying to stop it. */
3882 lwp->stopped = 1;
3883
3884 /* No further stops are expected from a dead lwp. */
3885 lwp->stop_expected = 0;
3886 }
3887
3888 /* Return true if LWP has exited already, and has a pending exit event
3889 to report to GDB. */
3890
3891 static int
3892 lwp_is_marked_dead (struct lwp_info *lwp)
3893 {
3894 return (lwp->status_pending_p
3895 && (WIFEXITED (lwp->status_pending)
3896 || WIFSIGNALED (lwp->status_pending)));
3897 }
3898
3899 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3900
3901 static void
3902 wait_for_sigstop (void)
3903 {
3904 struct thread_info *saved_thread;
3905 ptid_t saved_tid;
3906 int wstat;
3907 int ret;
3908
3909 saved_thread = current_thread;
3910 if (saved_thread != NULL)
3911 saved_tid = saved_thread->entry.id;
3912 else
3913 saved_tid = null_ptid; /* avoid bogus unused warning */
3914
3915 if (debug_threads)
3916 debug_printf ("wait_for_sigstop: pulling events\n");
3917
3918 /* Passing NULL_PTID as filter indicates we want all events to be
3919 left pending. Eventually this returns when there are no
3920 unwaited-for children left. */
3921 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3922 &wstat, __WALL);
3923 gdb_assert (ret == -1);
3924
3925 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3926 current_thread = saved_thread;
3927 else
3928 {
3929 if (debug_threads)
3930 debug_printf ("Previously current thread died.\n");
3931
3932 /* We can't change the current inferior behind GDB's back;
3933 otherwise, a subsequent command may apply to the wrong
3934 process. */
3935 current_thread = NULL;
3936 }
3937 }
3938
3939 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3940 move it out, because we need to report the stop event to GDB. For
3941 example, if the user puts a breakpoint in the jump pad, it's
3942 because she wants to debug it. */
3943
3944 static int
3945 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3946 {
3947 struct thread_info *thread = (struct thread_info *) entry;
3948 struct lwp_info *lwp = get_thread_lwp (thread);
3949
3950 if (lwp->suspended != 0)
3951 {
3952 internal_error (__FILE__, __LINE__,
3953 "LWP %ld is suspended, suspended=%d\n",
3954 lwpid_of (thread), lwp->suspended);
3955 }
3956 gdb_assert (lwp->stopped);
3957
3958 /* Allow debugging the jump pad, gdb_collect, etc. */
3959 return (supports_fast_tracepoints ()
3960 && agent_loaded_p ()
3961 && (gdb_breakpoint_here (lwp->stop_pc)
3962 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3963 || thread->last_resume_kind == resume_step)
3964 && linux_fast_tracepoint_collecting (lwp, NULL));
3965 }
3966
3967 static void
3968 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3969 {
3970 struct thread_info *thread = (struct thread_info *) entry;
3971 struct thread_info *saved_thread;
3972 struct lwp_info *lwp = get_thread_lwp (thread);
3973 int *wstat;
3974
3975 if (lwp->suspended != 0)
3976 {
3977 internal_error (__FILE__, __LINE__,
3978 "LWP %ld is suspended, suspended=%d\n",
3979 lwpid_of (thread), lwp->suspended);
3980 }
3981 gdb_assert (lwp->stopped);
3982
3983 /* For gdb_breakpoint_here. */
3984 saved_thread = current_thread;
3985 current_thread = thread;
3986
3987 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3988
3989 /* Allow debugging the jump pad, gdb_collect, etc. */
3990 if (!gdb_breakpoint_here (lwp->stop_pc)
3991 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3992 && thread->last_resume_kind != resume_step
3993 && maybe_move_out_of_jump_pad (lwp, wstat))
3994 {
3995 if (debug_threads)
3996 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3997 lwpid_of (thread));
3998
3999 if (wstat)
4000 {
4001 lwp->status_pending_p = 0;
4002 enqueue_one_deferred_signal (lwp, wstat);
4003
4004 if (debug_threads)
4005 debug_printf ("Signal %d for LWP %ld deferred "
4006 "(in jump pad)\n",
4007 WSTOPSIG (*wstat), lwpid_of (thread));
4008 }
4009
4010 linux_resume_one_lwp (lwp, 0, 0, NULL);
4011 }
4012 else
4013 lwp_suspended_inc (lwp);
4014
4015 current_thread = saved_thread;
4016 }
4017
4018 static int
4019 lwp_running (struct inferior_list_entry *entry, void *data)
4020 {
4021 struct thread_info *thread = (struct thread_info *) entry;
4022 struct lwp_info *lwp = get_thread_lwp (thread);
4023
4024 if (lwp_is_marked_dead (lwp))
4025 return 0;
4026 if (lwp->stopped)
4027 return 0;
4028 return 1;
4029 }
4030
4031 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4032 If SUSPEND, then also increase the suspend count of every LWP,
4033 except EXCEPT. */
4034
4035 static void
4036 stop_all_lwps (int suspend, struct lwp_info *except)
4037 {
4038 /* Should not be called recursively. */
4039 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4040
4041 if (debug_threads)
4042 {
4043 debug_enter ();
4044 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4045 suspend ? "stop-and-suspend" : "stop",
4046 except != NULL
4047 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4048 : "none");
4049 }
4050
4051 stopping_threads = (suspend
4052 ? STOPPING_AND_SUSPENDING_THREADS
4053 : STOPPING_THREADS);
4054
4055 if (suspend)
4056 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4057 else
4058 find_inferior (&all_threads, send_sigstop_callback, except);
4059 wait_for_sigstop ();
4060 stopping_threads = NOT_STOPPING_THREADS;
4061
4062 if (debug_threads)
4063 {
4064 debug_printf ("stop_all_lwps done, setting stopping_threads "
4065 "back to !stopping\n");
4066 debug_exit ();
4067 }
4068 }
4069
4070 /* Enqueue one signal in the chain of signals which need to be
4071 delivered to this process on next resume. */
4072
4073 static void
4074 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4075 {
4076 struct pending_signals *p_sig = XNEW (struct pending_signals);
4077
4078 p_sig->prev = lwp->pending_signals;
4079 p_sig->signal = signal;
4080 if (info == NULL)
4081 memset (&p_sig->info, 0, sizeof (siginfo_t));
4082 else
4083 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4084 lwp->pending_signals = p_sig;
4085 }
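/* Illustrative sketch only: the chain above is pushed at the head
   (each new entry's `prev' points at the old head), so the oldest
   signal sits at the tail.  Consumers, such as
   linux_resume_one_lwp_throw below, walk to the tail before taking
   an entry, which yields FIFO delivery order.  A hypothetical
   dequeue helper would look like this: */
#if 0
static struct pending_signals *
dequeue_oldest_pending_signal (struct lwp_info *lwp)
{
  struct pending_signals **p_sig = &lwp->pending_signals;
  struct pending_signals *oldest;

  if (*p_sig == NULL)
    return NULL;

  /* Walk to the tail: the entry that was enqueued first.  */
  while ((*p_sig)->prev != NULL)
    p_sig = &(*p_sig)->prev;

  oldest = *p_sig;
  *p_sig = NULL;	/* Unlink the tail entry.  */
  return oldest;
}
#endif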
4086
4087 /* Install breakpoints for software single stepping. */
4088
4089 static void
4090 install_software_single_step_breakpoints (struct lwp_info *lwp)
4091 {
4092 int i;
4093 CORE_ADDR pc;
4094 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4095 VEC (CORE_ADDR) *next_pcs = NULL;
4096 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4097
4098 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4099
4100 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4101 set_reinsert_breakpoint (pc);
4102
4103 do_cleanups (old_chain);
4104 }
4105
4106 /* Single step via hardware or software single step.
4107 Return 1 if hardware single stepping will be used, 0 if software
4108 single stepping was set up or the target can't single step. */
4109
4110 static int
4111 single_step (struct lwp_info* lwp)
4112 {
4113 int step = 0;
4114
4115 if (can_hardware_single_step ())
4116 {
4117 step = 1;
4118 }
4119 else if (can_software_single_step ())
4120 {
4121 install_software_single_step_breakpoints (lwp);
4122 step = 0;
4123 }
4124 else
4125 {
4126 if (debug_threads)
4127 debug_printf ("stepping is not implemented on this target\n");
4128 }
4129
4130 return step;
4131 }
4132
4133 /* The signal can be delivered to the inferior if we are not trying to
4134 finish a fast tracepoint collect. Since a signal can be delivered
4135 during the step-over, the program may enter the signal handler and
4136 trap again after returning from it. We can live with the spurious
4137 double traps. */
4138
4139 static int
4140 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4141 {
4142 return !lwp->collecting_fast_tracepoint;
4143 }
4144
4145 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4146 SIGNAL is nonzero, give it that signal. */
4147
4148 static void
4149 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4150 int step, int signal, siginfo_t *info)
4151 {
4152 struct thread_info *thread = get_lwp_thread (lwp);
4153 struct thread_info *saved_thread;
4154 int fast_tp_collecting;
4155 int ptrace_request;
4156 struct process_info *proc = get_thread_process (thread);
4157
4158 /* Note that the target description may not be initialized
4159 (proc->tdesc == NULL) at this point because the program hasn't
4160 stopped at the first instruction yet. It means GDBserver skips
4161 the extra traps from the wrapper program (see option --wrapper).
4162 Code in this function that requires register access should be
4163 guarded by a proc->tdesc != NULL check or equivalent. */
4164
4165 if (lwp->stopped == 0)
4166 return;
4167
4168 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4169
4170 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4171
4172 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4173
4174 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4175 user used the "jump" command, or "set $pc = foo"). */
4176 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4177 {
4178 /* Collecting 'while-stepping' actions doesn't make sense
4179 anymore. */
4180 release_while_stepping_state_list (thread);
4181 }
4182
4183 /* If we have pending signals or status, and a new signal, enqueue the
4184 signal. Also enqueue the signal if it can't be delivered to the
4185 inferior right now. */
4186 if (signal != 0
4187 && (lwp->status_pending_p
4188 || lwp->pending_signals != NULL
4189 || !lwp_signal_can_be_delivered (lwp)))
4190 {
4191 enqueue_pending_signal (lwp, signal, info);
4192
4193 /* Postpone any pending signal. It was enqueued above. */
4194 signal = 0;
4195 }
4196
4197 if (lwp->status_pending_p)
4198 {
4199 if (debug_threads)
4200 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4201 " has pending status\n",
4202 lwpid_of (thread), step ? "step" : "continue",
4203 lwp->stop_expected ? "expected" : "not expected");
4204 return;
4205 }
4206
4207 saved_thread = current_thread;
4208 current_thread = thread;
4209
4210 /* This bit needs some thinking about. If we get a signal that
4211 we must report while a single-step reinsert is still pending,
4212 we often end up resuming the thread. It might be better to
4213 (ew) allow a stack of pending events; then we could be sure that
4214 the reinsert happened right away and not lose any signals.
4215
4216 Making this stack would also shrink the window in which breakpoints are
4217 uninserted (see comment in linux_wait_for_lwp) but not enough for
4218 complete correctness, so it won't solve that problem. It may be
4219 worthwhile just to solve this one, however. */
4220 if (lwp->bp_reinsert != 0)
4221 {
4222 if (debug_threads)
4223 debug_printf (" pending reinsert at 0x%s\n",
4224 paddress (lwp->bp_reinsert));
4225
4226 if (can_hardware_single_step ())
4227 {
4228 if (fast_tp_collecting == 0)
4229 {
4230 if (step == 0)
4231 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4232 if (lwp->suspended)
4233 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4234 lwp->suspended);
4235 }
4236 }
4237
4238 step = maybe_hw_step (thread);
4239 }
4240 else
4241 {
4242 /* If the thread isn't doing a step-over, there shouldn't be any
4243 reinsert breakpoints. */
4244 gdb_assert (!has_reinsert_breakpoints (proc));
4245 }
4246
4247 if (fast_tp_collecting == 1)
4248 {
4249 if (debug_threads)
4250 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4251 " (exit-jump-pad-bkpt)\n",
4252 lwpid_of (thread));
4253 }
4254 else if (fast_tp_collecting == 2)
4255 {
4256 if (debug_threads)
4257 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4258 " single-stepping\n",
4259 lwpid_of (thread));
4260
4261 if (can_hardware_single_step ())
4262 step = 1;
4263 else
4264 {
4265 internal_error (__FILE__, __LINE__,
4266 "moving out of jump pad single-stepping"
4267 " not implemented on this target");
4268 }
4269 }
4270
4271 /* If we have while-stepping actions in this thread, set it stepping.
4272 If we have a signal to deliver, it may or may not be set to
4273 SIG_IGN; we don't know. Assume so, and allow collecting
4274 while-stepping into a signal handler. A possible smart thing to
4275 do would be to set an internal breakpoint at the signal return
4276 address, continue, and carry on catching this while-stepping
4277 action only when that breakpoint is hit. A future
4278 enhancement. */
4279 if (thread->while_stepping != NULL)
4280 {
4281 if (debug_threads)
4282 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4283 lwpid_of (thread));
4284
4285 step = single_step (lwp);
4286 }
4287
4288 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4289 {
4290 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4291
4292 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4293
4294 if (debug_threads)
4295 {
4296 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4297 (long) lwp->stop_pc);
4298 }
4299 }
4300
4301 /* If we have pending signals, consume one if it can be delivered to
4302 the inferior. */
4303 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4304 {
4305 struct pending_signals **p_sig;
4306
4307 p_sig = &lwp->pending_signals;
4308 while ((*p_sig)->prev != NULL)
4309 p_sig = &(*p_sig)->prev;
4310
4311 signal = (*p_sig)->signal;
4312 if ((*p_sig)->info.si_signo != 0)
4313 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4314 &(*p_sig)->info);
4315
4316 free (*p_sig);
4317 *p_sig = NULL;
4318 }
4319
4320 if (debug_threads)
4321 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4322 lwpid_of (thread), step ? "step" : "continue", signal,
4323 lwp->stop_expected ? "expected" : "not expected");
4324
4325 if (the_low_target.prepare_to_resume != NULL)
4326 the_low_target.prepare_to_resume (lwp);
4327
4328 regcache_invalidate_thread (thread);
4329 errno = 0;
4330 lwp->stepping = step;
4331 if (step)
4332 ptrace_request = PTRACE_SINGLESTEP;
4333 else if (gdb_catching_syscalls_p (lwp))
4334 ptrace_request = PTRACE_SYSCALL;
4335 else
4336 ptrace_request = PTRACE_CONT;
4337 ptrace (ptrace_request,
4338 lwpid_of (thread),
4339 (PTRACE_TYPE_ARG3) 0,
4340 /* Coerce to a uintptr_t first to avoid potential gcc warning
4341 of coercing an 8 byte integer to a 4 byte pointer. */
4342 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4343
4344 current_thread = saved_thread;
4345 if (errno)
4346 perror_with_name ("resuming thread");
4347
4348 /* Successfully resumed. Clear state that no longer makes sense,
4349 and mark the LWP as running. Must not do this before resuming
4350 otherwise if that fails other code will be confused. E.g., we'd
4351 later try to stop the LWP and hang forever waiting for a stop
4352 status. Note that we must not throw after this is cleared,
4353 otherwise handle_zombie_lwp_error would get confused. */
4354 lwp->stopped = 0;
4355 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4356 }
4357
4358 /* Called when we try to resume a stopped LWP and that errors out. If
4359 the LWP is no longer in ptrace-stopped state (meaning it's a zombie,
4360 or about to become one), discard the error, clear any pending status
4361 the LWP may have, and return true (we'll collect the exit status
4362 soon enough). Otherwise, return false. */
4363
4364 static int
4365 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4366 {
4367 struct thread_info *thread = get_lwp_thread (lp);
4368
4369 /* If we get an error after resuming the LWP successfully, we'd
4370 confuse !T state for the LWP being gone. */
4371 gdb_assert (lp->stopped);
4372
4373 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4374 because even if ptrace failed with ESRCH, the tracee may be "not
4375 yet fully dead", but already refusing ptrace requests. In that
4376 case the tracee has 'R (Running)' state for a little bit
4377 (observed in Linux 3.18). See also the note on ESRCH in the
4378 ptrace(2) man page. Instead, check whether the LWP has any state
4379 other than ptrace-stopped. */
4380
4381 /* Don't assume anything if /proc/PID/status can't be read. */
4382 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4383 {
4384 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4385 lp->status_pending_p = 0;
4386 return 1;
4387 }
4388 return 0;
4389 }
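/* Illustrative sketch only (the real check is
   linux_proc_pid_is_trace_stopped_nowarn in nat/linux-procfs.c):
   deciding "still ptrace-stopped" means reading the State: line of
   /proc/PID/status and looking for trace-stop, rather than testing
   for zombie, for the reasons given above.  Note the state letter
   has varied across kernels ('t' vs. 'T'), so a real implementation
   must be more careful than this sketch.  */
#if 0
static int
pid_is_trace_stopped (unsigned long pid)
{
  char path[64], line[128];
  int stopped = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%lu/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;		/* Can't tell; don't assume anything.  */

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* E.g. "State:\tt (tracing stop)".  */
	stopped = (strstr (line, "(tracing stop)") != NULL);
	break;
      }

  fclose (f);
  return stopped;
}
#endif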
4390
4391 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4392 disappears while we try to resume it. */
4393
4394 static void
4395 linux_resume_one_lwp (struct lwp_info *lwp,
4396 int step, int signal, siginfo_t *info)
4397 {
4398 TRY
4399 {
4400 linux_resume_one_lwp_throw (lwp, step, signal, info);
4401 }
4402 CATCH (ex, RETURN_MASK_ERROR)
4403 {
4404 if (!check_ptrace_stopped_lwp_gone (lwp))
4405 throw_exception (ex);
4406 }
4407 END_CATCH
4408 }
4409
4410 struct thread_resume_array
4411 {
4412 struct thread_resume *resume;
4413 size_t n;
4414 };
4415
4416 /* This function is called once per thread via find_inferior.
4417 ARG is a pointer to a thread_resume_array struct.
4418 We look up the thread specified by ENTRY in ARG, and mark the thread
4419 with a pointer to the appropriate resume request.
4420
4421 This algorithm is O(threads * resume elements), but the number of
4422 resume elements is small (and will remain small at least until
4423 GDB supports thread suspension). */
4424
4425 static int
4426 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4427 {
4428 struct thread_info *thread = (struct thread_info *) entry;
4429 struct lwp_info *lwp = get_thread_lwp (thread);
4430 int ndx;
4431 struct thread_resume_array *r;
4432
4433 r = (struct thread_resume_array *) arg;
4434
4435 for (ndx = 0; ndx < r->n; ndx++)
4436 {
4437 ptid_t ptid = r->resume[ndx].thread;
4438 if (ptid_equal (ptid, minus_one_ptid)
4439 || ptid_equal (ptid, entry->id)
4440 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4441 of PID'. */
4442 || (ptid_get_pid (ptid) == pid_of (thread)
4443 && (ptid_is_pid (ptid)
4444 || ptid_get_lwp (ptid) == -1)))
4445 {
4446 if (r->resume[ndx].kind == resume_stop
4447 && thread->last_resume_kind == resume_stop)
4448 {
4449 if (debug_threads)
4450 debug_printf ("already %s LWP %ld at GDB's request\n",
4451 (thread->last_status.kind
4452 == TARGET_WAITKIND_STOPPED)
4453 ? "stopped"
4454 : "stopping",
4455 lwpid_of (thread));
4456
4457 continue;
4458 }
4459
4460 lwp->resume = &r->resume[ndx];
4461 thread->last_resume_kind = lwp->resume->kind;
4462
4463 lwp->step_range_start = lwp->resume->step_range_start;
4464 lwp->step_range_end = lwp->resume->step_range_end;
4465
4466 /* If we had a deferred signal to report, dequeue one now.
4467 This can happen if the LWP gets more than one signal while
4468 trying to get out of a jump pad. */
4469 if (lwp->stopped
4470 && !lwp->status_pending_p
4471 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4472 {
4473 lwp->status_pending_p = 1;
4474
4475 if (debug_threads)
4476 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4477 "leaving status pending.\n",
4478 WSTOPSIG (lwp->status_pending),
4479 lwpid_of (thread));
4480 }
4481
4482 return 0;
4483 }
4484 }
4485
4486 /* No resume action for this thread. */
4487 lwp->resume = NULL;
4488
4489 return 0;
4490 }
4491
4492 /* find_inferior callback for linux_resume.
4493 Set *FLAG_P if this lwp has an interesting status pending. */
4494
4495 static int
4496 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4497 {
4498 struct thread_info *thread = (struct thread_info *) entry;
4499 struct lwp_info *lwp = get_thread_lwp (thread);
4500
4501 /* LWPs which will not be resumed are not interesting, because
4502 we might not wait for them next time through linux_wait. */
4503 if (lwp->resume == NULL)
4504 return 0;
4505
4506 if (thread_still_has_status_pending_p (thread))
4507 * (int *) flag_p = 1;
4508
4509 return 0;
4510 }
4511
4512 /* Return 1 if this lwp that GDB wants running is stopped at an
4513 internal breakpoint that we need to step over. It assumes that any
4514 required STOP_PC adjustment has already been propagated to the
4515 inferior's regcache. */
4516
4517 static int
4518 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4519 {
4520 struct thread_info *thread = (struct thread_info *) entry;
4521 struct lwp_info *lwp = get_thread_lwp (thread);
4522 struct thread_info *saved_thread;
4523 CORE_ADDR pc;
4524 struct process_info *proc = get_thread_process (thread);
4525
4526 /* GDBserver is skipping the extra traps from the wrapper program,
4527 so we don't have to do a step over. */
4528 if (proc->tdesc == NULL)
4529 return 0;
4530
4531 /* LWPs which will not be resumed are not interesting, because we
4532 might not wait for them next time through linux_wait. */
4533
4534 if (!lwp->stopped)
4535 {
4536 if (debug_threads)
4537 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4538 lwpid_of (thread));
4539 return 0;
4540 }
4541
4542 if (thread->last_resume_kind == resume_stop)
4543 {
4544 if (debug_threads)
4545 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4546 " stopped\n",
4547 lwpid_of (thread));
4548 return 0;
4549 }
4550
4551 gdb_assert (lwp->suspended >= 0);
4552
4553 if (lwp->suspended)
4554 {
4555 if (debug_threads)
4556 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4557 lwpid_of (thread));
4558 return 0;
4559 }
4560
4561 if (lwp->status_pending_p)
4562 {
4563 if (debug_threads)
4564 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4565 " status.\n",
4566 lwpid_of (thread));
4567 return 0;
4568 }
4569
4570 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4571 or we have. */
4572 pc = get_pc (lwp);
4573
4574 /* If the PC has changed since we stopped, then don't do anything,
4575 and let the breakpoint/tracepoint be hit. This happens if, for
4576 instance, GDB handled the decr_pc_after_break subtraction itself,
4577 GDB is OOL stepping this thread, or the user has issued a "jump"
4578 command, or poked the thread's registers herself. */
4579 if (pc != lwp->stop_pc)
4580 {
4581 if (debug_threads)
4582 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4583 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4584 lwpid_of (thread),
4585 paddress (lwp->stop_pc), paddress (pc));
4586 return 0;
4587 }
4588
4589 /* On a software single-step target, resume the inferior with the
4590 signal rather than stepping over. */
4591 if (can_software_single_step ()
4592 && lwp->pending_signals != NULL
4593 && lwp_signal_can_be_delivered (lwp))
4594 {
4595 if (debug_threads)
4596 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4597 " signals.\n",
4598 lwpid_of (thread));
4599
4600 return 0;
4601 }
4602
4603 saved_thread = current_thread;
4604 current_thread = thread;
4605
4606 /* We can only step over breakpoints we know about. */
4607 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4608 {
4609 /* Don't step over a breakpoint that GDB expects to hit,
4610 though. If the condition is being evaluated on the target's side
4611 and it evaluates to false, step over this breakpoint as well. */
4612 if (gdb_breakpoint_here (pc)
4613 && gdb_condition_true_at_breakpoint (pc)
4614 && gdb_no_commands_at_breakpoint (pc))
4615 {
4616 if (debug_threads)
4617 debug_printf ("Need step over [LWP %ld]? yes, but found"
4618 " GDB breakpoint at 0x%s; skipping step over\n",
4619 lwpid_of (thread), paddress (pc));
4620
4621 current_thread = saved_thread;
4622 return 0;
4623 }
4624 else
4625 {
4626 if (debug_threads)
4627 debug_printf ("Need step over [LWP %ld]? yes, "
4628 "found breakpoint at 0x%s\n",
4629 lwpid_of (thread), paddress (pc));
4630
4631 /* We've found an lwp that needs stepping over --- return 1 so
4632 that find_inferior stops looking. */
4633 current_thread = saved_thread;
4634
4635 return 1;
4636 }
4637 }
4638
4639 current_thread = saved_thread;
4640
4641 if (debug_threads)
4642 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4643 " at 0x%s\n",
4644 lwpid_of (thread), paddress (pc));
4645
4646 return 0;
4647 }
4648
4649 /* Start a step-over operation on LWP. When LWP is stopped at a
4650 breakpoint, to make progress, we need to move the breakpoint out
4651 of the way. If we let other threads run while we do that, they may
4652 pass by the breakpoint location and miss hitting it. To avoid
4653 that, a step-over momentarily stops all threads while LWP is
4654 single-stepped by either hardware or software while the breakpoint
4655 is temporarily uninserted from the inferior. When the single-step
4656 finishes, we reinsert the breakpoint, and let all threads that are
4657 supposed to be running run again. */
4658
4659 static int
4660 start_step_over (struct lwp_info *lwp)
4661 {
4662 struct thread_info *thread = get_lwp_thread (lwp);
4663 struct thread_info *saved_thread;
4664 CORE_ADDR pc;
4665 int step;
4666
4667 if (debug_threads)
4668 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4669 lwpid_of (thread));
4670
4671 stop_all_lwps (1, lwp);
4672
4673 if (lwp->suspended != 0)
4674 {
4675 internal_error (__FILE__, __LINE__,
4676 "LWP %ld suspended=%d\n", lwpid_of (thread),
4677 lwp->suspended);
4678 }
4679
4680 if (debug_threads)
4681 debug_printf ("Done stopping all threads for step-over.\n");
4682
4683 /* Note, we should always reach here with an already adjusted PC,
4684 either by GDB (if we're resuming due to GDB's request), or by our
4685 caller, if we just finished handling an internal breakpoint GDB
4686 shouldn't care about. */
4687 pc = get_pc (lwp);
4688
4689 saved_thread = current_thread;
4690 current_thread = thread;
4691
4692 lwp->bp_reinsert = pc;
4693 uninsert_breakpoints_at (pc);
4694 uninsert_fast_tracepoint_jumps_at (pc);
4695
4696 step = single_step (lwp);
4697
4698 current_thread = saved_thread;
4699
4700 linux_resume_one_lwp (lwp, step, 0, NULL);
4701
4702 /* Require next event from this LWP. */
4703 step_over_bkpt = thread->entry.id;
4704 return 1;
4705 }
4706
4707 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4708 start_step_over, if still there, and delete any reinsert
4709 breakpoints we've set, on non-hardware single-step targets. */
4710
4711 static int
4712 finish_step_over (struct lwp_info *lwp)
4713 {
4714 if (lwp->bp_reinsert != 0)
4715 {
4716 struct thread_info *saved_thread = current_thread;
4717
4718 if (debug_threads)
4719 debug_printf ("Finished step over.\n");
4720
4721 current_thread = get_lwp_thread (lwp);
4722
4723 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4724 may be no breakpoint to reinsert there by now. */
4725 reinsert_breakpoints_at (lwp->bp_reinsert);
4726 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4727
4728 lwp->bp_reinsert = 0;
4729
4730 /* Delete any software-single-step reinsert breakpoints. No
4731 longer needed. We don't have to worry about other threads
4732 hitting this trap, and later not being able to explain it,
4733 because we were stepping over a breakpoint, and we hold all
4734 threads but LWP stopped while doing that. */
4735 if (!can_hardware_single_step ())
4736 {
4737 gdb_assert (has_reinsert_breakpoints (current_process ()));
4738 delete_reinsert_breakpoints ();
4739 }
4740
4741 step_over_bkpt = null_ptid;
4742 current_thread = saved_thread;
4743 return 1;
4744 }
4745 else
4746 return 0;
4747 }
4748
4749 /* If there's a step over in progress, wait until all threads stop
4750 (that is, until the stepping thread finishes its step), and
4751 unsuspend all lwps. The stepping thread ends with its status
4752 pending, which is processed later when we get back to processing
4753 events. */
4754
4755 static void
4756 complete_ongoing_step_over (void)
4757 {
4758 if (!ptid_equal (step_over_bkpt, null_ptid))
4759 {
4760 struct lwp_info *lwp;
4761 int wstat;
4762 int ret;
4763
4764 if (debug_threads)
4765 debug_printf ("detach: step over in progress, finish it first\n");
4766
4767 /* Passing NULL_PTID as filter indicates we want all events to
4768 be left pending. Eventually this returns when there are no
4769 unwaited-for children left. */
4770 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4771 &wstat, __WALL);
4772 gdb_assert (ret == -1);
4773
4774 lwp = find_lwp_pid (step_over_bkpt);
4775 if (lwp != NULL)
4776 finish_step_over (lwp);
4777 step_over_bkpt = null_ptid;
4778 unsuspend_all_lwps (lwp);
4779 }
4780 }
4781
4782 /* This function is called once per thread. We check the thread's resume
4783 request, which will tell us whether to resume, step, or leave the thread
4784 stopped; and what signal, if any, it should be sent.
4785
4786 For threads which we aren't explicitly told otherwise, we preserve
4787 the stepping flag; this is used for stepping over gdbserver-placed
4788 breakpoints.
4789
4790 If pending_flags was set in any thread, we queue any needed
4791 signals, since we won't actually resume. We already have a pending
4792 event to report, so we don't need to preserve any step requests;
4793 they should be re-issued if necessary. */
4794
4795 static int
4796 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4797 {
4798 struct thread_info *thread = (struct thread_info *) entry;
4799 struct lwp_info *lwp = get_thread_lwp (thread);
4800 int step;
4801 int leave_all_stopped = * (int *) arg;
4802 int leave_pending;
4803
4804 if (lwp->resume == NULL)
4805 return 0;
4806
4807 if (lwp->resume->kind == resume_stop)
4808 {
4809 if (debug_threads)
4810 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4811
4812 if (!lwp->stopped)
4813 {
4814 if (debug_threads)
4815 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4816
4817 /* Stop the thread, and wait for the event asynchronously,
4818 through the event loop. */
4819 send_sigstop (lwp);
4820 }
4821 else
4822 {
4823 if (debug_threads)
4824 debug_printf ("already stopped LWP %ld\n",
4825 lwpid_of (thread));
4826
4827 /* The LWP may have been stopped in an internal event that
4828 was not meant to be notified back to GDB (e.g., gdbserver
4829 breakpoint), so we should be reporting a stop event in
4830 this case too. */
4831
4832 /* If the thread already has a pending SIGSTOP, this is a
4833 no-op. Otherwise, something later will presumably resume
4834 the thread and this will cause it to cancel any pending
4835 operation, due to last_resume_kind == resume_stop. If
4836 the thread already has a pending status to report, we
4837 will still report it the next time we wait - see
4838 status_pending_p_callback. */
4839
4840 /* If we already have a pending signal to report, then
4841 there's no need to queue a SIGSTOP, as this means we're
4842 midway through moving the LWP out of the jumppad, and we
4843 will report the pending signal as soon as that is
4844 finished. */
4845 if (lwp->pending_signals_to_report == NULL)
4846 send_sigstop (lwp);
4847 }
4848
4849 /* For stop requests, we're done. */
4850 lwp->resume = NULL;
4851 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4852 return 0;
4853 }
4854
4855 /* If this thread which is about to be resumed has a pending status,
4856 then don't resume it - we can just report the pending status.
4857 Likewise if it is suspended, because e.g., another thread is
4858 stepping past a breakpoint. Make sure to queue any signals that
4859 would otherwise be sent. In all-stop mode, we make this decision
4860 based on whether *any* thread has a pending status. If there's a
4861 thread that needs the step-over-breakpoint dance, then don't
4862 resume any other thread but that particular one. */
4863 leave_pending = (lwp->suspended
4864 || lwp->status_pending_p
4865 || leave_all_stopped);
4866
4867 if (!leave_pending)
4868 {
4869 if (debug_threads)
4870 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4871
4872 step = (lwp->resume->kind == resume_step);
4873 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4874 }
4875 else
4876 {
4877 if (debug_threads)
4878 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4879
4880 /* If we have a new signal, enqueue the signal. */
4881 if (lwp->resume->sig != 0)
4882 {
4883 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4884
4885 p_sig->prev = lwp->pending_signals;
4886 p_sig->signal = lwp->resume->sig;
4887
4888 /* If this is the same signal we were previously stopped by,
4889 make sure to queue its siginfo. We can ignore the return
4890 value of ptrace; if it fails, we'll skip
4891 PTRACE_SETSIGINFO. */
4892 if (WIFSTOPPED (lwp->last_status)
4893 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4894 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4895 &p_sig->info);
4896
4897 lwp->pending_signals = p_sig;
4898 }
4899 }
4900
4901 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4902 lwp->resume = NULL;
4903 return 0;
4904 }
4905
4906 static void
4907 linux_resume (struct thread_resume *resume_info, size_t n)
4908 {
4909 struct thread_resume_array array = { resume_info, n };
4910 struct thread_info *need_step_over = NULL;
4911 int any_pending;
4912 int leave_all_stopped;
4913
4914 if (debug_threads)
4915 {
4916 debug_enter ();
4917 debug_printf ("linux_resume:\n");
4918 }
4919
4920 find_inferior (&all_threads, linux_set_resume_request, &array);
4921
4922 /* If there is a thread which would otherwise be resumed, which has
4923 a pending status, then don't resume any threads - we can just
4924 report the pending status. Make sure to queue any signals that
4925 would otherwise be sent. In non-stop mode, we'll apply this
4926 logic to each thread individually. We consume all pending events
4927 before considering starting a step-over (in all-stop). */
4928 any_pending = 0;
4929 if (!non_stop)
4930 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4931
4932 /* If there is a thread which would otherwise be resumed, which is
4933 stopped at a breakpoint that needs stepping over, then don't
4934 resume any threads - have it step over the breakpoint with all
4935 other threads stopped, then resume all threads again. Make sure
4936 to queue any signals that would otherwise be delivered or
4937 queued. */
4938 if (!any_pending && supports_breakpoints ())
4939 need_step_over
4940 = (struct thread_info *) find_inferior (&all_threads,
4941 need_step_over_p, NULL);
4942
4943 leave_all_stopped = (need_step_over != NULL || any_pending);
4944
4945 if (debug_threads)
4946 {
4947 if (need_step_over != NULL)
4948 debug_printf ("Not resuming all, need step over\n");
4949 else if (any_pending)
4950 debug_printf ("Not resuming, all-stop and found "
4951 "an LWP with pending status\n");
4952 else
4953 debug_printf ("Resuming, no pending status or step over needed\n");
4954 }
4955
4956 /* Even if we're leaving threads stopped, queue all signals we'd
4957 otherwise deliver. */
4958 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4959
4960 if (need_step_over)
4961 start_step_over (get_thread_lwp (need_step_over));
4962
4963 if (debug_threads)
4964 {
4965 debug_printf ("linux_resume done\n");
4966 debug_exit ();
4967 }
4968
4969 /* We may have events that were pending that can/should be sent to
4970 the client now. Trigger a linux_wait call. */
4971 if (target_is_async_p ())
4972 async_file_mark ();
4973 }
4974
4975 /* This function is called once per thread. We check the thread's
4976 last resume request, which will tell us whether to resume, step, or
4977 leave the thread stopped. Any signal the client requested to be
4978 delivered has already been enqueued at this point.
4979
4980 If any thread that GDB wants running is stopped at an internal
4981 breakpoint that needs stepping over, we start a step-over operation
4982 on that particular thread, and leave all others stopped. */
4983
4984 static int
4985 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4986 {
4987 struct thread_info *thread = (struct thread_info *) entry;
4988 struct lwp_info *lwp = get_thread_lwp (thread);
4989 int step;
4990
4991 if (lwp == except)
4992 return 0;
4993
4994 if (debug_threads)
4995 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4996
4997 if (!lwp->stopped)
4998 {
4999 if (debug_threads)
5000 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5001 return 0;
5002 }
5003
5004 if (thread->last_resume_kind == resume_stop
5005 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5006 {
5007 if (debug_threads)
5008 debug_printf (" client wants LWP to remain %ld stopped\n",
5009 lwpid_of (thread));
5010 return 0;
5011 }
5012
5013 if (lwp->status_pending_p)
5014 {
5015 if (debug_threads)
5016 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5017 lwpid_of (thread));
5018 return 0;
5019 }
5020
5021 gdb_assert (lwp->suspended >= 0);
5022
5023 if (lwp->suspended)
5024 {
5025 if (debug_threads)
5026 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5027 return 0;
5028 }
5029
5030 if (thread->last_resume_kind == resume_stop
5031 && lwp->pending_signals_to_report == NULL
5032 && lwp->collecting_fast_tracepoint == 0)
5033 {
5034 /* We haven't reported this LWP as stopped yet (otherwise, the
5035 last_status.kind check above would catch it, and we wouldn't
5036 reach here). This LWP may have been momentarily paused by a
5037 stop_all_lwps call while handling, for example, another LWP's
5038 step-over. In that case, the pending expected SIGSTOP signal
5039 that was queued at vCont;t handling time will have already
5040 been consumed by wait_for_sigstop, and so we need to requeue
5041 another one here. Note that if the LWP already has a SIGSTOP
5042 pending, this is a no-op. */
5043
5044 if (debug_threads)
5045 debug_printf ("Client wants LWP %ld to stop. "
5046 "Making sure it has a SIGSTOP pending\n",
5047 lwpid_of (thread));
5048
5049 send_sigstop (lwp);
5050 }
5051
5052 if (thread->last_resume_kind == resume_step)
5053 {
5054 if (debug_threads)
5055 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5056 lwpid_of (thread));
5057 step = 1;
5058 }
5059 else if (lwp->bp_reinsert != 0)
5060 {
5061 if (debug_threads)
5062 debug_printf (" stepping LWP %ld, reinsert set\n",
5063 lwpid_of (thread));
5064
5065 step = maybe_hw_step (thread);
5066 }
5067 else
5068 step = 0;
5069
5070 linux_resume_one_lwp (lwp, step, 0, NULL);
5071 return 0;
5072 }
5073
5074 static int
5075 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5076 {
5077 struct thread_info *thread = (struct thread_info *) entry;
5078 struct lwp_info *lwp = get_thread_lwp (thread);
5079
5080 if (lwp == except)
5081 return 0;
5082
5083 lwp_suspended_decr (lwp);
5084
5085 return proceed_one_lwp (entry, except);
5086 }
5087
5088 /* When we finish a step-over, set threads running again. If there's
5089 another thread that may need a step-over, now's the time to start
5090 it. Eventually, we'll move all threads past their breakpoints. */
5091
5092 static void
5093 proceed_all_lwps (void)
5094 {
5095 struct thread_info *need_step_over;
5096
5097 /* If there is a thread which would otherwise be resumed, which is
5098 stopped at a breakpoint that needs stepping over, then don't
5099 resume any threads - have it step over the breakpoint with all
5100 other threads stopped, then resume all threads again. */
5101
5102 if (supports_breakpoints ())
5103 {
5104 need_step_over
5105 = (struct thread_info *) find_inferior (&all_threads,
5106 need_step_over_p, NULL);
5107
5108 if (need_step_over != NULL)
5109 {
5110 if (debug_threads)
5111 debug_printf ("proceed_all_lwps: found "
5112 "thread %ld needing a step-over\n",
5113 lwpid_of (need_step_over));
5114
5115 start_step_over (get_thread_lwp (need_step_over));
5116 return;
5117 }
5118 }
5119
5120 if (debug_threads)
5121 debug_printf ("Proceeding, no step-over needed\n");
5122
5123 find_inferior (&all_threads, proceed_one_lwp, NULL);
5124 }
5125
5126 /* Stopped LWPs that the client wanted to be running, that don't have
5127 pending statuses, are set to run again, except for EXCEPT, if not
5128 NULL. This undoes a stop_all_lwps call. */
5129
5130 static void
5131 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5132 {
5133 if (debug_threads)
5134 {
5135 debug_enter ();
5136 if (except)
5137 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5138 lwpid_of (get_lwp_thread (except)));
5139 else
5140 debug_printf ("unstopping all lwps\n");
5141 }
5142
5143 if (unsuspend)
5144 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5145 else
5146 find_inferior (&all_threads, proceed_one_lwp, except);
5147
5148 if (debug_threads)
5149 {
5150 debug_printf ("unstop_all_lwps done\n");
5151 debug_exit ();
5152 }
5153 }
5154
5155
5156 #ifdef HAVE_LINUX_REGSETS
5157
5158 #define use_linux_regsets 1
5159
5160 /* Returns true if REGSET has been disabled. */
5161
5162 static int
5163 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5164 {
5165 return (info->disabled_regsets != NULL
5166 && info->disabled_regsets[regset - info->regsets]);
5167 }
5168
5169 /* Disable REGSET. */
5170
5171 static void
5172 disable_regset (struct regsets_info *info, struct regset_info *regset)
5173 {
5174 int dr_offset;
5175
5176 dr_offset = regset - info->regsets;
5177 if (info->disabled_regsets == NULL)
5178 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5179 info->disabled_regsets[dr_offset] = 1;
5180 }
5181
5182 static int
5183 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5184 struct regcache *regcache)
5185 {
5186 struct regset_info *regset;
5187 int saw_general_regs = 0;
5188 int pid;
5189 struct iovec iov;
5190
5191 pid = lwpid_of (current_thread);
5192 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5193 {
5194 void *buf, *data;
5195 int nt_type, res;
5196
5197 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5198 continue;
5199
5200 buf = xmalloc (regset->size);
5201
5202 nt_type = regset->nt_type;
5203 if (nt_type)
5204 {
5205 iov.iov_base = buf;
5206 iov.iov_len = regset->size;
5207 data = (void *) &iov;
5208 }
5209 else
5210 data = buf;
5211
5212 #ifndef __sparc__
5213 res = ptrace (regset->get_request, pid,
5214 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5215 #else
5216 res = ptrace (regset->get_request, pid, data, nt_type);
5217 #endif
5218 if (res < 0)
5219 {
5220 if (errno == EIO)
5221 {
5222 /* If we get EIO on a regset, do not try it again for
5223 this process mode. */
5224 disable_regset (regsets_info, regset);
5225 }
5226 else if (errno == ENODATA)
5227 {
5228 /* ENODATA may be returned if the regset is currently
5229 not "active". This can happen in normal operation,
5230 so suppress the warning in this case. */
5231 }
5232 else
5233 {
5234 char s[256];
5235 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5236 pid);
5237 perror (s);
5238 }
5239 }
5240 else
5241 {
5242 if (regset->type == GENERAL_REGS)
5243 saw_general_regs = 1;
5244 regset->store_function (regcache, buf);
5245 }
5246 free (buf);
5247 }
5248 if (saw_general_regs)
5249 return 0;
5250 else
5251 return 1;
5252 }
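/* Illustrative sketch only: what one iteration of the loop above
   boils down to for the general-purpose regset on a non-SPARC,
   PTRACE_GETREGSET-capable target.  The note type (here NT_PRSTATUS)
   goes in the third ptrace argument and a struct iovec describing
   the buffer in the fourth; the kernel updates iov_len to the number
   of bytes it actually wrote.  Assumes a target that defines
   struct user_regs_struct (<sys/user.h>); the helper name is
   hypothetical.  */
#if 0
static long
fetch_gregs_sketch (int pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, pid,
		 (PTRACE_TYPE_ARG3) (long) NT_PRSTATUS, &iov);
}
#endif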
5253
5254 static int
5255 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5256 struct regcache *regcache)
5257 {
5258 struct regset_info *regset;
5259 int saw_general_regs = 0;
5260 int pid;
5261 struct iovec iov;
5262
5263 pid = lwpid_of (current_thread);
5264 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5265 {
5266 void *buf, *data;
5267 int nt_type, res;
5268
5269 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5270 || regset->fill_function == NULL)
5271 continue;
5272
5273 buf = xmalloc (regset->size);
5274
5275 /* First fill the buffer with the current register set contents,
5276 in case there are any items in the kernel's regset that are
5277 not in gdbserver's regcache. */
5278
5279 nt_type = regset->nt_type;
5280 if (nt_type)
5281 {
5282 iov.iov_base = buf;
5283 iov.iov_len = regset->size;
5284 data = (void *) &iov;
5285 }
5286 else
5287 data = buf;
5288
5289 #ifndef __sparc__
5290 res = ptrace (regset->get_request, pid,
5291 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5292 #else
5293 res = ptrace (regset->get_request, pid, data, nt_type);
5294 #endif
5295
5296 if (res == 0)
5297 {
5298 /* Then overlay our cached registers on that. */
5299 regset->fill_function (regcache, buf);
5300
5301 /* Only now do we write the register set. */
5302 #ifndef __sparc__
5303 res = ptrace (regset->set_request, pid,
5304 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5305 #else
5306 res = ptrace (regset->set_request, pid, data, nt_type);
5307 #endif
5308 }
5309
5310 if (res < 0)
5311 {
5312 if (errno == EIO)
5313 {
5314 /* If we get EIO on a regset, do not try it again for
5315 this process mode. */
5316 disable_regset (regsets_info, regset);
5317 }
5318 else if (errno == ESRCH)
5319 {
5320 /* At this point, ESRCH should mean the process is
5321 already gone, in which case we simply ignore attempts
5322 to change its registers. See also the related
5323 comment in linux_resume_one_lwp. */
5324 free (buf);
5325 return 0;
5326 }
5327 else
5328 {
5329 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5330 }
5331 }
5332 else if (regset->type == GENERAL_REGS)
5333 saw_general_regs = 1;
5334 free (buf);
5335 }
5336 if (saw_general_regs)
5337 return 0;
5338 else
5339 return 1;
5340 }
5341
5342 #else /* !HAVE_LINUX_REGSETS */
5343
5344 #define use_linux_regsets 0
5345 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5346 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5347
5348 #endif
5349
5350 /* Return 1 if register REGNO is supported by one of the regset ptrace
5351 calls or 0 if it has to be transferred individually. */
5352
5353 static int
5354 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5355 {
5356 unsigned char mask = 1 << (regno % 8);
5357 size_t index = regno / 8;
5358
5359 return (use_linux_regsets
5360 && (regs_info->regset_bitmap == NULL
5361 || (regs_info->regset_bitmap[index] & mask) != 0));
5362 }
5363
5364 #ifdef HAVE_LINUX_USRREGS
5365
5366 static int
5367 register_addr (const struct usrregs_info *usrregs, int regnum)
5368 {
5369 int addr;
5370
5371 if (regnum < 0 || regnum >= usrregs->num_regs)
5372 error ("Invalid register number %d.", regnum);
5373
5374 addr = usrregs->regmap[regnum];
5375
5376 return addr;
5377 }
5378
5379 /* Fetch one register. */
5380 static void
5381 fetch_register (const struct usrregs_info *usrregs,
5382 struct regcache *regcache, int regno)
5383 {
5384 CORE_ADDR regaddr;
5385 int i, size;
5386 char *buf;
5387 int pid;
5388
5389 if (regno >= usrregs->num_regs)
5390 return;
5391 if ((*the_low_target.cannot_fetch_register) (regno))
5392 return;
5393
5394 regaddr = register_addr (usrregs, regno);
5395 if (regaddr == -1)
5396 return;
5397
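/* Round the register size up to a whole number of ptrace transfer
   words; PTRACE_PEEKUSER always moves sizeof (PTRACE_XFER_TYPE)
   bytes at a time.  */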
5398 size = ((register_size (regcache->tdesc, regno)
5399 + sizeof (PTRACE_XFER_TYPE) - 1)
5400 & -sizeof (PTRACE_XFER_TYPE));
5401 buf = (char *) alloca (size);
5402
5403 pid = lwpid_of (current_thread);
5404 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5405 {
5406 errno = 0;
5407 *(PTRACE_XFER_TYPE *) (buf + i) =
5408 ptrace (PTRACE_PEEKUSER, pid,
5409 /* Coerce to a uintptr_t first to avoid potential gcc warning
5410 of coercing an 8 byte integer to a 4 byte pointer. */
5411 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5412 regaddr += sizeof (PTRACE_XFER_TYPE);
5413 if (errno != 0)
5414 error ("reading register %d: %s", regno, strerror (errno));
5415 }
5416
5417 if (the_low_target.supply_ptrace_register)
5418 the_low_target.supply_ptrace_register (regcache, regno, buf);
5419 else
5420 supply_register (regcache, regno, buf);
5421 }
5422
5423 /* Store one register. */
5424 static void
5425 store_register (const struct usrregs_info *usrregs,
5426 struct regcache *regcache, int regno)
5427 {
5428 CORE_ADDR regaddr;
5429 int i, size;
5430 char *buf;
5431 int pid;
5432
5433 if (regno >= usrregs->num_regs)
5434 return;
5435 if ((*the_low_target.cannot_store_register) (regno))
5436 return;
5437
5438 regaddr = register_addr (usrregs, regno);
5439 if (regaddr == -1)
5440 return;
5441
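/* As in fetch_register, round the size up to a whole number of
   ptrace transfer words.  */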
5442 size = ((register_size (regcache->tdesc, regno)
5443 + sizeof (PTRACE_XFER_TYPE) - 1)
5444 & -sizeof (PTRACE_XFER_TYPE));
5445 buf = (char *) alloca (size);
5446 memset (buf, 0, size);
5447
5448 if (the_low_target.collect_ptrace_register)
5449 the_low_target.collect_ptrace_register (regcache, regno, buf);
5450 else
5451 collect_register (regcache, regno, buf);
5452
5453 pid = lwpid_of (current_thread);
5454 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5455 {
5456 errno = 0;
5457 ptrace (PTRACE_POKEUSER, pid,
5458 /* Coerce to a uintptr_t first to avoid potential gcc warning
5459 about coercing an 8 byte integer to a 4 byte pointer. */
5460 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5461 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5462 if (errno != 0)
5463 {
5464 /* At this point, ESRCH should mean the process is
5465 already gone, in which case we simply ignore attempts
5466 to change its registers. See also the related
5467 comment in linux_resume_one_lwp. */
5468 if (errno == ESRCH)
5469 return;
5470
5471 if ((*the_low_target.cannot_store_register) (regno) == 0)
5472 error ("writing register %d: %s", regno, strerror (errno));
5473 }
5474 regaddr += sizeof (PTRACE_XFER_TYPE);
5475 }
5476 }
5477
5478 /* Fetch all registers, or just one, from the child process.
5479 If REGNO is -1, do this for all registers, skipping any that are
5480 assumed to have been retrieved by regsets_fetch_inferior_registers,
5481 unless ALL is non-zero.
5482 Otherwise, REGNO specifies which register (so we can save time). */
5483 static void
5484 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5485 struct regcache *regcache, int regno, int all)
5486 {
5487 struct usrregs_info *usr = regs_info->usrregs;
5488
5489 if (regno == -1)
5490 {
5491 for (regno = 0; regno < usr->num_regs; regno++)
5492 if (all || !linux_register_in_regsets (regs_info, regno))
5493 fetch_register (usr, regcache, regno);
5494 }
5495 else
5496 fetch_register (usr, regcache, regno);
5497 }
5498
5499 /* Store our register values back into the inferior.
5500 If REGNO is -1, do this for all registers, skipping any that are
5501 assumed to have been saved by regsets_store_inferior_registers,
5502 unless ALL is non-zero.
5503 Otherwise, REGNO specifies which register (so we can save time). */
5504 static void
5505 usr_store_inferior_registers (const struct regs_info *regs_info,
5506 struct regcache *regcache, int regno, int all)
5507 {
5508 struct usrregs_info *usr = regs_info->usrregs;
5509
5510 if (regno == -1)
5511 {
5512 for (regno = 0; regno < usr->num_regs; regno++)
5513 if (all || !linux_register_in_regsets (regs_info, regno))
5514 store_register (usr, regcache, regno);
5515 }
5516 else
5517 store_register (usr, regcache, regno);
5518 }
5519
5520 #else /* !HAVE_LINUX_USRREGS */
5521
5522 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5523 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5524
5525 #endif
5526
5527
5528 static void
5529 linux_fetch_registers (struct regcache *regcache, int regno)
5530 {
5531 int use_regsets;
5532 int all = 0;
5533 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5534
5535 if (regno == -1)
5536 {
5537 if (the_low_target.fetch_register != NULL
5538 && regs_info->usrregs != NULL)
5539 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5540 (*the_low_target.fetch_register) (regcache, regno);
5541
5542 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5543 if (regs_info->usrregs != NULL)
5544 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5545 }
5546 else
5547 {
5548 if (the_low_target.fetch_register != NULL
5549 && (*the_low_target.fetch_register) (regcache, regno))
5550 return;
5551
5552 use_regsets = linux_register_in_regsets (regs_info, regno);
5553 if (use_regsets)
5554 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5555 regcache);
5556 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5557 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5558 }
5559 }
5560
5561 static void
5562 linux_store_registers (struct regcache *regcache, int regno)
5563 {
5564 int use_regsets;
5565 int all = 0;
5566 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5567
5568 if (regno == -1)
5569 {
5570 all = regsets_store_inferior_registers (regs_info->regsets_info,
5571 regcache);
5572 if (regs_info->usrregs != NULL)
5573 usr_store_inferior_registers (regs_info, regcache, regno, all);
5574 }
5575 else
5576 {
5577 use_regsets = linux_register_in_regsets (regs_info, regno);
5578 if (use_regsets)
5579 all = regsets_store_inferior_registers (regs_info->regsets_info,
5580 regcache);
5581 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5582 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5583 }
5584 }
5585
5586
5587 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5588 to debugger memory starting at MYADDR. */
5589
5590 static int
5591 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5592 {
5593 int pid = lwpid_of (current_thread);
5594 register PTRACE_XFER_TYPE *buffer;
5595 register CORE_ADDR addr;
5596 register int count;
5597 char filename[64];
5598 register int i;
5599 int ret;
5600 int fd;
5601
5602 /* Try using /proc. Don't bother for one word. */
5603 if (len >= 3 * sizeof (long))
5604 {
5605 int bytes;
5606
5607 /* We could keep this file open and cache it - possibly one per
5608 thread. That requires some juggling, but is even faster. */
5609 sprintf (filename, "/proc/%d/mem", pid);
5610 fd = open (filename, O_RDONLY | O_LARGEFILE);
5611 if (fd == -1)
5612 goto no_proc;
5613
5614 /* If pread64 is available, use it. It's faster if the kernel
5615 supports it (only one syscall), and it's 64-bit safe even on
5616 32-bit platforms (for instance, SPARC debugging a SPARC64
5617 application). */
5618 #ifdef HAVE_PREAD64
5619 bytes = pread64 (fd, myaddr, len, memaddr);
5620 #else
5621 bytes = -1;
5622 if (lseek (fd, memaddr, SEEK_SET) != -1)
5623 bytes = read (fd, myaddr, len);
5624 #endif
5625
5626 close (fd);
5627 if (bytes == len)
5628 return 0;
5629
5630 /* Some data was read; we'll try to get the rest with ptrace. */
5631 if (bytes > 0)
5632 {
5633 memaddr += bytes;
5634 myaddr += bytes;
5635 len -= bytes;
5636 }
5637 }
5638
5639 no_proc:
5640 /* Round starting address down to longword boundary. */
5641 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5642 /* Round ending address up; get number of longwords that makes. */
5643 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5644 / sizeof (PTRACE_XFER_TYPE));
5645 /* Allocate buffer of that many longwords. */
5646 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5647
5648 /* Read all the longwords. */
5649 errno = 0;
5650 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5651 {
5652 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5653 about coercing an 8 byte integer to a 4 byte pointer. */
5654 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5655 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5656 (PTRACE_TYPE_ARG4) 0);
5657 if (errno)
5658 break;
5659 }
5660 ret = errno;
5661
5662 /* Copy appropriate bytes out of the buffer. */
5663 if (i > 0)
5664 {
5665 i *= sizeof (PTRACE_XFER_TYPE);
5666 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5667 memcpy (myaddr,
5668 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5669 i < len ? i : len);
5670 }
5671
5672 return ret;
5673 }
5674
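/* The rounding arithmetic above is easy to misread, so here is a
   standalone sketch of it (illustrative only, kept out of the build;
   it assumes an 8-byte PTRACE_XFER_TYPE and made-up addresses).  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long xfer = 8;          /* Stands in for sizeof (PTRACE_XFER_TYPE).  */
  unsigned long memaddr = 0x1003;  /* Unaligned start of the request.  */
  unsigned long len = 10;          /* Bytes requested.  */

  /* Round the start down to a word boundary: 0x1000.  */
  unsigned long addr = memaddr & -xfer;

  /* Round the end up and count words: 0x1000..0x100f, i.e. 2 words,
     covers the requested range 0x1003..0x100c.  */
  unsigned long count = ((memaddr + len) - addr + xfer - 1) / xfer;

  printf ("addr=%#lx count=%lu\n", addr, count);
  return 0;
}
#endif
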
5675 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5676 memory at MEMADDR. On failure (cannot write to the inferior)
5677 returns the value of errno. Always succeeds if LEN is zero. */
5678
5679 static int
5680 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5681 {
5682 register int i;
5683 /* Round starting address down to longword boundary. */
5684 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5685 /* Round ending address up; get number of longwords that makes. */
5686 register int count
5687 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5688 / sizeof (PTRACE_XFER_TYPE);
5689
5690 /* Allocate buffer of that many longwords. */
5691 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5692
5693 int pid = lwpid_of (current_thread);
5694
5695 if (len == 0)
5696 {
5697 /* Zero length write always succeeds. */
5698 return 0;
5699 }
5700
5701 if (debug_threads)
5702 {
5703 /* Dump up to four bytes. */
5704 char str[4 * 2 + 1];
5705 char *p = str;
5706 int dump = len < 4 ? len : 4;
5707
5708 for (i = 0; i < dump; i++)
5709 {
5710 sprintf (p, "%02x", myaddr[i]);
5711 p += 2;
5712 }
5713 *p = '\0';
5714
5715 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5716 str, (long) memaddr, pid);
5717 }
5718
5719 /* Fill start and end extra bytes of buffer with existing memory data. */
5720
5721 errno = 0;
5722 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5723 about coercing an 8 byte integer to a 4 byte pointer. */
5724 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5725 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5726 (PTRACE_TYPE_ARG4) 0);
5727 if (errno)
5728 return errno;
5729
5730 if (count > 1)
5731 {
5732 errno = 0;
5733 buffer[count - 1]
5734 = ptrace (PTRACE_PEEKTEXT, pid,
5735 /* Coerce to a uintptr_t first to avoid potential gcc warning
5736 about coercing an 8 byte integer to a 4 byte pointer. */
5737 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5738 * sizeof (PTRACE_XFER_TYPE)),
5739 (PTRACE_TYPE_ARG4) 0);
5740 if (errno)
5741 return errno;
5742 }
5743
5744 /* Copy data to be written over corresponding part of buffer. */
5745
5746 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5747 myaddr, len);
5748
5749 /* Write the entire buffer. */
5750
5751 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5752 {
5753 errno = 0;
5754 ptrace (PTRACE_POKETEXT, pid,
5755 /* Coerce to a uintptr_t first to avoid potential gcc warning
5756 about coercing an 8 byte integer to a 4 byte pointer. */
5757 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5758 (PTRACE_TYPE_ARG4) buffer[i]);
5759 if (errno)
5760 return errno;
5761 }
5762
5763 return 0;
5764 }
5765
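/* A sketch (illustrative only, not built) of the read-modify-write
   idea above: the first and last words are pre-filled with the
   inferior's existing bytes, so a write that is not word-aligned
   leaves the surrounding bytes intact.  All values are made up.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* Pretend these two words were fetched with PTRACE_PEEKTEXT.  */
  unsigned long long buffer[2] = { 0x1111111111111111ULL,
                                   0x2222222222222222ULL };
  unsigned char payload[] = { 0xaa, 0xbb, 0xcc };
  unsigned long long memaddr = 0x1006;  /* Write crosses a word boundary.  */
  unsigned long long word = sizeof (unsigned long long);

  /* Overlay the payload at its byte offset within the first word.  */
  memcpy ((char *) buffer + (memaddr & (word - 1)), payload, sizeof payload);

  /* On a little-endian host this prints
     bbaa111111111111 22222222222222cc: only three bytes changed.  */
  printf ("%016llx %016llx\n", buffer[0], buffer[1]);
  return 0;
}
#endif
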
5766 static void
5767 linux_look_up_symbols (void)
5768 {
5769 #ifdef USE_THREAD_DB
5770 struct process_info *proc = current_process ();
5771
5772 if (proc->priv->thread_db != NULL)
5773 return;
5774
5775 thread_db_init ();
5776 #endif
5777 }
5778
5779 static void
5780 linux_request_interrupt (void)
5781 {
5782 extern unsigned long signal_pid;
5783
5784 /* Send a SIGINT to the process group. This acts just as if the
5785 user had typed a ^C on the controlling terminal. */
5786 kill (-signal_pid, SIGINT);
5787 }
5788
5789 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5790 to debugger memory starting at MYADDR. */
5791
5792 static int
5793 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5794 {
5795 char filename[PATH_MAX];
5796 int fd, n;
5797 int pid = lwpid_of (current_thread);
5798
5799 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5800
5801 fd = open (filename, O_RDONLY);
5802 if (fd < 0)
5803 return -1;
5804
5805 if (offset != (CORE_ADDR) 0
5806 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5807 n = -1;
5808 else
5809 n = read (fd, myaddr, len);
5810
5811 close (fd);
5812
5813 return n;
5814 }
5815
5816 /* These breakpoint and watchpoint related wrapper functions simply
5817 pass on the function call if the target has registered a
5818 corresponding function. */
5819
5820 static int
5821 linux_supports_z_point_type (char z_type)
5822 {
5823 return (the_low_target.supports_z_point_type != NULL
5824 && the_low_target.supports_z_point_type (z_type));
5825 }
5826
5827 static int
5828 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5829 int size, struct raw_breakpoint *bp)
5830 {
5831 if (type == raw_bkpt_type_sw)
5832 return insert_memory_breakpoint (bp);
5833 else if (the_low_target.insert_point != NULL)
5834 return the_low_target.insert_point (type, addr, size, bp);
5835 else
5836 /* Unsupported (see target.h). */
5837 return 1;
5838 }
5839
5840 static int
5841 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5842 int size, struct raw_breakpoint *bp)
5843 {
5844 if (type == raw_bkpt_type_sw)
5845 return remove_memory_breakpoint (bp);
5846 else if (the_low_target.remove_point != NULL)
5847 return the_low_target.remove_point (type, addr, size, bp);
5848 else
5849 /* Unsupported (see target.h). */
5850 return 1;
5851 }
5852
5853 /* Implement the to_stopped_by_sw_breakpoint target_ops
5854 method. */
5855
5856 static int
5857 linux_stopped_by_sw_breakpoint (void)
5858 {
5859 struct lwp_info *lwp = get_thread_lwp (current_thread);
5860
5861 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5862 }
5863
5864 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5865 method. */
5866
5867 static int
5868 linux_supports_stopped_by_sw_breakpoint (void)
5869 {
5870 return USE_SIGTRAP_SIGINFO;
5871 }
5872
5873 /* Implement the to_stopped_by_hw_breakpoint target_ops
5874 method. */
5875
5876 static int
5877 linux_stopped_by_hw_breakpoint (void)
5878 {
5879 struct lwp_info *lwp = get_thread_lwp (current_thread);
5880
5881 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5882 }
5883
5884 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5885 method. */
5886
5887 static int
5888 linux_supports_stopped_by_hw_breakpoint (void)
5889 {
5890 return USE_SIGTRAP_SIGINFO;
5891 }
5892
5893 /* Implement the supports_hardware_single_step target_ops method. */
5894
5895 static int
5896 linux_supports_hardware_single_step (void)
5897 {
5898 return can_hardware_single_step ();
5899 }
5900
5901 static int
5902 linux_supports_software_single_step (void)
5903 {
5904 return can_software_single_step ();
5905 }
5906
5907 static int
5908 linux_stopped_by_watchpoint (void)
5909 {
5910 struct lwp_info *lwp = get_thread_lwp (current_thread);
5911
5912 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5913 }
5914
5915 static CORE_ADDR
5916 linux_stopped_data_address (void)
5917 {
5918 struct lwp_info *lwp = get_thread_lwp (current_thread);
5919
5920 return lwp->stopped_data_address;
5921 }
5922
5923 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5924 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5925 && defined(PT_TEXT_END_ADDR)
5926
5927 /* This is only used for targets that define PT_TEXT_ADDR,
5928 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, presumably
5929 the target has different ways of acquiring this information, like
5930 loadmaps. */
5931
5932 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5933 to tell gdb about. */
5934
5935 static int
5936 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5937 {
5938 unsigned long text, text_end, data;
5939 int pid = lwpid_of (current_thread);
5940
5941 errno = 0;
5942
5943 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5944 (PTRACE_TYPE_ARG4) 0);
5945 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5946 (PTRACE_TYPE_ARG4) 0);
5947 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5948 (PTRACE_TYPE_ARG4) 0);
5949
5950 if (errno == 0)
5951 {
5952 /* Both text and data offsets produced at compile-time (and so
5953 used by gdb) are relative to the beginning of the program,
5954 with the data segment immediately following the text segment.
5955 However, the actual runtime layout in memory may put the data
5956 somewhere else, so when we send gdb a data base-address, we
5957 use the real data base address and subtract the compile-time
5958 data base-address from it (which is just the length of the
5959 text segment). BSS immediately follows data in both
5960 cases. */
5961 *text_p = text;
5962 *data_p = data - (text_end - text);
5963
5964 return 1;
5965 }
5966 return 0;
5967 }
5968 #endif
5969
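/* A worked example (made-up numbers) of the arithmetic above: with
   text loaded at 0x40000000, text_end at 0x40010000 and data loaded
   at 0x40030000, the compile-time data base is the text length
   0x10000, so the data base reported to GDB is 0x40020000.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long text = 0x40000000, text_end = 0x40010000;
  unsigned long data = 0x40030000;

  printf ("text_p=%#lx data_p=%#lx\n", text, data - (text_end - text));
  return 0;
}
#endif
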
5970 static int
5971 linux_qxfer_osdata (const char *annex,
5972 unsigned char *readbuf, unsigned const char *writebuf,
5973 CORE_ADDR offset, int len)
5974 {
5975 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5976 }
5977
5978 /* Convert a native/host siginfo object, into/from the siginfo in the
5979 layout of the inferiors' architecture. */
5980
5981 static void
5982 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5983 {
5984 int done = 0;
5985
5986 if (the_low_target.siginfo_fixup != NULL)
5987 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5988
5989 /* If there was no callback, or the callback didn't do anything,
5990 then just do a straight memcpy. */
5991 if (!done)
5992 {
5993 if (direction == 1)
5994 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5995 else
5996 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5997 }
5998 }
5999
6000 static int
6001 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6002 unsigned const char *writebuf, CORE_ADDR offset, int len)
6003 {
6004 int pid;
6005 siginfo_t siginfo;
6006 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6007
6008 if (current_thread == NULL)
6009 return -1;
6010
6011 pid = lwpid_of (current_thread);
6012
6013 if (debug_threads)
6014 debug_printf ("%s siginfo for lwp %d.\n",
6015 readbuf != NULL ? "Reading" : "Writing",
6016 pid);
6017
6018 if (offset >= sizeof (siginfo))
6019 return -1;
6020
6021 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6022 return -1;
6023
6024 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6025 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6026 inferior with a 64-bit GDBSERVER should look the same as debugging it
6027 with a 32-bit GDBSERVER, we need to convert it. */
6028 siginfo_fixup (&siginfo, inf_siginfo, 0);
6029
6030 if (offset + len > sizeof (siginfo))
6031 len = sizeof (siginfo) - offset;
6032
6033 if (readbuf != NULL)
6034 memcpy (readbuf, inf_siginfo + offset, len);
6035 else
6036 {
6037 memcpy (inf_siginfo + offset, writebuf, len);
6038
6039 /* Convert back to ptrace layout before flushing it out. */
6040 siginfo_fixup (&siginfo, inf_siginfo, 1);
6041
6042 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6043 return -1;
6044 }
6045
6046 return len;
6047 }
6048
6049 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
6050 it lets us notice when children change state; and it acts as the
6051 handler for the sigsuspend in my_waitpid. */
6052
6053 static void
6054 sigchld_handler (int signo)
6055 {
6056 int old_errno = errno;
6057
6058 if (debug_threads)
6059 {
6060 do
6061 {
6062 /* fprintf is not async-signal-safe, so call write
6063 directly. */
6064 if (write (2, "sigchld_handler\n",
6065 sizeof ("sigchld_handler\n") - 1) < 0)
6066 break; /* just ignore */
6067 } while (0);
6068 }
6069
6070 if (target_is_async_p ())
6071 async_file_mark (); /* trigger a linux_wait */
6072
6073 errno = old_errno;
6074 }
6075
6076 static int
6077 linux_supports_non_stop (void)
6078 {
6079 return 1;
6080 }
6081
6082 static int
6083 linux_async (int enable)
6084 {
6085 int previous = target_is_async_p ();
6086
6087 if (debug_threads)
6088 debug_printf ("linux_async (%d), previous=%d\n",
6089 enable, previous);
6090
6091 if (previous != enable)
6092 {
6093 sigset_t mask;
6094 sigemptyset (&mask);
6095 sigaddset (&mask, SIGCHLD);
6096
6097 sigprocmask (SIG_BLOCK, &mask, NULL);
6098
6099 if (enable)
6100 {
6101 if (pipe (linux_event_pipe) == -1)
6102 {
6103 linux_event_pipe[0] = -1;
6104 linux_event_pipe[1] = -1;
6105 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6106
6107 warning ("creating event pipe failed.");
6108 return previous;
6109 }
6110
6111 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6112 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6113
6114 /* Register the event loop handler. */
6115 add_file_handler (linux_event_pipe[0],
6116 handle_target_event, NULL);
6117
6118 /* Always trigger a linux_wait. */
6119 async_file_mark ();
6120 }
6121 else
6122 {
6123 delete_file_handler (linux_event_pipe[0]);
6124
6125 close (linux_event_pipe[0]);
6126 close (linux_event_pipe[1]);
6127 linux_event_pipe[0] = -1;
6128 linux_event_pipe[1] = -1;
6129 }
6130
6131 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6132 }
6133
6134 return previous;
6135 }
6136
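/* What linux_async sets up is a variant of the classic self-pipe
   trick.  The sketch below (illustrative only, not gdbserver's actual
   event loop) shows the bare pattern: the signal handler does nothing
   but write a byte to a non-blocking pipe, and the event loop treats
   a readable pipe end as "a child changed state, go call waitpid".  */
#if 0
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

/* Async-signal-safe: write is on the list of functions a handler may
   call; fprintf and friends are not.  */
static void
handler (int signo)
{
  char c = '+';

  (void) write (event_pipe[1], &c, 1);
}

int
main (void)
{
  pipe (event_pipe);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, handler);

  /* A real event loop would select/poll on event_pipe[0] here and
     drain it before waiting for children.  */
  return 0;
}
#endif
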
6137 static int
6138 linux_start_non_stop (int nonstop)
6139 {
6140 /* Register or unregister from event-loop accordingly. */
6141 linux_async (nonstop);
6142
6143 if (target_is_async_p () != (nonstop != 0))
6144 return -1;
6145
6146 return 0;
6147 }
6148
6149 static int
6150 linux_supports_multi_process (void)
6151 {
6152 return 1;
6153 }
6154
6155 /* Check if fork events are supported. */
6156
6157 static int
6158 linux_supports_fork_events (void)
6159 {
6160 return linux_supports_tracefork ();
6161 }
6162
6163 /* Check if vfork events are supported. */
6164
6165 static int
6166 linux_supports_vfork_events (void)
6167 {
6168 return linux_supports_tracefork ();
6169 }
6170
6171 /* Check if exec events are supported. */
6172
6173 static int
6174 linux_supports_exec_events (void)
6175 {
6176 return linux_supports_traceexec ();
6177 }
6178
6179 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6180 options for the specified lwp. */
6181
6182 static int
6183 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6184 void *args)
6185 {
6186 struct thread_info *thread = (struct thread_info *) entry;
6187 struct lwp_info *lwp = get_thread_lwp (thread);
6188
6189 if (!lwp->stopped)
6190 {
6191 /* Stop the lwp so we can modify its ptrace options. */
6192 lwp->must_set_ptrace_flags = 1;
6193 linux_stop_lwp (lwp);
6194 }
6195 else
6196 {
6197 /* Already stopped; go ahead and set the ptrace options. */
6198 struct process_info *proc = find_process_pid (pid_of (thread));
6199 int options = linux_low_ptrace_options (proc->attached);
6200
6201 linux_enable_event_reporting (lwpid_of (thread), options);
6202 lwp->must_set_ptrace_flags = 0;
6203 }
6204
6205 return 0;
6206 }
6207
6208 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6209 ptrace flags for all inferiors. This is in case the new GDB connection
6210 doesn't support the same set of events that the previous one did. */
6211
6212 static void
6213 linux_handle_new_gdb_connection (void)
6214 {
6215 pid_t pid;
6216
6217 /* Request that all the lwps reset their ptrace options. */
6218 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6219 }
6220
6221 static int
6222 linux_supports_disable_randomization (void)
6223 {
6224 #ifdef HAVE_PERSONALITY
6225 return 1;
6226 #else
6227 return 0;
6228 #endif
6229 }
6230
6231 static int
6232 linux_supports_agent (void)
6233 {
6234 return 1;
6235 }
6236
6237 static int
6238 linux_supports_range_stepping (void)
6239 {
6240 if (*the_low_target.supports_range_stepping == NULL)
6241 return 0;
6242
6243 return (*the_low_target.supports_range_stepping) ();
6244 }
6245
6246 /* Enumerate spufs IDs for process PID. */
6247 static int
6248 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6249 {
6250 int pos = 0;
6251 int written = 0;
6252 char path[128];
6253 DIR *dir;
6254 struct dirent *entry;
6255
6256 sprintf (path, "/proc/%ld/fd", pid);
6257 dir = opendir (path);
6258 if (!dir)
6259 return -1;
6260
6261 rewinddir (dir);
6262 while ((entry = readdir (dir)) != NULL)
6263 {
6264 struct stat st;
6265 struct statfs stfs;
6266 int fd;
6267
6268 fd = atoi (entry->d_name);
6269 if (!fd)
6270 continue;
6271
6272 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6273 if (stat (path, &st) != 0)
6274 continue;
6275 if (!S_ISDIR (st.st_mode))
6276 continue;
6277
6278 if (statfs (path, &stfs) != 0)
6279 continue;
6280 if (stfs.f_type != SPUFS_MAGIC)
6281 continue;
6282
6283 if (pos >= offset && pos + 4 <= offset + len)
6284 {
6285 *(unsigned int *) (buf + pos - offset) = fd;
6286 written += 4;
6287 }
6288 pos += 4;
6289 }
6290
6291 closedir (dir);
6292 return written;
6293 }
6294
6295 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6296 object type, using the /proc file system. */
6297 static int
6298 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6299 unsigned const char *writebuf,
6300 CORE_ADDR offset, int len)
6301 {
6302 long pid = lwpid_of (current_thread);
6303 char buf[128];
6304 int fd = 0;
6305 int ret = 0;
6306
6307 if (!writebuf && !readbuf)
6308 return -1;
6309
6310 if (!*annex)
6311 {
6312 if (!readbuf)
6313 return -1;
6314 else
6315 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6316 }
6317
6318 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6319 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6320 if (fd <= 0)
6321 return -1;
6322
6323 if (offset != 0
6324 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6325 {
6326 close (fd);
6327 return 0;
6328 }
6329
6330 if (writebuf)
6331 ret = write (fd, writebuf, (size_t) len);
6332 else
6333 ret = read (fd, readbuf, (size_t) len);
6334
6335 close (fd);
6336 return ret;
6337 }
6338
6339 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6340 struct target_loadseg
6341 {
6342 /* Core address to which the segment is mapped. */
6343 Elf32_Addr addr;
6344 /* VMA recorded in the program header. */
6345 Elf32_Addr p_vaddr;
6346 /* Size of this segment in memory. */
6347 Elf32_Word p_memsz;
6348 };
6349
6350 # if defined PT_GETDSBT
6351 struct target_loadmap
6352 {
6353 /* Protocol version number, must be zero. */
6354 Elf32_Word version;
6355 /* Pointer to the DSBT table, its size, and the DSBT index. */
6356 unsigned *dsbt_table;
6357 unsigned dsbt_size, dsbt_index;
6358 /* Number of segments in this map. */
6359 Elf32_Word nsegs;
6360 /* The actual memory map. */
6361 struct target_loadseg segs[/*nsegs*/];
6362 };
6363 # define LINUX_LOADMAP PT_GETDSBT
6364 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6365 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6366 # else
6367 struct target_loadmap
6368 {
6369 /* Protocol version number, must be zero. */
6370 Elf32_Half version;
6371 /* Number of segments in this map. */
6372 Elf32_Half nsegs;
6373 /* The actual memory map. */
6374 struct target_loadseg segs[/*nsegs*/];
6375 };
6376 # define LINUX_LOADMAP PTRACE_GETFDPIC
6377 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6378 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6379 # endif
6380
6381 static int
6382 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6383 unsigned char *myaddr, unsigned int len)
6384 {
6385 int pid = lwpid_of (current_thread);
6386 int addr = -1;
6387 struct target_loadmap *data = NULL;
6388 unsigned int actual_length, copy_length;
6389
6390 if (strcmp (annex, "exec") == 0)
6391 addr = (int) LINUX_LOADMAP_EXEC;
6392 else if (strcmp (annex, "interp") == 0)
6393 addr = (int) LINUX_LOADMAP_INTERP;
6394 else
6395 return -1;
6396
6397 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6398 return -1;
6399
6400 if (data == NULL)
6401 return -1;
6402
6403 actual_length = sizeof (struct target_loadmap)
6404 + sizeof (struct target_loadseg) * data->nsegs;
6405
6406 if (offset < 0 || offset > actual_length)
6407 return -1;
6408
6409 copy_length = actual_length - offset < len ? actual_length - offset : len;
6410 memcpy (myaddr, (char *) data + offset, copy_length);
6411 return copy_length;
6412 }
6413 #else
6414 # define linux_read_loadmap NULL
6415 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6416
6417 static void
6418 linux_process_qsupported (char **features, int count)
6419 {
6420 if (the_low_target.process_qsupported != NULL)
6421 the_low_target.process_qsupported (features, count);
6422 }
6423
6424 static int
6425 linux_supports_catch_syscall (void)
6426 {
6427 return (the_low_target.get_syscall_trapinfo != NULL
6428 && linux_supports_tracesysgood ());
6429 }
6430
6431 static int
6432 linux_get_ipa_tdesc_idx (void)
6433 {
6434 if (the_low_target.get_ipa_tdesc_idx == NULL)
6435 return 0;
6436
6437 return (*the_low_target.get_ipa_tdesc_idx) ();
6438 }
6439
6440 static int
6441 linux_supports_tracepoints (void)
6442 {
6443 if (*the_low_target.supports_tracepoints == NULL)
6444 return 0;
6445
6446 return (*the_low_target.supports_tracepoints) ();
6447 }
6448
6449 static CORE_ADDR
6450 linux_read_pc (struct regcache *regcache)
6451 {
6452 if (the_low_target.get_pc == NULL)
6453 return 0;
6454
6455 return (*the_low_target.get_pc) (regcache);
6456 }
6457
6458 static void
6459 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6460 {
6461 gdb_assert (the_low_target.set_pc != NULL);
6462
6463 (*the_low_target.set_pc) (regcache, pc);
6464 }
6465
6466 static int
6467 linux_thread_stopped (struct thread_info *thread)
6468 {
6469 return get_thread_lwp (thread)->stopped;
6470 }
6471
6472 /* This exposes stop-all-threads functionality to other modules. */
6473
6474 static void
6475 linux_pause_all (int freeze)
6476 {
6477 stop_all_lwps (freeze, NULL);
6478 }
6479
6480 /* This exposes unstop-all-threads functionality to other gdbserver
6481 modules. */
6482
6483 static void
6484 linux_unpause_all (int unfreeze)
6485 {
6486 unstop_all_lwps (unfreeze, NULL);
6487 }
6488
6489 static int
6490 linux_prepare_to_access_memory (void)
6491 {
6492 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6493 running LWP. */
6494 if (non_stop)
6495 linux_pause_all (1);
6496 return 0;
6497 }
6498
6499 static void
6500 linux_done_accessing_memory (void)
6501 {
6502 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
6503 running LWP. */
6504 if (non_stop)
6505 linux_unpause_all (1);
6506 }
6507
6508 static int
6509 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6510 CORE_ADDR collector,
6511 CORE_ADDR lockaddr,
6512 ULONGEST orig_size,
6513 CORE_ADDR *jump_entry,
6514 CORE_ADDR *trampoline,
6515 ULONGEST *trampoline_size,
6516 unsigned char *jjump_pad_insn,
6517 ULONGEST *jjump_pad_insn_size,
6518 CORE_ADDR *adjusted_insn_addr,
6519 CORE_ADDR *adjusted_insn_addr_end,
6520 char *err)
6521 {
6522 return (*the_low_target.install_fast_tracepoint_jump_pad)
6523 (tpoint, tpaddr, collector, lockaddr, orig_size,
6524 jump_entry, trampoline, trampoline_size,
6525 jjump_pad_insn, jjump_pad_insn_size,
6526 adjusted_insn_addr, adjusted_insn_addr_end,
6527 err);
6528 }
6529
6530 static struct emit_ops *
6531 linux_emit_ops (void)
6532 {
6533 if (the_low_target.emit_ops != NULL)
6534 return (*the_low_target.emit_ops) ();
6535 else
6536 return NULL;
6537 }
6538
6539 static int
6540 linux_get_min_fast_tracepoint_insn_len (void)
6541 {
6542 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6543 }
6544
6545 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6546
6547 static int
6548 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6549 CORE_ADDR *phdr_memaddr, int *num_phdr)
6550 {
6551 char filename[PATH_MAX];
6552 int fd;
6553 const int auxv_size = is_elf64
6554 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6555 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6556
6557 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6558
6559 fd = open (filename, O_RDONLY);
6560 if (fd < 0)
6561 return 1;
6562
6563 *phdr_memaddr = 0;
6564 *num_phdr = 0;
6565 while (read (fd, buf, auxv_size) == auxv_size
6566 && (*phdr_memaddr == 0 || *num_phdr == 0))
6567 {
6568 if (is_elf64)
6569 {
6570 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6571
6572 switch (aux->a_type)
6573 {
6574 case AT_PHDR:
6575 *phdr_memaddr = aux->a_un.a_val;
6576 break;
6577 case AT_PHNUM:
6578 *num_phdr = aux->a_un.a_val;
6579 break;
6580 }
6581 }
6582 else
6583 {
6584 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6585
6586 switch (aux->a_type)
6587 {
6588 case AT_PHDR:
6589 *phdr_memaddr = aux->a_un.a_val;
6590 break;
6591 case AT_PHNUM:
6592 *num_phdr = aux->a_un.a_val;
6593 break;
6594 }
6595 }
6596 }
6597
6598 close (fd);
6599
6600 if (*phdr_memaddr == 0 || *num_phdr == 0)
6601 {
6602 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6603 "phdr_memaddr = %ld, phdr_num = %d",
6604 (long) *phdr_memaddr, *num_phdr);
6605 return 2;
6606 }
6607
6608 return 0;
6609 }
6610
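/* For contrast with the /proc parsing above: a process can query its
   own auxiliary vector directly with glibc's getauxval, as in this
   sketch (illustrative only).  gdbserver cannot do that here because
   it needs another process's auxv, hence /proc/PID/auxv.  */
#if 0
#include <stdio.h>
#include <elf.h>
#include <sys/auxv.h>

int
main (void)
{
  printf ("AT_PHDR  = %#lx\n", getauxval (AT_PHDR));
  printf ("AT_PHNUM = %lu\n", getauxval (AT_PHNUM));
  return 0;
}
#endif
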
6611 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6612
6613 static CORE_ADDR
6614 get_dynamic (const int pid, const int is_elf64)
6615 {
6616 CORE_ADDR phdr_memaddr, relocation;
6617 int num_phdr, i;
6618 unsigned char *phdr_buf;
6619 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6620
6621 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6622 return 0;
6623
6624 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6625 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6626
6627 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6628 return 0;
6629
6630 /* Compute relocation: it is expected to be 0 for "regular" executables,
6631 non-zero for PIE ones. */
6632 relocation = -1;
6633 for (i = 0; relocation == -1 && i < num_phdr; i++)
6634 if (is_elf64)
6635 {
6636 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6637
6638 if (p->p_type == PT_PHDR)
6639 relocation = phdr_memaddr - p->p_vaddr;
6640 }
6641 else
6642 {
6643 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6644
6645 if (p->p_type == PT_PHDR)
6646 relocation = phdr_memaddr - p->p_vaddr;
6647 }
6648
6649 if (relocation == -1)
6650 {
6651 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6652 any real-world executable, including a PIE one, always has PT_PHDR
6653 present. PT_PHDR is absent from some shared libraries and from fpc
6654 (Free Pascal 2.4) binaries, but neither of those needs or provides
6655 DT_DEBUG anyway (fpc binaries are statically linked).
6656
6657 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6658
6659 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6660
6661 return 0;
6662 }
6663
6664 for (i = 0; i < num_phdr; i++)
6665 {
6666 if (is_elf64)
6667 {
6668 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6669
6670 if (p->p_type == PT_DYNAMIC)
6671 return p->p_vaddr + relocation;
6672 }
6673 else
6674 {
6675 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6676
6677 if (p->p_type == PT_DYNAMIC)
6678 return p->p_vaddr + relocation;
6679 }
6680 }
6681
6682 return 0;
6683 }
6684
6685 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6686 can be 0 if the inferior does not yet have the library list initialized.
6687 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6688 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6689
6690 static CORE_ADDR
6691 get_r_debug (const int pid, const int is_elf64)
6692 {
6693 CORE_ADDR dynamic_memaddr;
6694 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6695 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6696 CORE_ADDR map = -1;
6697
6698 dynamic_memaddr = get_dynamic (pid, is_elf64);
6699 if (dynamic_memaddr == 0)
6700 return map;
6701
6702 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6703 {
6704 if (is_elf64)
6705 {
6706 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6707 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6708 union
6709 {
6710 Elf64_Xword map;
6711 unsigned char buf[sizeof (Elf64_Xword)];
6712 }
6713 rld_map;
6714 #endif
6715 #ifdef DT_MIPS_RLD_MAP
6716 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6717 {
6718 if (linux_read_memory (dyn->d_un.d_val,
6719 rld_map.buf, sizeof (rld_map.buf)) == 0)
6720 return rld_map.map;
6721 else
6722 break;
6723 }
6724 #endif /* DT_MIPS_RLD_MAP */
6725 #ifdef DT_MIPS_RLD_MAP_REL
6726 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6727 {
6728 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6729 rld_map.buf, sizeof (rld_map.buf)) == 0)
6730 return rld_map.map;
6731 else
6732 break;
6733 }
6734 #endif /* DT_MIPS_RLD_MAP_REL */
6735
6736 if (dyn->d_tag == DT_DEBUG && map == -1)
6737 map = dyn->d_un.d_val;
6738
6739 if (dyn->d_tag == DT_NULL)
6740 break;
6741 }
6742 else
6743 {
6744 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6745 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6746 union
6747 {
6748 Elf32_Word map;
6749 unsigned char buf[sizeof (Elf32_Word)];
6750 }
6751 rld_map;
6752 #endif
6753 #ifdef DT_MIPS_RLD_MAP
6754 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6755 {
6756 if (linux_read_memory (dyn->d_un.d_val,
6757 rld_map.buf, sizeof (rld_map.buf)) == 0)
6758 return rld_map.map;
6759 else
6760 break;
6761 }
6762 #endif /* DT_MIPS_RLD_MAP */
6763 #ifdef DT_MIPS_RLD_MAP_REL
6764 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6765 {
6766 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6767 rld_map.buf, sizeof (rld_map.buf)) == 0)
6768 return rld_map.map;
6769 else
6770 break;
6771 }
6772 #endif /* DT_MIPS_RLD_MAP_REL */
6773
6774 if (dyn->d_tag == DT_DEBUG && map == -1)
6775 map = dyn->d_un.d_val;
6776
6777 if (dyn->d_tag == DT_NULL)
6778 break;
6779 }
6780
6781 dynamic_memaddr += dyn_size;
6782 }
6783
6784 return map;
6785 }
6786
6787 /* Read one pointer from MEMADDR in the inferior. */
6788
6789 static int
6790 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6791 {
6792 int ret;
6793
6794 /* Go through a union so this works on either big or little endian
6795 hosts, when the inferior's pointer size is smaller than the size
6796 of CORE_ADDR. It is assumed the inferior's endianness is the
6797 same as the superior's. */
6798 union
6799 {
6800 CORE_ADDR core_addr;
6801 unsigned int ui;
6802 unsigned char uc;
6803 } addr;
6804
6805 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6806 if (ret == 0)
6807 {
6808 if (ptr_size == sizeof (CORE_ADDR))
6809 *ptr = addr.core_addr;
6810 else if (ptr_size == sizeof (unsigned int))
6811 *ptr = addr.ui;
6812 else
6813 gdb_assert_not_reached ("unhandled pointer size");
6814 }
6815 return ret;
6816 }
6817
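/* Why the union above is needed: reading 4 inferior bytes into the
   start of a bare 8-byte integer would land them in its high-order
   half on a big-endian host.  Every union member starts at offset 0,
   so the bytes land exactly where the narrow member lives.  A sketch
   (illustrative only, made-up value):  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  /* A little-endian 4-byte pointer value 0x12345678 as raw bytes.  */
  unsigned char inferior_bytes[4] = { 0x78, 0x56, 0x34, 0x12 };
  union { unsigned long long wide; unsigned int narrow; } u = { 0 };

  memcpy (&u, inferior_bytes, sizeof inferior_bytes);
  printf ("narrow = %#x\n", u.narrow);  /* 0x12345678 on little-endian.  */
  return 0;
}
#endif
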
6818 struct link_map_offsets
6819 {
6820 /* Offset and size of r_debug.r_version. */
6821 int r_version_offset;
6822
6823 /* Offset and size of r_debug.r_map. */
6824 int r_map_offset;
6825
6826 /* Offset to l_addr field in struct link_map. */
6827 int l_addr_offset;
6828
6829 /* Offset to l_name field in struct link_map. */
6830 int l_name_offset;
6831
6832 /* Offset to l_ld field in struct link_map. */
6833 int l_ld_offset;
6834
6835 /* Offset to l_next field in struct link_map. */
6836 int l_next_offset;
6837
6838 /* Offset to l_prev field in struct link_map. */
6839 int l_prev_offset;
6840 };
6841
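/* The 64-bit offsets table used below mirrors glibc's own struct
   r_debug and struct link_map layout.  A sketch that checks this on
   an LP64 glibc host (illustrative only; it assumes the host's
   <link.h> matches the inferior's layout, which is exactly what the
   hard-coded tables avoid relying on):  */
#if 0
#include <stddef.h>
#include <stdio.h>
#include <link.h>

int
main (void)
{
  printf ("r_version=%zu r_map=%zu\n",
          offsetof (struct r_debug, r_version),
          offsetof (struct r_debug, r_map));
  printf ("l_addr=%zu l_name=%zu l_ld=%zu l_next=%zu l_prev=%zu\n",
          offsetof (struct link_map, l_addr),
          offsetof (struct link_map, l_name),
          offsetof (struct link_map, l_ld),
          offsetof (struct link_map, l_next),
          offsetof (struct link_map, l_prev));
  return 0;
}
#endif
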
6842 /* Construct qXfer:libraries-svr4:read reply. */
6843
6844 static int
6845 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6846 unsigned const char *writebuf,
6847 CORE_ADDR offset, int len)
6848 {
6849 char *document;
6850 unsigned document_len;
6851 struct process_info_private *const priv = current_process ()->priv;
6852 char filename[PATH_MAX];
6853 int pid, is_elf64;
6854
6855 static const struct link_map_offsets lmo_32bit_offsets =
6856 {
6857 0, /* r_version offset. */
6858 4, /* r_debug.r_map offset. */
6859 0, /* l_addr offset in link_map. */
6860 4, /* l_name offset in link_map. */
6861 8, /* l_ld offset in link_map. */
6862 12, /* l_next offset in link_map. */
6863 16 /* l_prev offset in link_map. */
6864 };
6865
6866 static const struct link_map_offsets lmo_64bit_offsets =
6867 {
6868 0, /* r_version offset. */
6869 8, /* r_debug.r_map offset. */
6870 0, /* l_addr offset in link_map. */
6871 8, /* l_name offset in link_map. */
6872 16, /* l_ld offset in link_map. */
6873 24, /* l_next offset in link_map. */
6874 32 /* l_prev offset in link_map. */
6875 };
6876 const struct link_map_offsets *lmo;
6877 unsigned int machine;
6878 int ptr_size;
6879 CORE_ADDR lm_addr = 0, lm_prev = 0;
6880 int allocated = 1024;
6881 char *p;
6882 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6883 int header_done = 0;
6884
6885 if (writebuf != NULL)
6886 return -2;
6887 if (readbuf == NULL)
6888 return -1;
6889
6890 pid = lwpid_of (current_thread);
6891 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6892 is_elf64 = elf_64_file_p (filename, &machine);
6893 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6894 ptr_size = is_elf64 ? 8 : 4;
6895
6896 while (annex[0] != '\0')
6897 {
6898 const char *sep;
6899 CORE_ADDR *addrp;
6900 int len;
6901
6902 sep = strchr (annex, '=');
6903 if (sep == NULL)
6904 break;
6905
6906 len = sep - annex;
6907 if (len == 5 && startswith (annex, "start"))
6908 addrp = &lm_addr;
6909 else if (len == 4 && startswith (annex, "prev"))
6910 addrp = &lm_prev;
6911 else
6912 {
6913 annex = strchr (sep, ';');
6914 if (annex == NULL)
6915 break;
6916 annex++;
6917 continue;
6918 }
6919
6920 annex = decode_address_to_semicolon (addrp, sep + 1);
6921 }
6922
6923 if (lm_addr == 0)
6924 {
6925 int r_version = 0;
6926
6927 if (priv->r_debug == 0)
6928 priv->r_debug = get_r_debug (pid, is_elf64);
6929
6930 /* We failed to find DT_DEBUG. That will not change for this
6931 inferior, so do not retry it. Report it to GDB as E01; see
6932 GDB's solib-svr4.c for the reasons. */
6933 if (priv->r_debug == (CORE_ADDR) -1)
6934 return -1;
6935
6936 if (priv->r_debug != 0)
6937 {
6938 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6939 (unsigned char *) &r_version,
6940 sizeof (r_version)) != 0
6941 || r_version != 1)
6942 {
6943 warning ("unexpected r_debug version %d", r_version);
6944 }
6945 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6946 &lm_addr, ptr_size) != 0)
6947 {
6948 warning ("unable to read r_map from 0x%lx",
6949 (long) priv->r_debug + lmo->r_map_offset);
6950 }
6951 }
6952 }
6953
6954 document = (char *) xmalloc (allocated);
6955 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6956 p = document + strlen (document);
6957
6958 while (lm_addr
6959 && read_one_ptr (lm_addr + lmo->l_name_offset,
6960 &l_name, ptr_size) == 0
6961 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6962 &l_addr, ptr_size) == 0
6963 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6964 &l_ld, ptr_size) == 0
6965 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6966 &l_prev, ptr_size) == 0
6967 && read_one_ptr (lm_addr + lmo->l_next_offset,
6968 &l_next, ptr_size) == 0)
6969 {
6970 unsigned char libname[PATH_MAX];
6971
6972 if (lm_prev != l_prev)
6973 {
6974 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6975 (long) lm_prev, (long) l_prev);
6976 break;
6977 }
6978
6979 /* Ignore the first entry even if it has a valid name, as it
6980 corresponds to the main executable. The first entry should not be
6981 skipped if the dynamic loader was loaded late by a static executable
6982 (see the solib-svr4.c parameter ignore_first). But in that case the
6983 main executable does not have PT_DYNAMIC present, and this function
6984 has already exited above due to a failed get_r_debug. */
6985 if (lm_prev == 0)
6986 {
6987 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6988 p = p + strlen (p);
6989 }
6990 else
6991 {
6992 /* Not checking for error because reading may stop before
6993 we've got PATH_MAX worth of characters. */
6994 libname[0] = '\0';
6995 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6996 libname[sizeof (libname) - 1] = '\0';
6997 if (libname[0] != '\0')
6998 {
6999 /* 6x the size for xml_escape_text below. */
7000 size_t len = 6 * strlen ((char *) libname);
7001 char *name;
7002
7003 if (!header_done)
7004 {
7005 /* Terminate `<library-list-svr4'. */
7006 *p++ = '>';
7007 header_done = 1;
7008 }
7009
7010 while (allocated < p - document + len + 200)
7011 {
7012 /* Expand to guarantee sufficient storage. */
7013 uintptr_t document_len = p - document;
7014
7015 document = (char *) xrealloc (document, 2 * allocated);
7016 allocated *= 2;
7017 p = document + document_len;
7018 }
7019
7020 name = xml_escape_text ((char *) libname);
7021 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7022 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7023 name, (unsigned long) lm_addr,
7024 (unsigned long) l_addr, (unsigned long) l_ld);
7025 free (name);
7026 }
7027 }
7028
7029 lm_prev = lm_addr;
7030 lm_addr = l_next;
7031 }
7032
7033 if (!header_done)
7034 {
7035 /* Empty list; terminate `<library-list-svr4'. */
7036 strcpy (p, "/>");
7037 }
7038 else
7039 strcpy (p, "</library-list-svr4>");
7040
7041 document_len = strlen (document);
7042 if (offset < document_len)
7043 document_len -= offset;
7044 else
7045 document_len = 0;
7046 if (len > document_len)
7047 len = document_len;
7048
7049 memcpy (readbuf, document + offset, len);
7050 xfree (document);
7051
7052 return len;
7053 }
7054
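/* For reference, a reply document built by the function above has
   this shape (addresses hypothetical; the real output contains no
   newlines between elements):

     <library-list-svr4 version="1.0" main-lm="0x400160">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffd000"
                l_addr="0x7ffff7dd7000" l_ld="0x7ffff7dcfb80"/>
     </library-list-svr4>  */
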
7055 #ifdef HAVE_LINUX_BTRACE
7056
7057 /* See to_disable_btrace target method. */
7058
7059 static int
7060 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7061 {
7062 enum btrace_error err;
7063
7064 err = linux_disable_btrace (tinfo);
7065 return (err == BTRACE_ERR_NONE ? 0 : -1);
7066 }
7067
7068 /* Encode an Intel Processor Trace configuration. */
7069
7070 static void
7071 linux_low_encode_pt_config (struct buffer *buffer,
7072 const struct btrace_data_pt_config *config)
7073 {
7074 buffer_grow_str (buffer, "<pt-config>\n");
7075
7076 switch (config->cpu.vendor)
7077 {
7078 case CV_INTEL:
7079 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7080 "model=\"%u\" stepping=\"%u\"/>\n",
7081 config->cpu.family, config->cpu.model,
7082 config->cpu.stepping);
7083 break;
7084
7085 default:
7086 break;
7087 }
7088
7089 buffer_grow_str (buffer, "</pt-config>\n");
7090 }
7091
7092 /* Encode a raw buffer. */
7093
7094 static void
7095 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7096 unsigned int size)
7097 {
7098 if (size == 0)
7099 return;
7100
7101 /* We use hex encoding - see common/rsp-low.h. */
7102 buffer_grow_str (buffer, "<raw>\n");
7103
7104 while (size-- > 0)
7105 {
7106 char elem[2];
7107
7108 elem[0] = tohex ((*data >> 4) & 0xf);
7109 elem[1] = tohex (*data++ & 0xf);
7110
7111 buffer_grow (buffer, elem, 2);
7112 }
7113
7114 buffer_grow_str (buffer, "</raw>\n");
7115 }
7116
7117 /* See to_read_btrace target method. */
7118
7119 static int
7120 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7121 enum btrace_read_type type)
7122 {
7123 struct btrace_data btrace;
7124 struct btrace_block *block;
7125 enum btrace_error err;
7126 int i;
7127
7128 btrace_data_init (&btrace);
7129
7130 err = linux_read_btrace (&btrace, tinfo, type);
7131 if (err != BTRACE_ERR_NONE)
7132 {
7133 if (err == BTRACE_ERR_OVERFLOW)
7134 buffer_grow_str0 (buffer, "E.Overflow.");
7135 else
7136 buffer_grow_str0 (buffer, "E.Generic Error.");
7137
7138 goto err;
7139 }
7140
7141 switch (btrace.format)
7142 {
7143 case BTRACE_FORMAT_NONE:
7144 buffer_grow_str0 (buffer, "E.No Trace.");
7145 goto err;
7146
7147 case BTRACE_FORMAT_BTS:
7148 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7149 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7150
7151 for (i = 0;
7152 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7153 i++)
7154 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7155 paddress (block->begin), paddress (block->end));
7156
7157 buffer_grow_str0 (buffer, "</btrace>\n");
7158 break;
7159
7160 case BTRACE_FORMAT_PT:
7161 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7162 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7163 buffer_grow_str (buffer, "<pt>\n");
7164
7165 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7166
7167 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7168 btrace.variant.pt.size);
7169
7170 buffer_grow_str (buffer, "</pt>\n");
7171 buffer_grow_str0 (buffer, "</btrace>\n");
7172 break;
7173
7174 default:
7175 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7176 goto err;
7177 }
7178
7179 btrace_data_fini (&btrace);
7180 return 0;
7181
7182 err:
7183 btrace_data_fini (&btrace);
7184 return -1;
7185 }
7186
7187 /* See to_btrace_conf target method. */
7188
7189 static int
7190 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7191 struct buffer *buffer)
7192 {
7193 const struct btrace_config *conf;
7194
7195 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7196 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7197
7198 conf = linux_btrace_conf (tinfo);
7199 if (conf != NULL)
7200 {
7201 switch (conf->format)
7202 {
7203 case BTRACE_FORMAT_NONE:
7204 break;
7205
7206 case BTRACE_FORMAT_BTS:
7207 buffer_xml_printf (buffer, "<bts");
7208 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7209 buffer_xml_printf (buffer, " />\n");
7210 break;
7211
7212 case BTRACE_FORMAT_PT:
7213 buffer_xml_printf (buffer, "<pt");
7214 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7215 buffer_xml_printf (buffer, "/>\n");
7216 break;
7217 }
7218 }
7219
7220 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7221 return 0;
7222 }
7223 #endif /* HAVE_LINUX_BTRACE */
7224
7225 /* See nat/linux-nat.h. */
7226
7227 ptid_t
7228 current_lwp_ptid (void)
7229 {
7230 return ptid_of (current_thread);
7231 }
7232
7233 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7234
7235 static int
7236 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7237 {
7238 if (the_low_target.breakpoint_kind_from_pc != NULL)
7239 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7240 else
7241 return default_breakpoint_kind_from_pc (pcptr);
7242 }
7243
7244 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7245
7246 static const gdb_byte *
7247 linux_sw_breakpoint_from_kind (int kind, int *size)
7248 {
7249 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7250
7251 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7252 }
7253
7254 /* Implementation of the target_ops method
7255 "breakpoint_kind_from_current_state". */
7256
7257 static int
7258 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7259 {
7260 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7261 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7262 else
7263 return linux_breakpoint_kind_from_pc (pcptr);
7264 }
7265
7266 /* Default implementation of linux_target_ops method "set_pc" for
7267 a 32-bit pc register that is literally named "pc". */
7268
7269 void
7270 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7271 {
7272 uint32_t newpc = pc;
7273
7274 supply_register_by_name (regcache, "pc", &newpc);
7275 }
7276
7277 /* Default implementation of linux_target_ops method "get_pc" for
7278 a 32-bit pc register that is literally named "pc". */
7279
7280 CORE_ADDR
7281 linux_get_pc_32bit (struct regcache *regcache)
7282 {
7283 uint32_t pc;
7284
7285 collect_register_by_name (regcache, "pc", &pc);
7286 if (debug_threads)
7287 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7288 return pc;
7289 }
7290
7291 /* Default implementation of linux_target_ops method "set_pc" for
7292 a 64-bit pc register that is literally named "pc". */
7293
7294 void
7295 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7296 {
7297 uint64_t newpc = pc;
7298
7299 supply_register_by_name (regcache, "pc", &newpc);
7300 }
7301
7302 /* Default implementation of linux_target_ops method "get_pc" for
7303 a 64-bit pc register that is literally named "pc". */
7304
7305 CORE_ADDR
7306 linux_get_pc_64bit (struct regcache *regcache)
7307 {
7308 uint64_t pc;
7309
7310 collect_register_by_name (regcache, "pc", &pc);
7311 if (debug_threads)
7312 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7313 return pc;
7314 }
7315
7316
7317 static struct target_ops linux_target_ops = {
7318 linux_create_inferior,
7319 linux_post_create_inferior,
7320 linux_attach,
7321 linux_kill,
7322 linux_detach,
7323 linux_mourn,
7324 linux_join,
7325 linux_thread_alive,
7326 linux_resume,
7327 linux_wait,
7328 linux_fetch_registers,
7329 linux_store_registers,
7330 linux_prepare_to_access_memory,
7331 linux_done_accessing_memory,
7332 linux_read_memory,
7333 linux_write_memory,
7334 linux_look_up_symbols,
7335 linux_request_interrupt,
7336 linux_read_auxv,
7337 linux_supports_z_point_type,
7338 linux_insert_point,
7339 linux_remove_point,
7340 linux_stopped_by_sw_breakpoint,
7341 linux_supports_stopped_by_sw_breakpoint,
7342 linux_stopped_by_hw_breakpoint,
7343 linux_supports_stopped_by_hw_breakpoint,
7344 linux_supports_hardware_single_step,
7345 linux_stopped_by_watchpoint,
7346 linux_stopped_data_address,
7347 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7348 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7349 && defined(PT_TEXT_END_ADDR)
7350 linux_read_offsets,
7351 #else
7352 NULL,
7353 #endif
7354 #ifdef USE_THREAD_DB
7355 thread_db_get_tls_address,
7356 #else
7357 NULL,
7358 #endif
7359 linux_qxfer_spu,
7360 hostio_last_error_from_errno,
7361 linux_qxfer_osdata,
7362 linux_xfer_siginfo,
7363 linux_supports_non_stop,
7364 linux_async,
7365 linux_start_non_stop,
7366 linux_supports_multi_process,
7367 linux_supports_fork_events,
7368 linux_supports_vfork_events,
7369 linux_supports_exec_events,
7370 linux_handle_new_gdb_connection,
7371 #ifdef USE_THREAD_DB
7372 thread_db_handle_monitor_command,
7373 #else
7374 NULL,
7375 #endif
7376 linux_common_core_of_thread,
7377 linux_read_loadmap,
7378 linux_process_qsupported,
7379 linux_supports_tracepoints,
7380 linux_read_pc,
7381 linux_write_pc,
7382 linux_thread_stopped,
7383 NULL,
7384 linux_pause_all,
7385 linux_unpause_all,
7386 linux_stabilize_threads,
7387 linux_install_fast_tracepoint_jump_pad,
7388 linux_emit_ops,
7389 linux_supports_disable_randomization,
7390 linux_get_min_fast_tracepoint_insn_len,
7391 linux_qxfer_libraries_svr4,
7392 linux_supports_agent,
7393 #ifdef HAVE_LINUX_BTRACE
7394 linux_supports_btrace,
7395 linux_enable_btrace,
7396 linux_low_disable_btrace,
7397 linux_low_read_btrace,
7398 linux_low_btrace_conf,
7399 #else
7400 NULL,
7401 NULL,
7402 NULL,
7403 NULL,
7404 NULL,
7405 #endif
7406 linux_supports_range_stepping,
7407 linux_proc_pid_to_exec_file,
7408 linux_mntns_open_cloexec,
7409 linux_mntns_unlink,
7410 linux_mntns_readlink,
7411 linux_breakpoint_kind_from_pc,
7412 linux_sw_breakpoint_from_kind,
7413 linux_proc_tid_get_name,
7414 linux_breakpoint_kind_from_current_state,
7415 linux_supports_software_single_step,
7416 linux_supports_catch_syscall,
7417 linux_get_ipa_tdesc_idx,
7418 };
7419
7420 #ifdef HAVE_LINUX_REGSETS
7421 void
7422 initialize_regsets_info (struct regsets_info *info)
7423 {
7424 for (info->num_regsets = 0;
7425 info->regsets[info->num_regsets].size >= 0;
7426 info->num_regsets++)
7427 ;
7428 }
7429 #endif
7430
7431 void
7432 initialize_low (void)
7433 {
7434 struct sigaction sigchld_action;
7435
7436 memset (&sigchld_action, 0, sizeof (sigchld_action));
7437 set_target_ops (&linux_target_ops);
7438
7439 linux_ptrace_init_warnings ();
7440
7441 sigchld_action.sa_handler = sigchld_handler;
7442 sigemptyset (&sigchld_action.sa_mask);
7443 sigchld_action.sa_flags = SA_RESTART;
7444 sigaction (SIGCHLD, &sigchld_action, NULL);
7445
7446 initialize_low_arch ();
7447
7448 linux_check_ptrace_features ();
7449 }