linux-nat: Add function lwp_is_stepping
gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "signals-state-save-restore.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include "nat/gdb_ptrace.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include <inttypes.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* Does the current host support PTRACE_GETREGSET?  */
int have_ptrace_getregset = -1;

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->stepping;
}

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

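/* Illustrative sketch (not part of the original file): how the two
   helpers above cooperate.  A stop for an as-yet-unclaimed child is
   recorded with add_to_pid_list; whoever later claims the child calls
   pull_pid_from_list, which returns 1 and unlinks/frees the entry on
   a hit, or 0 on a miss.  Kept under #if 0 so it is never compiled.  */
#if 0
static void
example_pid_list_usage (void)
{
  int status;

  /* Record that PID 1234 stopped with a hypothetical wait status.  */
  add_to_pid_list (&stopped_pids, 1234, 0x137f);

  /* Later, fetch (and discard) the recorded status.  */
  if (pull_pid_from_list (&stopped_pids, 1234, &status))
    printf ("pid 1234 had pending status 0x%x\n", status);
}
#endif
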
enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static void unsuspend_all_lwps (struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static void linux_mourn (struct process_info *process);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int lwp_is_marked_dead (struct lwp_info *lwp);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
static void complete_ongoing_step_over (void);
static int linux_low_ptrace_options (int attached);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);
static int proceed_one_lwp (struct inferior_list_entry *entry, void *except);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  */

static int
can_hardware_single_step (void)
{
  if (the_low_target.supports_hardware_single_step != NULL)
    return the_low_target.supports_hardware_single_step ();
  else
    return 0;
}

/* True if the low target can software single-step.  Such targets
   implement the GET_NEXT_PCS callback.  */

static int
can_software_single_step (void)
{
  return (the_low_target.get_next_pcs != NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not an ELF file at all.  Store the
   machine type in *MACHINE.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

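/* Illustrative sketch (not part of the original file): querying the
   ELF class of a live process's executable via the helper above, and
   decoding its tri-state result.  Kept under #if 0 so it is never
   compiled.  */
#if 0
static void
example_elf_64_query (int pid)
{
  unsigned int machine;
  int res = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (res == 1)
    printf ("pid %d runs a 64-bit ELF (e_machine %u)\n", pid, machine);
  else if (res == 0)
    printf ("pid %d runs a 32-bit ELF\n", pid);
  else
    printf ("pid %d: /proc/%d/exe not accessible\n", pid, pid);
}
#endif
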
static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = XCNEW (struct process_info_private);

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Call the target arch_setup function on the current thread.  */

static void
linux_arch_setup (void)
{
  the_low_target.arch_setup ();
}

/* Call the target arch_setup function on THREAD.  */

static void
linux_arch_setup_thread (struct thread_info *thread)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = thread;

  linux_arch_setup ();

  current_thread = saved_thread;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone,
   fork, or vfork event, we need to add the new LWP to our list
   (and return 0 so as not to report the trap to higher layers).
   If we see an exec event, we will modify ORIG_EVENT_LWP to point
   to a new LWP representing the new program.  */

static int
handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
{
  struct lwp_info *event_lwp = *orig_event_lwp;
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;

	  /* If we're suspending all threads, leave this one suspended
	     too.  If the fork/clone parent is stepping over a breakpoint,
	     all other threads have been suspended already.  Leave the
	     child suspended too.  */
	  if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	      || event_lwp->bp_reinsert != 0)
	    {
	      if (debug_threads)
		debug_printf ("HEW: leaving child suspended\n");
	      child_lwp->suspended = 1;
	    }

	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;

	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ()
	      && event == PTRACE_EVENT_VFORK)
	    {
	      /* If we leave single-step breakpoints there, the child will
		 hit them, so uninsert single-step breakpoints from the
		 parent (and child).  Once the vfork child is done,
		 reinsert them back in the parent.  */
	      uninsert_single_step_breakpoints (event_thr);
	    }

	  clone_all_breakpoints (child_thr, event_thr);

	  tdesc = XNEW (struct target_desc);
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* If the parent thread is doing a step-over with single-step
	     breakpoints, the list of single-step breakpoints is cloned
	     from the parent's.  Remove them from the child process.
	     In the case of vfork, we'll reinsert them once the vforked
	     child is done.  */
	  if (event_lwp->bp_reinsert != 0
	      && can_software_single_step ())
	    {
	      /* The child process is forked and stopped, so it is safe
		 to access its memory without stopping all other threads
		 from other processes.  */
	      delete_single_step_breakpoints (child_thr);

	      gdb_assert (has_single_step_breakpoints (event_thr));
	      gdb_assert (!has_single_step_breakpoints (child_thr));
	    }

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  If the fork/clone parent is stepping over a breakpoint,
	 all other threads have been suspended already.  Leave the
	 child suspended too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
	  || event_lwp->bp_reinsert != 0)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
      else if (report_thread_events)
	{
	  new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
	{
	  reinsert_single_step_breakpoints (event_thr);

	  gdb_assert (has_single_step_breakpoints (event_thr));
	}

      /* Report the event.  */
      return 0;
    }
  else if (event == PTRACE_EVENT_EXEC && report_exec_events)
    {
      struct process_info *proc;
      VEC (int) *syscalls_to_catch;
      ptid_t event_ptid;
      pid_t event_pid;

      if (debug_threads)
	{
	  debug_printf ("HEW: Got exec event from LWP %ld\n",
			lwpid_of (event_thr));
	}

      /* Get the event ptid.  */
      event_ptid = ptid_of (event_thr);
      event_pid = ptid_get_pid (event_ptid);

      /* Save the syscall list from the execing process.  */
      proc = get_thread_process (event_thr);
      syscalls_to_catch = proc->syscalls_to_catch;
      proc->syscalls_to_catch = NULL;

      /* Delete the execing process and all its threads.  */
      linux_mourn (proc);
      current_thread = NULL;

      /* Create a new process/lwp/thread.  */
      proc = linux_add_process (event_pid, 0);
      event_lwp = add_lwp (event_ptid);
      event_thr = get_lwp_thread (event_lwp);
      gdb_assert (current_thread == event_thr);
      linux_arch_setup_thread (event_thr);

      /* Set the event status.  */
      event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
      event_lwp->waitstatus.value.execd_pathname
	= xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));

      /* Mark the exec status as pending.  */
      event_lwp->stopped = 1;
      event_lwp->status_pending_p = 1;
      event_lwp->status_pending = wstat;
      event_thr->last_resume_kind = resume_continue;
      event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;

      /* Update syscall state in the new lwp, effectively mid-syscall too.  */
      event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

      /* Restore the list to catch.  Don't rely on the client, which is free
	 to avoid sending a new list when the architecture doesn't change.
	 Also, for ANY_SYSCALL, the architecture doesn't really matter.  */
      proc->syscalls_to_catch = syscalls_to_catch;

      /* Report the event.  */
      *orig_event_lwp = event_lwp;
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}

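/* Illustrative sketch (not part of the original file): the core of the
   extended-event dance above, reduced to a standalone fragment.  After
   waitpid reports a SIGTRAP whose high status bits carry
   PTRACE_EVENT_CLONE/FORK/VFORK, PTRACE_GETEVENTMSG yields the new
   LWP's id.  Kept under #if 0 so it is never compiled.  */
#if 0
static void
example_get_new_lwp (pid_t parent, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);

  if (event == PTRACE_EVENT_CLONE
      || event == PTRACE_EVENT_FORK
      || event == PTRACE_EVENT_VFORK)
    {
      unsigned long new_pid;

      ptrace (PTRACE_GETEVENTMSG, parent, (PTRACE_TYPE_ARG3) 0, &new_pid);
      /* NEW_PID now names the freshly created LWP; it will report a
	 SIGSTOP of its own before it can be ptrace'd further.  */
    }
}
#endif
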
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SYSCALL_SIGTRAP.
   Fill *SYSNO with the syscall nr trapped.  */

static void
get_syscall_trapinfo (struct lwp_info *lwp, int *sysno)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;

  if (the_low_target.get_syscall_trapinfo == NULL)
    {
      /* If we cannot get the syscall trapinfo, report an unknown
	 system call number.  */
      *sysno = UNKNOWN_SYSCALL;
      return;
    }

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  (*the_low_target.get_syscall_trapinfo) (regcache, sysno);

  if (debug_threads)
    debug_printf ("get_syscall_trapinfo sysno %d\n", *sysno);

  current_thread = saved_thread;
}

static int check_stopped_by_watchpoint (struct lwp_info *child);

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap, check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  Returns true if we now have the LWP's stop PC.  */

static int
save_stop_reason (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      if (!check_stopped_by_watchpoint (lwp))
		lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;

  if (hardware_breakpoint_inserted_here (pc))
    lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;

  if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
    check_stopped_by_watchpoint (lwp);
#endif

  if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      /* Update this so we record the correct stop PC below.  */
      pc = sw_breakpoint_pc;
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }
  else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by trace\n",
			target_pid_to_str (ptid_of (thr)));
	}
    }

  lwp->stop_pc = pc;
  current_thread = saved_thread;
  return 1;
}

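/* Illustrative sketch (not part of the original file): why the PC is
   backed up after a software breakpoint.  On x86, for instance, the
   one-byte 0xCC trap instruction has already executed when the
   SIGTRAP arrives, so the reported PC points one byte past the
   breakpoint address; subtracting decr_pc_after_break recovers it.
   Kept under #if 0 so it is never compiled.  */
#if 0
static CORE_ADDR
example_breakpoint_pc (struct regcache *regcache)
{
  CORE_ADDR pc = (*the_low_target.get_pc) (regcache);

  /* decr_pc_after_break is 1 on x86, and 0 on architectures whose
     trap instruction leaves the PC at the breakpoint address.  */
  return pc - the_low_target.decr_pc_after_break;
}
#endif
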
static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = XCNEW (struct lwp_info);

  lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      restore_original_signals_state ();

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

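/* Illustrative sketch (not part of the original file): the minimal
   fork/PTRACE_TRACEME/exec handshake that linux_create_inferior builds
   on.  The exec raises a SIGTRAP that the parent collects with
   waitpid, at which point the child is stopped under ptrace control.
   Standalone, kept under #if 0 so it is never compiled here.  */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t
example_spawn_traced (const char *program, char *const argv[])
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execv (program, argv);
      _exit (0177);		/* Only reached if the exec failed.  */
    }

  int status;
  waitpid (pid, &status, 0);	/* Child stops at the exec's SIGTRAP.  */
  return pid;
}
#endif
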
/* Implement the post_create_inferior target_ops method.  */

static void
linux_post_create_inferior (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_thread);

  linux_arch_setup ();

  if (lwp->must_set_ptrace_flags)
    {
      struct process_info *proc = current_process ();
      int options = linux_low_ptrace_options (proc->attached);

      linux_enable_event_reporting (lwpid_of (current_thread), options);
      lwp->must_set_ptrace_flags = 0;
    }
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

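/* Illustrative sketch (not part of the original file): the basic
   PTRACE_ATTACH pattern underlying linux_attach_lwp.  ATTACH queues a
   SIGSTOP; waiting with __WALL collects it, after which further ptrace
   requests are legal on the LWP.  Kept under #if 0 so it is never
   compiled.  */
#if 0
static int
example_attach_and_wait (int lwpid)
{
  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return errno;

  int status;
  if (my_waitpid (lwpid, &status, __WALL) != lwpid
      || !WIFSTOPPED (status))
    return ESRCH;		/* Raced with the LWP exiting.  */

  return 0;
}
#endif
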
/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

static void async_file_mark (void);

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  struct process_info *proc;
  struct thread_info *initial_thread;
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  proc = linux_add_process (pid, 1);

  /* Don't ignore the initial SIGSTOP if we just attached to this
     process.  It will be collected by wait shortly.  */
  initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
  initial_thread->last_resume_kind = resume_stop;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);

  /* GDB will shortly read the xml target description for this
     process, to figure out the process' architecture.  But the target
     description is only filled in when the first process/thread in
     the thread group reports its initial PTRACE_ATTACH SIGSTOP.  Do
     that now, otherwise, if GDB is fast enough, it could read the
     target description _before_ that initial stop.  */
  if (non_stop)
    {
      struct lwp_info *lwp;
      int wstat, lwpid;
      ptid_t pid_ptid = pid_to_ptid (pid);

      lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
					     &wstat, __WALL);
      gdb_assert (lwpid > 0);

      lwp = find_lwp_pid (pid_to_ptid (lwpid));

      if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
	{
	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}

      initial_thread->last_resume_kind = resume_continue;

      async_file_mark ();

      gdb_assert (proc->tdesc != NULL);
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = (struct counter *) args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     debugging programs using raw clone without CLONE_THREAD, we send
     one for each thread.  For years, we used PTRACE_KILL only, so
     we're being a bit paranoid about some old kernels where
     PTRACE_KILL might work better (dubious if there are any such, but
     that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
     second, and so we're fine everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.
      */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

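/* Illustrative sketch (not part of the original file): the wait-status
   decoding get_detach_signal relies on.  WIFSTOPPED distinguishes a
   signal stop from an exit, and WSTOPSIG extracts the host signal
   number, which gdb_signal_from_host maps to GDB's portable
   numbering.  Kept under #if 0 so it is never compiled.  */
#if 0
static void
example_decode_wait_status (int status)
{
  if (WIFSTOPPED (status))
    {
      enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));

      printf ("stopped by %s\n", gdb_signal_to_string (signo));
    }
  else if (WIFEXITED (status))
    printf ("exited with code %d\n", WEXITSTATUS (status));
  else if (WIFSIGNALED (status))
    printf ("killed by signal %d\n", WTERMSIG (status));
}
#endif
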
/* Detach from LWP.  */

static void
linux_detach_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);
  int sig;
  int lwpid;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  TRY
    {
      /* Flush any pending changes to the process's registers.  */
      regcache_invalidate_thread (thread);

      /* Finally, let it resume.  */
      if (the_low_target.prepare_to_resume != NULL)
	the_low_target.prepare_to_resume (lwp);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lwp))
	throw_exception (ex);
    }
  END_CATCH

  lwpid = lwpid_of (thread);
  if (ptrace (PTRACE_DETACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (lwpid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       lwpid, strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       lwpid, status);
	    }
	}
      else
	{
	  error (_("Can't detach %s: %s"),
		 target_pid_to_str (ptid_of (thread)),
		 strerror (save_errno));
	}
    }
  else if (debug_threads)
    {
      debug_printf ("PTRACE_DETACH (%s, %s, 0) (OK)\n",
		    target_pid_to_str (ptid_of (thread)),
		    strsignal (sig));
    }

  delete_lwp (lwp);
}

/* Callback for find_inferior.  Detaches from non-leader threads of a
   given process.  */

static int
linux_detach_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = *(int *) args;
  int lwpid = lwpid_of (thread);

  /* Skip other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (ptid_get_pid (entry->id) == lwpid)
    return 0;

  linux_detach_one_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;
  struct lwp_info *main_lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If a step-over is already in progress, let it finish first;
     otherwise nesting a stabilize_threads operation on top gets real
     messy.  */
  complete_ongoing_step_over ();

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  /* Detach from the clone lwps first.  If the thread group exits just
     while we're detaching, we must reap the clone lwps before we're
     able to reap the leader.  */
  find_inferior (&all_threads, linux_detach_lwp_callback, &pid);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));
  linux_detach_one_lwp (main_lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = (struct process_info *) proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp_is_marked_dead (lwp);
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Returns true if LWP is resumed from the client's perspective.  */

static int
lwp_resumed (struct lwp_info *lwp)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind != resume_stop)
    return 1;

  /* Did gdb send us a `vCont;t', but we haven't reported the
     corresponding stop to gdb yet?  If so, the thread is still
     resumed/running from gdb's perspective.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
    return 1;

  return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (!lwp_resumed (lp))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferior to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferior should continue iterating.  */
1888
1889 static int
1890 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1891 {
1892 struct iterate_over_lwps_args *args
1893 = (struct iterate_over_lwps_args *) args_p;
1894
1895 if (ptid_match (entry->id, args->filter))
1896 {
1897 struct thread_info *thr = (struct thread_info *) entry;
1898 struct lwp_info *lwp = get_thread_lwp (thr);
1899
1900 return (*args->callback) (lwp, args->data);
1901 }
1902
1903 return 0;
1904 }
1905
1906 /* See nat/linux-nat.h. */
1907
1908 struct lwp_info *
1909 iterate_over_lwps (ptid_t filter,
1910 iterate_over_lwps_ftype callback,
1911 void *data)
1912 {
1913 struct iterate_over_lwps_args args = {filter, callback, data};
1914 struct inferior_list_entry *entry;
1915
1916 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1917 if (entry == NULL)
1918 return NULL;
1919
1920 return get_thread_lwp ((struct thread_info *) entry);
1921 }
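
/* Illustrative sketch (hypothetical helper, not part of gdbserver):
   count how many LWPs of thread group PID are currently stopped, using
   the iterate_over_lwps API defined above.  The callback returns 0 so
   that iteration visits every matching LWP; returning nonzero instead
   would stop the walk and make iterate_over_lwps return that LWP.  */

static int
count_stopped_lwps_cb (struct lwp_info *lwp, void *data)
{
  int *count = (int *) data;

  if (lwp->stopped)
    (*count)++;

  return 0; /* Keep iterating.  */
}

static int ATTRIBUTE_UNUSED
count_stopped_lwps (int pid)
{
  int count = 0;

  /* A pid-only ptid filter matches every LWP of the thread group.  */
  iterate_over_lwps (pid_to_ptid (pid), count_stopped_lwps_cb, &count);
  return count;
}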
1922
1923 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1924 their exits until all other threads in the group have exited. */
1925
1926 static void
1927 check_zombie_leaders (void)
1928 {
1929 struct process_info *proc, *tmp;
1930
1931 ALL_PROCESSES (proc, tmp)
1932 {
1933 pid_t leader_pid = pid_of (proc);
1934 struct lwp_info *leader_lp;
1935
1936 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1937
1938 if (debug_threads)
1939 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1940 "num_lwps=%d, zombie=%d\n",
1941 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1942 linux_proc_pid_is_zombie (leader_pid));
1943
1944 if (leader_lp != NULL && !leader_lp->stopped
1945 /* Check if there are other threads in the group, as we may
1946 have raced with the inferior simply exiting. */
1947 && !last_thread_of_process_p (leader_pid)
1948 && linux_proc_pid_is_zombie (leader_pid))
1949 {
1950 /* A leader zombie can mean one of two things:
1951
1952 - It exited, and there's an exit status pending and
1953 available, or only the leader exited (not the whole
1954 program). In the latter case, we can't waitpid the
1955 leader's exit status until all other threads are gone.
1956
1957 - There are 3 or more threads in the group, and a thread
1958 other than the leader exec'd. On an exec, the Linux
1959 kernel destroys all other threads (except the execing
1960 one) in the thread group, and resets the execing thread's
1961 tid to the tgid. No exit notification is sent for the
1962 execing thread -- from the ptracer's perspective, it
1963 appears as though the execing thread just vanishes.
1964 Until we reap all other threads except the leader and the
1965 execing thread, the leader will be zombie, and the
1966 execing thread will be in `D (disc sleep)'. As soon as
1967 all other threads are reaped, the execing thread changes
1968 it's tid to the tgid, and the previous (zombie) leader
1969 vanishes, giving place to the "new" leader. We could try
1970 distinguishing the exit and exec cases, by waiting once
1971 more, and seeing if something comes out, but it doesn't
1972 sound useful. The previous leader _does_ go away, and
1973 we'll re-add the new one once we see the exec event
1974 (which is just the same as what would happen if the
1975 previous leader did exit voluntarily before some other
1976 thread execs). */
1977
1978 if (debug_threads)
1979 fprintf (stderr,
1980 "CZL: Thread group leader %d zombie "
1981 "(it exited, or another thread execd).\n",
1982 leader_pid);
1983
1984 delete_lwp (leader_lp);
1985 }
1986 }
1987 }
1988
1989 /* Callback for `find_inferior'. Returns the first LWP that is not
1990 stopped. ARG is a PTID filter. */
1991
1992 static int
1993 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1994 {
1995 struct thread_info *thr = (struct thread_info *) entry;
1996 struct lwp_info *lwp;
1997 ptid_t filter = *(ptid_t *) arg;
1998
1999 if (!ptid_match (ptid_of (thr), filter))
2000 return 0;
2001
2002 lwp = get_thread_lwp (thr);
2003 if (!lwp->stopped)
2004 return 1;
2005
2006 return 0;
2007 }
2008
2009 /* Increment LWP's suspend count. */
2010
2011 static void
2012 lwp_suspended_inc (struct lwp_info *lwp)
2013 {
2014 lwp->suspended++;
2015
2016 if (debug_threads && lwp->suspended > 4)
2017 {
2018 struct thread_info *thread = get_lwp_thread (lwp);
2019
2020 debug_printf ("LWP %ld has a suspiciously high suspend count,"
2021 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
2022 }
2023 }
2024
2025 /* Decrement LWP's suspend count. */
2026
2027 static void
2028 lwp_suspended_decr (struct lwp_info *lwp)
2029 {
2030 lwp->suspended--;
2031
2032 if (lwp->suspended < 0)
2033 {
2034 struct thread_info *thread = get_lwp_thread (lwp);
2035
2036 internal_error (__FILE__, __LINE__,
2037 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
2038 lwp->suspended);
2039 }
2040 }
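
/* Illustrative sketch (hypothetical): the suspend count acts as a
   nestable lock that keeps an LWP stopped across operations that pause
   and unpause all threads, exactly as handle_tracepoints does below.
   Unbalanced decrements trip the internal_error above.  */

static void ATTRIBUTE_UNUSED
example_suspend_bracket (struct lwp_info *lwp)
{
  lwp_suspended_inc (lwp);

  /* ... do work that may stop/unstop all LWPs; the raised count
     prevents LWP from being set running behind our back ...  */

  lwp_suspended_decr (lwp);
}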
2041
2042 /* This function should only be called if the LWP got a SIGTRAP.
2043
2044 Handle any tracepoint steps or hits. Return 1 if a tracepoint
2045 event was handled, 0 otherwise. */
2046
2047 static int
2048 handle_tracepoints (struct lwp_info *lwp)
2049 {
2050 struct thread_info *tinfo = get_lwp_thread (lwp);
2051 int tpoint_related_event = 0;
2052
2053 gdb_assert (lwp->suspended == 0);
2054
2055 /* If this tracepoint hit causes a tracing stop, we'll immediately
2056 uninsert tracepoints. To do this, we temporarily pause all
2057 threads, unpatch away, and then unpause threads. We need to make
2058 sure the unpausing doesn't resume LWP too. */
2059 lwp_suspended_inc (lwp);
2060
2061 /* And we need to be sure that any all-threads-stopping doesn't try
2062 to move threads out of the jump pads, as it could deadlock the
2063 inferior (LWP could be in the jump pad, maybe even holding the
2064 lock). */
2065
2066 /* Do any necessary step collect actions. */
2067 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
2068
2069 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2070
2071 /* See if we just hit a tracepoint and do its main collect
2072 actions. */
2073 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2074
2075 lwp_suspended_decr (lwp);
2076
2077 gdb_assert (lwp->suspended == 0);
2078 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2079
2080 if (tpoint_related_event)
2081 {
2082 if (debug_threads)
2083 debug_printf ("got a tracepoint event\n");
2084 return 1;
2085 }
2086
2087 return 0;
2088 }
2089
2090 /* Convenience wrapper. Returns nonzero if LWP is presently collecting
2091 a fast tracepoint. */
2092
2093 static int
2094 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2095 struct fast_tpoint_collect_status *status)
2096 {
2097 CORE_ADDR thread_area;
2098 struct thread_info *thread = get_lwp_thread (lwp);
2099
2100 if (the_low_target.get_thread_area == NULL)
2101 return 0;
2102
2103 /* Get the thread area address. This is used to recognize which
2104 thread is which when tracing with the in-process agent library.
2105 We don't read anything from the address, and treat it as opaque;
2106 it's the address itself that we assume is unique per-thread. */
2107 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2108 return 0;
2109
2110 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2111 }
2112
2113 /* The reason we resume in the caller is that we want to be able
2114 to pass lwp->status_pending as WSTAT, and we need to clear
2115 status_pending_p before resuming; otherwise, linux_resume_one_lwp
2116 refuses to resume. */
2117
2118 static int
2119 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2120 {
2121 struct thread_info *saved_thread;
2122
2123 saved_thread = current_thread;
2124 current_thread = get_lwp_thread (lwp);
2125
2126 if ((wstat == NULL
2127 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2128 && supports_fast_tracepoints ()
2129 && agent_loaded_p ())
2130 {
2131 struct fast_tpoint_collect_status status;
2132 int r;
2133
2134 if (debug_threads)
2135 debug_printf ("Checking whether LWP %ld needs to move out of the "
2136 "jump pad.\n",
2137 lwpid_of (current_thread));
2138
2139 r = linux_fast_tracepoint_collecting (lwp, &status);
2140
2141 if (wstat == NULL
2142 || (WSTOPSIG (*wstat) != SIGILL
2143 && WSTOPSIG (*wstat) != SIGFPE
2144 && WSTOPSIG (*wstat) != SIGSEGV
2145 && WSTOPSIG (*wstat) != SIGBUS))
2146 {
2147 lwp->collecting_fast_tracepoint = r;
2148
2149 if (r != 0)
2150 {
2151 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2152 {
2153 /* Haven't executed the original instruction yet.
2154 Set breakpoint there, and wait till it's hit,
2155 then single-step until exiting the jump pad. */
2156 lwp->exit_jump_pad_bkpt
2157 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2158 }
2159
2160 if (debug_threads)
2161 debug_printf ("Checking whether LWP %ld needs to move out of "
2162 "the jump pad...it does\n",
2163 lwpid_of (current_thread));
2164 current_thread = saved_thread;
2165
2166 return 1;
2167 }
2168 }
2169 else
2170 {
2171 /* If we get a synchronous signal while collecting, *and*
2172 while executing the (relocated) original instruction,
2173 reset the PC to point at the tpoint address, before
2174 reporting to GDB. Otherwise, it's an IPA lib bug: just
2175 report the signal to GDB, and pray for the best. */
2176
2177 lwp->collecting_fast_tracepoint = 0;
2178
2179 if (r != 0
2180 && (status.adjusted_insn_addr <= lwp->stop_pc
2181 && lwp->stop_pc < status.adjusted_insn_addr_end))
2182 {
2183 siginfo_t info;
2184 struct regcache *regcache;
2185
2186 /* The si_addr on a few signals references the address
2187 of the faulting instruction. Adjust that as
2188 well. */
2189 if ((WSTOPSIG (*wstat) == SIGILL
2190 || WSTOPSIG (*wstat) == SIGFPE
2191 || WSTOPSIG (*wstat) == SIGBUS
2192 || WSTOPSIG (*wstat) == SIGSEGV)
2193 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2194 (PTRACE_TYPE_ARG3) 0, &info) == 0
2195 /* Final check just to make sure we don't clobber
2196 the siginfo of non-kernel-sent signals. */
2197 && (uintptr_t) info.si_addr == lwp->stop_pc)
2198 {
2199 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2200 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2201 (PTRACE_TYPE_ARG3) 0, &info);
2202 }
2203
2204 regcache = get_thread_regcache (current_thread, 1);
2205 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2206 lwp->stop_pc = status.tpoint_addr;
2207
2208 /* Cancel any fast tracepoint lock this thread was
2209 holding. */
2210 force_unlock_trace_buffer ();
2211 }
2212
2213 if (lwp->exit_jump_pad_bkpt != NULL)
2214 {
2215 if (debug_threads)
2216 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2217 "stopping all threads momentarily.\n");
2218
2219 stop_all_lwps (1, lwp);
2220
2221 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2222 lwp->exit_jump_pad_bkpt = NULL;
2223
2224 unstop_all_lwps (1, lwp);
2225
2226 gdb_assert (lwp->suspended >= 0);
2227 }
2228 }
2229 }
2230
2231 if (debug_threads)
2232 debug_printf ("Checking whether LWP %ld needs to move out of the "
2233 "jump pad...no\n",
2234 lwpid_of (current_thread));
2235
2236 current_thread = saved_thread;
2237 return 0;
2238 }
2239
2240 /* Enqueue one signal in the "signals to report later when out of the
2241 jump pad" list. */
2242
2243 static void
2244 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2245 {
2246 struct pending_signals *p_sig;
2247 struct thread_info *thread = get_lwp_thread (lwp);
2248
2249 if (debug_threads)
2250 debug_printf ("Deferring signal %d for LWP %ld.\n",
2251 WSTOPSIG (*wstat), lwpid_of (thread));
2252
2253 if (debug_threads)
2254 {
2255 struct pending_signals *sig;
2256
2257 for (sig = lwp->pending_signals_to_report;
2258 sig != NULL;
2259 sig = sig->prev)
2260 debug_printf (" Already queued %d\n",
2261 sig->signal);
2262
2263 debug_printf (" (no more currently queued signals)\n");
2264 }
2265
2266 /* Don't enqueue non-RT signals if they are already in the deferred
2267 queue. (SIGSTOP being the easiest signal to see ending up here
2268 twice.) */
2269 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2270 {
2271 struct pending_signals *sig;
2272
2273 for (sig = lwp->pending_signals_to_report;
2274 sig != NULL;
2275 sig = sig->prev)
2276 {
2277 if (sig->signal == WSTOPSIG (*wstat))
2278 {
2279 if (debug_threads)
2280 debug_printf ("Not requeuing already queued non-RT signal %d"
2281 " for LWP %ld\n",
2282 sig->signal,
2283 lwpid_of (thread));
2284 return;
2285 }
2286 }
2287 }
2288
2289 p_sig = XCNEW (struct pending_signals);
2290 p_sig->prev = lwp->pending_signals_to_report;
2291 p_sig->signal = WSTOPSIG (*wstat);
2292
2293 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2294 &p_sig->info);
2295
2296 lwp->pending_signals_to_report = p_sig;
2297 }
2298
2299 /* Dequeue one signal from the "signals to report later when out of
2300 the jump pad" list. */
2301
2302 static int
2303 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2304 {
2305 struct thread_info *thread = get_lwp_thread (lwp);
2306
2307 if (lwp->pending_signals_to_report != NULL)
2308 {
2309 struct pending_signals **p_sig;
2310
2311 p_sig = &lwp->pending_signals_to_report;
2312 while ((*p_sig)->prev != NULL)
2313 p_sig = &(*p_sig)->prev;
2314
2315 *wstat = W_STOPCODE ((*p_sig)->signal);
2316 if ((*p_sig)->info.si_signo != 0)
2317 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2318 &(*p_sig)->info);
2319 free (*p_sig);
2320 *p_sig = NULL;
2321
2322 if (debug_threads)
2323 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2324 WSTOPSIG (*wstat), lwpid_of (thread));
2325
2326 if (debug_threads)
2327 {
2328 struct pending_signals *sig;
2329
2330 for (sig = lwp->pending_signals_to_report;
2331 sig != NULL;
2332 sig = sig->prev)
2333 debug_printf (" Still queued %d\n",
2334 sig->signal);
2335
2336 debug_printf (" (no more queued signals)\n");
2337 }
2338
2339 return 1;
2340 }
2341
2342 return 0;
2343 }
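
/* Illustrative sketch (hypothetical, never called): the deferred-signal
   queue round trip.  The wait status is fabricated with W_STOPCODE here;
   in real use it comes from waitpid.  Note the queue is FIFO: enqueue
   prepends to the list, while dequeue walks to the tail, so signals are
   reported in arrival order.  */

static void ATTRIBUTE_UNUSED
example_deferred_signal_roundtrip (struct lwp_info *lwp)
{
  int wstat = W_STOPCODE (SIGUSR1);

  /* While the LWP is in the jump pad, park the signal (this also
     caches its siginfo via PTRACE_GETSIGINFO)...  */
  enqueue_one_deferred_signal (lwp, &wstat);

  /* ... and once it is out, pull the oldest signal back; *wstat is
     rewritten and the cached siginfo restored via PTRACE_SETSIGINFO.  */
  if (dequeue_one_deferred_signal (lwp, &wstat))
    debug_printf ("would report deferred signal %d\n", WSTOPSIG (wstat));
}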
2344
2345 /* Fetch the possibly triggered data watchpoint info and store it in
2346 CHILD.
2347
2348 On some archs, like x86, that use debug registers to set
2349 watchpoints, the only way to know which watched address
2350 trapped may be to check the register that is used to select
2351 which address to watch. Problem is, between setting the watchpoint
2352 and reading back which data address trapped, the user may change
2353 the set of watchpoints, and, as a consequence, GDB changes the
2354 debug registers in the inferior. To avoid reading back a stale
2355 stopped-data-address when that happens, we cache in CHILD the fact
2356 that a watchpoint trapped, and the corresponding data address, as
2357 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2358 registers meanwhile, we have the cached data we can rely on. */
2359
2360 static int
2361 check_stopped_by_watchpoint (struct lwp_info *child)
2362 {
2363 if (the_low_target.stopped_by_watchpoint != NULL)
2364 {
2365 struct thread_info *saved_thread;
2366
2367 saved_thread = current_thread;
2368 current_thread = get_lwp_thread (child);
2369
2370 if (the_low_target.stopped_by_watchpoint ())
2371 {
2372 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2373
2374 if (the_low_target.stopped_data_address != NULL)
2375 child->stopped_data_address
2376 = the_low_target.stopped_data_address ();
2377 else
2378 child->stopped_data_address = 0;
2379 }
2380
2381 current_thread = saved_thread;
2382 }
2383
2384 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2385 }
2386
2387 /* Return the ptrace options that we want to try to enable. */
2388
2389 static int
2390 linux_low_ptrace_options (int attached)
2391 {
2392 int options = 0;
2393
2394 if (!attached)
2395 options |= PTRACE_O_EXITKILL;
2396
2397 if (report_fork_events)
2398 options |= PTRACE_O_TRACEFORK;
2399
2400 if (report_vfork_events)
2401 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2402
2403 if (report_exec_events)
2404 options |= PTRACE_O_TRACEEXEC;
2405
2406 options |= PTRACE_O_TRACESYSGOOD;
2407
2408 return options;
2409 }
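
/* Illustrative sketch (hypothetical, never called): the option mask
   built above is handed to linux_enable_event_reporting (see its use
   in linux_low_filter_event below), which applies it to the LWP with
   PTRACE_SETOPTIONS.  The lwpid 1234 is a made-up value.  */

static void ATTRIBUTE_UNUSED
example_enable_options (void)
{
  int options = linux_low_ptrace_options (/* attached */ 0);

  /* For a launched (non-attached) process this mask includes
     PTRACE_O_EXITKILL, plus PTRACE_O_TRACESYSGOOD and whatever
     fork/vfork/exec reporting GDB asked for.  */
  linux_enable_event_reporting (1234, options);
}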
2410
2411 /* Do low-level handling of the event, and check if we should go on
2412 and pass it to caller code. Return the affected lwp if we are, or
2413 NULL otherwise. */
2414
2415 static struct lwp_info *
2416 linux_low_filter_event (int lwpid, int wstat)
2417 {
2418 struct lwp_info *child;
2419 struct thread_info *thread;
2420 int have_stop_pc = 0;
2421
2422 child = find_lwp_pid (pid_to_ptid (lwpid));
2423
2424 /* Check for stop events reported by a process we didn't already
2425 know about - anything not already in our LWP list.
2426
2427 If we're expecting to receive stopped processes after
2428 fork, vfork, and clone events, then we'll just add the
2429 new one to our list and go back to waiting for the event
2430 to be reported - the stopped process might be returned
2431 from waitpid before or after the event is.
2432
2433 But note the case of a non-leader thread exec'ing after the
2434 leader has exited and gone from our lists (because
2435 check_zombie_leaders deleted it). The non-leader thread
2436 changes its tid to the tgid. */
2437
2438 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2439 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2440 {
2441 ptid_t child_ptid;
2442
2443 /* A multi-thread exec after we had seen the leader exiting. */
2444 if (debug_threads)
2445 {
2446 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2447 "after exec.\n", lwpid);
2448 }
2449
2450 child_ptid = ptid_build (lwpid, lwpid, 0);
2451 child = add_lwp (child_ptid);
2452 child->stopped = 1;
2453 current_thread = child->thread;
2454 }
2455
2456 /* If we didn't find a process, one of two things presumably happened:
2457 - A process we started and then detached from has exited. Ignore it.
2458 - A process we are controlling has forked and the new child's stop
2459 was reported to us by the kernel. Save its PID. */
2460 if (child == NULL && WIFSTOPPED (wstat))
2461 {
2462 add_to_pid_list (&stopped_pids, lwpid, wstat);
2463 return NULL;
2464 }
2465 else if (child == NULL)
2466 return NULL;
2467
2468 thread = get_lwp_thread (child);
2469
2470 child->stopped = 1;
2471
2472 child->last_status = wstat;
2473
2474 /* Check if the thread has exited. */
2475 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2476 {
2477 if (debug_threads)
2478 debug_printf ("LLFE: %d exited.\n", lwpid);
2479
2480 if (finish_step_over (child))
2481 {
2482 /* Unsuspend all other LWPs, and set them back running again. */
2483 unsuspend_all_lwps (child);
2484 }
2485
2486 /* If there is at least one more LWP, then the exit signal was
2487 not the end of the debugged application and should be
2488 ignored, unless GDB wants to hear about thread exits. */
2489 if (report_thread_events
2490 || last_thread_of_process_p (pid_of (thread)))
2491 {
2492 /* Events are serialized to the GDB core, and we can't
2493 report this one right now. Leave the status pending for
2494 the next time we're able to report it. */
2495 mark_lwp_dead (child, wstat);
2496 return child;
2497 }
2498 else
2499 {
2500 delete_lwp (child);
2501 return NULL;
2502 }
2503 }
2504
2505 gdb_assert (WIFSTOPPED (wstat));
2506
2507 if (WIFSTOPPED (wstat))
2508 {
2509 struct process_info *proc;
2510
2511 /* Architecture-specific setup after inferior is running. */
2512 proc = find_process_pid (pid_of (thread));
2513 if (proc->tdesc == NULL)
2514 {
2515 if (proc->attached)
2516 {
2517 /* This needs to happen after we have attached to the
2518 inferior and it is stopped for the first time, but
2519 before we access any inferior registers. */
2520 linux_arch_setup_thread (thread);
2521 }
2522 else
2523 {
2524 /* The process is started, but GDBserver will do
2525 architecture-specific setup after the program stops at
2526 the first instruction. */
2527 child->status_pending_p = 1;
2528 child->status_pending = wstat;
2529 return child;
2530 }
2531 }
2532 }
2533
2534 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2535 {
2536 struct process_info *proc = find_process_pid (pid_of (thread));
2537 int options = linux_low_ptrace_options (proc->attached);
2538
2539 linux_enable_event_reporting (lwpid, options);
2540 child->must_set_ptrace_flags = 0;
2541 }
2542
2543 /* Always update syscall_state, even if it will be filtered later. */
2544 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2545 {
2546 child->syscall_state
2547 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2548 ? TARGET_WAITKIND_SYSCALL_RETURN
2549 : TARGET_WAITKIND_SYSCALL_ENTRY);
2550 }
2551 else
2552 {
2553 /* Almost all other ptrace-stops are known to be outside of system
2554 calls, with further exceptions in handle_extended_wait. */
2555 child->syscall_state = TARGET_WAITKIND_IGNORE;
2556 }
2557
2558 /* Be careful to not overwrite stop_pc until save_stop_reason is
2559 called. */
2560 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2561 && linux_is_extended_waitstatus (wstat))
2562 {
2563 child->stop_pc = get_pc (child);
2564 if (handle_extended_wait (&child, wstat))
2565 {
2566 /* The event has been handled, so just return without
2567 reporting it. */
2568 return NULL;
2569 }
2570 }
2571
2572 if (linux_wstatus_maybe_breakpoint (wstat))
2573 {
2574 if (save_stop_reason (child))
2575 have_stop_pc = 1;
2576 }
2577
2578 if (!have_stop_pc)
2579 child->stop_pc = get_pc (child);
2580
2581 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2582 && child->stop_expected)
2583 {
2584 if (debug_threads)
2585 debug_printf ("Expected stop.\n");
2586 child->stop_expected = 0;
2587
2588 if (thread->last_resume_kind == resume_stop)
2589 {
2590 /* We want to report the stop to the core. Treat the
2591 SIGSTOP as a normal event. */
2592 if (debug_threads)
2593 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2594 target_pid_to_str (ptid_of (thread)));
2595 }
2596 else if (stopping_threads != NOT_STOPPING_THREADS)
2597 {
2598 /* Stopping threads. We don't want this SIGSTOP to end up
2599 pending. */
2600 if (debug_threads)
2601 debug_printf ("LLW: SIGSTOP caught for %s "
2602 "while stopping threads.\n",
2603 target_pid_to_str (ptid_of (thread)));
2604 return NULL;
2605 }
2606 else
2607 {
2608 /* This is a delayed SIGSTOP. Filter out the event. */
2609 if (debug_threads)
2610 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2611 child->stepping ? "step" : "continue",
2612 target_pid_to_str (ptid_of (thread)));
2613
2614 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2615 return NULL;
2616 }
2617 }
2618
2619 child->status_pending_p = 1;
2620 child->status_pending = wstat;
2621 return child;
2622 }
2623
2624 /* Return true if THREAD is doing hardware single step. */
2625
2626 static int
2627 maybe_hw_step (struct thread_info *thread)
2628 {
2629 if (can_hardware_single_step ())
2630 return 1;
2631 else
2632 {
2633 /* GDBserver must insert a single-step breakpoint for software
2634 single step. */
2635 gdb_assert (has_single_step_breakpoints (thread));
2636 return 0;
2637 }
2638 }
2639
2640 /* Resume LWPs that are currently stopped without any pending status
2641 to report, but are resumed from the core's perspective. */
2642
2643 static void
2644 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2645 {
2646 struct thread_info *thread = (struct thread_info *) entry;
2647 struct lwp_info *lp = get_thread_lwp (thread);
2648
2649 if (lp->stopped
2650 && !lp->suspended
2651 && !lp->status_pending_p
2652 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2653 {
2654 int step = 0;
2655
2656 if (thread->last_resume_kind == resume_step)
2657 step = maybe_hw_step (thread);
2658
2659 if (debug_threads)
2660 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2661 target_pid_to_str (ptid_of (thread)),
2662 paddress (lp->stop_pc),
2663 step);
2664
2665 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2666 }
2667 }
2668
2669 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2670 match FILTER_PTID (leaving others pending). The PTIDs can be:
2671 minus_one_ptid, to specify any child; a pid PTID, specifying all
2672 lwps of a thread group; or a PTID representing a single lwp. Store
2673 the stop status through the status pointer WSTAT. OPTIONS is
2674 passed to the waitpid call. Return 0 if no event was found and
2675 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2676 were found. Return the PID of the stopped child otherwise. */
2677
2678 static int
2679 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2680 int *wstatp, int options)
2681 {
2682 struct thread_info *event_thread;
2683 struct lwp_info *event_child, *requested_child;
2684 sigset_t block_mask, prev_mask;
2685
2686 retry:
2687 /* N.B. event_thread points to the thread_info struct that contains
2688 event_child. Keep them in sync. */
2689 event_thread = NULL;
2690 event_child = NULL;
2691 requested_child = NULL;
2692
2693 /* Check for a lwp with a pending status. */
2694
2695 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2696 {
2697 event_thread = (struct thread_info *)
2698 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2699 if (event_thread != NULL)
2700 event_child = get_thread_lwp (event_thread);
2701 if (debug_threads && event_thread)
2702 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2703 }
2704 else if (!ptid_equal (filter_ptid, null_ptid))
2705 {
2706 requested_child = find_lwp_pid (filter_ptid);
2707
2708 if (stopping_threads == NOT_STOPPING_THREADS
2709 && requested_child->status_pending_p
2710 && requested_child->collecting_fast_tracepoint)
2711 {
2712 enqueue_one_deferred_signal (requested_child,
2713 &requested_child->status_pending);
2714 requested_child->status_pending_p = 0;
2715 requested_child->status_pending = 0;
2716 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2717 }
2718
2719 if (requested_child->suspended
2720 && requested_child->status_pending_p)
2721 {
2722 internal_error (__FILE__, __LINE__,
2723 "requesting an event out of a"
2724 " suspended child?");
2725 }
2726
2727 if (requested_child->status_pending_p)
2728 {
2729 event_child = requested_child;
2730 event_thread = get_lwp_thread (event_child);
2731 }
2732 }
2733
2734 if (event_child != NULL)
2735 {
2736 if (debug_threads)
2737 debug_printf ("Got an event from pending child %ld (%04x)\n",
2738 lwpid_of (event_thread), event_child->status_pending);
2739 *wstatp = event_child->status_pending;
2740 event_child->status_pending_p = 0;
2741 event_child->status_pending = 0;
2742 current_thread = event_thread;
2743 return lwpid_of (event_thread);
2744 }
2745
2746 /* But if we don't find a pending event, we'll have to wait.
2747
2748 We only enter this loop if no process has a pending wait status.
2749 Thus any action taken in response to a wait status inside this
2750 loop is responding as soon as we detect the status, not after any
2751 pending events. */
2752
2753 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2754 all signals while here. */
2755 sigfillset (&block_mask);
2756 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2757
2758 /* Always pull all events out of the kernel. We'll randomly select
2759 an event LWP out of all that have events, to prevent
2760 starvation. */
2761 while (event_child == NULL)
2762 {
2763 pid_t ret = 0;
2764
2765 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2766 quirks:
2767
2768 - If the thread group leader exits while other threads in the
2769 thread group still exist, waitpid(TGID, ...) hangs. That
2770 waitpid won't return an exit status until the other threads
2771 in the group are reaped.
2772
2773 - When a non-leader thread execs, that thread just vanishes
2774 without reporting an exit (so we'd hang if we waited for it
2775 explicitly in that case). The exec event is reported to
2776 the TGID pid. */
2777 errno = 0;
2778 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2779
2780 if (debug_threads)
2781 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2782 ret, errno ? strerror (errno) : "ERRNO-OK");
2783
2784 if (ret > 0)
2785 {
2786 if (debug_threads)
2787 {
2788 debug_printf ("LLW: waitpid %ld received %s\n",
2789 (long) ret, status_to_str (*wstatp));
2790 }
2791
2792 /* Filter all events. IOW, leave all events pending. We'll
2793 randomly select an event LWP out of all that have events
2794 below. */
2795 linux_low_filter_event (ret, *wstatp);
2796 /* Retry until nothing comes out of waitpid. A single
2797 SIGCHLD can indicate more than one child stopped. */
2798 continue;
2799 }
2800
2801 /* Now that we've pulled all events out of the kernel, resume
2802 LWPs that don't have an interesting event to report. */
2803 if (stopping_threads == NOT_STOPPING_THREADS)
2804 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2805
2806 /* ... and find an LWP with a status to report to the core, if
2807 any. */
2808 event_thread = (struct thread_info *)
2809 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2810 if (event_thread != NULL)
2811 {
2812 event_child = get_thread_lwp (event_thread);
2813 *wstatp = event_child->status_pending;
2814 event_child->status_pending_p = 0;
2815 event_child->status_pending = 0;
2816 break;
2817 }
2818
2819 /* Check for zombie thread group leaders. Those can't be reaped
2820 until all other threads in the thread group are. */
2821 check_zombie_leaders ();
2822
2823 /* If there are no resumed children left in the set of LWPs we
2824 want to wait for, bail. We can't just block in
2825 waitpid/sigsuspend, because lwps might have been left stopped
2826 in trace-stop state, and we'd be stuck forever waiting for
2827 their status to change (which would only happen if we resumed
2828 them). Even if WNOHANG is set, this return code is preferred
2829 over 0 (below), as it is more detailed. */
2830 if ((find_inferior (&all_threads,
2831 not_stopped_callback,
2832 &wait_ptid) == NULL))
2833 {
2834 if (debug_threads)
2835 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2836 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2837 return -1;
2838 }
2839
2840 /* No interesting event to report to the caller. */
2841 if ((options & WNOHANG))
2842 {
2843 if (debug_threads)
2844 debug_printf ("WNOHANG set, no event found\n");
2845
2846 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2847 return 0;
2848 }
2849
2850 /* Block until we get an event reported with SIGCHLD. */
2851 if (debug_threads)
2852 debug_printf ("sigsuspend'ing\n");
2853
2854 sigsuspend (&prev_mask);
2855 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2856 goto retry;
2857 }
2858
2859 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2860
2861 current_thread = event_thread;
2862
2863 return lwpid_of (event_thread);
2864 }
2865
2866 /* Wait for an event from child(ren) PTID. PTIDs can be:
2867 minus_one_ptid, to specify any child; a pid PTID, specifying all
2868 lwps of a thread group; or a PTID representing a single lwp. Store
2869 the stop status through the status pointer WSTAT. OPTIONS is
2870 passed to the waitpid call. Return 0 if no event was found and
2871 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2872 were found. Return the PID of the stopped child otherwise. */
2873
2874 static int
2875 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2876 {
2877 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2878 }
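
/* Illustrative sketch (hypothetical, never called): the three PTID
   shapes linux_wait_for_event accepts.  PID 1234 and LWP 1235 are
   made-up values; without WNOHANG in OPTIONS, each call blocks until
   a matching event arrives.  */

static void ATTRIBUTE_UNUSED
example_wait_ptid_shapes (void)
{
  int wstat;

  /* Any child at all.  */
  linux_wait_for_event (minus_one_ptid, &wstat, 0);

  /* Any LWP of thread group 1234.  */
  linux_wait_for_event (pid_to_ptid (1234), &wstat, 0);

  /* Exactly LWP 1235 of thread group 1234.  */
  linux_wait_for_event (ptid_build (1234, 1235, 0), &wstat, 0);
}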
2879
2880 /* Count the LWPs that have had events. */
2881
2882 static int
2883 count_events_callback (struct inferior_list_entry *entry, void *data)
2884 {
2885 struct thread_info *thread = (struct thread_info *) entry;
2886 struct lwp_info *lp = get_thread_lwp (thread);
2887 int *count = (int *) data;
2888
2889 gdb_assert (count != NULL);
2890
2891 /* Count only resumed LWPs that have an event pending. */
2892 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2893 && lp->status_pending_p)
2894 (*count)++;
2895
2896 return 0;
2897 }
2898
2899 /* Select the LWP (if any) that is currently being single-stepped. */
2900
2901 static int
2902 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2903 {
2904 struct thread_info *thread = (struct thread_info *) entry;
2905 struct lwp_info *lp = get_thread_lwp (thread);
2906
2907 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2908 && thread->last_resume_kind == resume_step
2909 && lp->status_pending_p)
2910 return 1;
2911 else
2912 return 0;
2913 }
2914
2915 /* Select the Nth LWP that has had an event. */
2916
2917 static int
2918 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2919 {
2920 struct thread_info *thread = (struct thread_info *) entry;
2921 struct lwp_info *lp = get_thread_lwp (thread);
2922 int *selector = (int *) data;
2923
2924 gdb_assert (selector != NULL);
2925
2926 /* Select only resumed LWPs that have an event pending. */
2927 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2928 && lp->status_pending_p)
2929 if ((*selector)-- == 0)
2930 return 1;
2931
2932 return 0;
2933 }
2934
2935 /* Select one LWP out of those that have events pending. */
2936
2937 static void
2938 select_event_lwp (struct lwp_info **orig_lp)
2939 {
2940 int num_events = 0;
2941 int random_selector;
2942 struct thread_info *event_thread = NULL;
2943
2944 /* In all-stop, give preference to the LWP that is being
2945 single-stepped. There will be at most one, and it's the LWP that
2946 the core is most interested in. If we didn't do this, then we'd
2947 have to handle pending step SIGTRAPs somehow in case the core
2948 later continues the previously-stepped thread; otherwise we'd
2949 report the pending SIGTRAP, and the core, not having stepped the
2950 thread, wouldn't understand what the trap was for, and therefore
2951 would report it to the user as a random signal. */
2952 if (!non_stop)
2953 {
2954 event_thread
2955 = (struct thread_info *) find_inferior (&all_threads,
2956 select_singlestep_lwp_callback,
2957 NULL);
2958 if (event_thread != NULL)
2959 {
2960 if (debug_threads)
2961 debug_printf ("SEL: Select single-step %s\n",
2962 target_pid_to_str (ptid_of (event_thread)));
2963 }
2964 }
2965 if (event_thread == NULL)
2966 {
2967 /* No single-stepping LWP. Select one at random, out of those
2968 which have had events. */
2969
2970 /* First see how many events we have. */
2971 find_inferior (&all_threads, count_events_callback, &num_events);
2972 gdb_assert (num_events > 0);
2973
2974 /* Now randomly pick a LWP out of those that have had
2975 events. */
2976 random_selector = (int)
2977 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2978
2979 if (debug_threads && num_events > 1)
2980 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2981 num_events, random_selector);
2982
2983 event_thread
2984 = (struct thread_info *) find_inferior (&all_threads,
2985 select_event_lwp_callback,
2986 &random_selector);
2987 }
2988
2989 if (event_thread != NULL)
2990 {
2991 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2992
2993 /* Switch the event LWP. */
2994 *orig_lp = event_lp;
2995 }
2996 }
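
/* Illustrative sketch of the selector computation used above: scaling
   rand () by NUM_EVENTS / (RAND_MAX + 1.0) maps it uniformly onto
   0 .. NUM_EVENTS-1, avoiding the modulo bias of rand () % NUM_EVENTS.  */

static int ATTRIBUTE_UNUSED
example_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}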
2997
2998 /* Decrement the suspend count of an LWP. */
2999
3000 static int
3001 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
3002 {
3003 struct thread_info *thread = (struct thread_info *) entry;
3004 struct lwp_info *lwp = get_thread_lwp (thread);
3005
3006 /* Ignore EXCEPT. */
3007 if (lwp == except)
3008 return 0;
3009
3010 lwp_suspended_decr (lwp);
3011 return 0;
3012 }
3013
3014 /* Decrement the suspend count of all LWPs, except EXCEPT, if
3015 non-NULL. */
3016
3017 static void
3018 unsuspend_all_lwps (struct lwp_info *except)
3019 {
3020 find_inferior (&all_threads, unsuspend_one_lwp, except);
3021 }
3022
3023 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
3024 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
3025 void *data);
3026 static int lwp_running (struct inferior_list_entry *entry, void *data);
3027 static ptid_t linux_wait_1 (ptid_t ptid,
3028 struct target_waitstatus *ourstatus,
3029 int target_options);
3030
3031 /* Stabilize threads (move out of jump pads).
3032
3033 If a thread is midway collecting a fast tracepoint, we need to
3034 finish the collection and move it out of the jump pad before
3035 reporting the signal.
3036
3037 This avoids recursion while collecting (when a signal arrives
3038 midway, and the signal handler itself collects), which would trash
3039 the trace buffer. In case the user set a breakpoint in a signal
3040 handler, this avoids the backtrace showing the jump pad, etc.
3041 Most importantly, there are certain things we can't do safely if
3042 threads are stopped in a jump pad (or in its callees). For
3043 example:
3044
3045 - starting a new trace run. A thread still collecting the
3046 previous run could trash the trace buffer when resumed. The trace
3047 buffer control structures would have been reset but the thread had
3048 no way to tell. The thread could even be midway through memcpy'ing
3049 to the buffer, which would mean that when resumed, it would clobber
3050 the trace buffer that had been set up for the new run.
3051
3052 - we can't rewrite/reuse the jump pads for new tracepoints
3053 safely. Say you do tstart while a thread is stopped midway through
3054 collecting. When the thread is later resumed, it finishes the
3055 collection, and returns to the jump pad, to execute the original
3056 instruction that was under the tracepoint jump at the time the
3057 older run had been started. If the jump pad had been rewritten
3058 since for something else in the new run, the thread would now
3059 execute the wrong / random instructions. */
3060
3061 static void
3062 linux_stabilize_threads (void)
3063 {
3064 struct thread_info *saved_thread;
3065 struct thread_info *thread_stuck;
3066
3067 thread_stuck
3068 = (struct thread_info *) find_inferior (&all_threads,
3069 stuck_in_jump_pad_callback,
3070 NULL);
3071 if (thread_stuck != NULL)
3072 {
3073 if (debug_threads)
3074 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3075 lwpid_of (thread_stuck));
3076 return;
3077 }
3078
3079 saved_thread = current_thread;
3080
3081 stabilizing_threads = 1;
3082
3083 /* Kick 'em all. */
3084 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3085
3086 /* Loop until all are stopped out of the jump pads. */
3087 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3088 {
3089 struct target_waitstatus ourstatus;
3090 struct lwp_info *lwp;
3091 int wstat;
3092
3093 /* Note that we go through the full wait event loop. While
3094 moving threads out of the jump pad, we need to be able to step
3095 over internal breakpoints and such. */
3096 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3097
3098 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3099 {
3100 lwp = get_thread_lwp (current_thread);
3101
3102 /* Lock it. */
3103 lwp_suspended_inc (lwp);
3104
3105 if (ourstatus.value.sig != GDB_SIGNAL_0
3106 || current_thread->last_resume_kind == resume_stop)
3107 {
3108 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3109 enqueue_one_deferred_signal (lwp, &wstat);
3110 }
3111 }
3112 }
3113
3114 unsuspend_all_lwps (NULL);
3115
3116 stabilizing_threads = 0;
3117
3118 current_thread = saved_thread;
3119
3120 if (debug_threads)
3121 {
3122 thread_stuck
3123 = (struct thread_info *) find_inferior (&all_threads,
3124 stuck_in_jump_pad_callback,
3125 NULL);
3126 if (thread_stuck != NULL)
3127 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3128 lwpid_of (thread_stuck));
3129 }
3130 }
3131
3132 /* Convenience function that is called when the kernel reports an
3133 event that is not passed out to GDB. */
3134
3135 static ptid_t
3136 ignore_event (struct target_waitstatus *ourstatus)
3137 {
3138 /* If we got an event, there may still be others, as a single
3139 SIGCHLD can indicate more than one child stopped. This forces
3140 another target_wait call. */
3141 async_file_mark ();
3142
3143 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3144 return null_ptid;
3145 }
3146
3147 /* Convenience function that is called when the kernel reports an exit
3148 event. This decides whether to report the event to GDB as a
3149 process exit event, a thread exit event, or to suppress the
3150 event. */
3151
3152 static ptid_t
3153 filter_exit_event (struct lwp_info *event_child,
3154 struct target_waitstatus *ourstatus)
3155 {
3156 struct thread_info *thread = get_lwp_thread (event_child);
3157 ptid_t ptid = ptid_of (thread);
3158
3159 if (!last_thread_of_process_p (pid_of (thread)))
3160 {
3161 if (report_thread_events)
3162 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3163 else
3164 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3165
3166 delete_lwp (event_child);
3167 }
3168 return ptid;
3169 }
3170
3171 /* Returns 1 if GDB is interested in any event_child syscalls. */
3172
3173 static int
3174 gdb_catching_syscalls_p (struct lwp_info *event_child)
3175 {
3176 struct thread_info *thread = get_lwp_thread (event_child);
3177 struct process_info *proc = get_thread_process (thread);
3178
3179 return !VEC_empty (int, proc->syscalls_to_catch);
3180 }
3181
3182 /* Returns 1 if GDB is interested in the event_child syscall.
3183 Only to be called when the stop reason is SYSCALL_SIGTRAP. */
3184
3185 static int
3186 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3187 {
3188 int i, iter;
3189 int sysno;
3190 struct thread_info *thread = get_lwp_thread (event_child);
3191 struct process_info *proc = get_thread_process (thread);
3192
3193 if (VEC_empty (int, proc->syscalls_to_catch))
3194 return 0;
3195
3196 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3197 return 1;
3198
3199 get_syscall_trapinfo (event_child, &sysno);
3200 for (i = 0;
3201 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3202 i++)
3203 if (iter == sysno)
3204 return 1;
3205
3206 return 0;
3207 }
3208
3209 /* Wait for process, returns status. */
3210
3211 static ptid_t
3212 linux_wait_1 (ptid_t ptid,
3213 struct target_waitstatus *ourstatus, int target_options)
3214 {
3215 int w;
3216 struct lwp_info *event_child;
3217 int options;
3218 int pid;
3219 int step_over_finished;
3220 int bp_explains_trap;
3221 int maybe_internal_trap;
3222 int report_to_gdb;
3223 int trace_event;
3224 int in_step_range;
3225 int any_resumed;
3226
3227 if (debug_threads)
3228 {
3229 debug_enter ();
3230 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3231 }
3232
3233 /* Translate generic target options into linux options. */
3234 options = __WALL;
3235 if (target_options & TARGET_WNOHANG)
3236 options |= WNOHANG;
3237
3238 bp_explains_trap = 0;
3239 trace_event = 0;
3240 in_step_range = 0;
3241 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3242
3243 /* Find a resumed LWP, if any. */
3244 if (find_inferior (&all_threads,
3245 status_pending_p_callback,
3246 &minus_one_ptid) != NULL)
3247 any_resumed = 1;
3248 else if ((find_inferior (&all_threads,
3249 not_stopped_callback,
3250 &minus_one_ptid) != NULL))
3251 any_resumed = 1;
3252 else
3253 any_resumed = 0;
3254
3255 if (ptid_equal (step_over_bkpt, null_ptid))
3256 pid = linux_wait_for_event (ptid, &w, options);
3257 else
3258 {
3259 if (debug_threads)
3260 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3261 target_pid_to_str (step_over_bkpt));
3262 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3263 }
3264
3265 if (pid == 0 || (pid == -1 && !any_resumed))
3266 {
3267 gdb_assert (target_options & TARGET_WNOHANG);
3268
3269 if (debug_threads)
3270 {
3271 debug_printf ("linux_wait_1 ret = null_ptid, "
3272 "TARGET_WAITKIND_IGNORE\n");
3273 debug_exit ();
3274 }
3275
3276 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3277 return null_ptid;
3278 }
3279 else if (pid == -1)
3280 {
3281 if (debug_threads)
3282 {
3283 debug_printf ("linux_wait_1 ret = null_ptid, "
3284 "TARGET_WAITKIND_NO_RESUMED\n");
3285 debug_exit ();
3286 }
3287
3288 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3289 return null_ptid;
3290 }
3291
3292 event_child = get_thread_lwp (current_thread);
3293
3294 /* linux_wait_for_event only returns an exit status for the last
3295 child of a process. Report it. */
3296 if (WIFEXITED (w) || WIFSIGNALED (w))
3297 {
3298 if (WIFEXITED (w))
3299 {
3300 ourstatus->kind = TARGET_WAITKIND_EXITED;
3301 ourstatus->value.integer = WEXITSTATUS (w);
3302
3303 if (debug_threads)
3304 {
3305 debug_printf ("linux_wait_1 ret = %s, exited with "
3306 "retcode %d\n",
3307 target_pid_to_str (ptid_of (current_thread)),
3308 WEXITSTATUS (w));
3309 debug_exit ();
3310 }
3311 }
3312 else
3313 {
3314 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3315 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3316
3317 if (debug_threads)
3318 {
3319 debug_printf ("linux_wait_1 ret = %s, terminated with "
3320 "signal %d\n",
3321 target_pid_to_str (ptid_of (current_thread)),
3322 WTERMSIG (w));
3323 debug_exit ();
3324 }
3325 }
3326
3327 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3328 return filter_exit_event (event_child, ourstatus);
3329
3330 return ptid_of (current_thread);
3331 }
3332
3333 /* If a step-over executes a breakpoint instruction, then in the case
3334 of a hardware single step it means a gdb/gdbserver breakpoint had
3335 been planted on top of a permanent breakpoint, while in the case of
3336 a software single step it may just mean that gdbserver hit the
3337 reinsert breakpoint. The PC has been adjusted by save_stop_reason
3338 to point at the breakpoint address.
3339 In the case of a hardware single step, advance the PC manually past
3340 the breakpoint; in the case of a software single step, advance it
3341 only if it's not the single_step_breakpoint we are hitting.
3342 This prevents a program from trapping a permanent breakpoint
3343 forever. */
3344 if (!ptid_equal (step_over_bkpt, null_ptid)
3345 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3346 && (event_child->stepping
3347 || !single_step_breakpoint_inserted_here (event_child->stop_pc)))
3348 {
3349 int increment_pc = 0;
3350 int breakpoint_kind = 0;
3351 CORE_ADDR stop_pc = event_child->stop_pc;
3352
3353 breakpoint_kind =
3354 the_target->breakpoint_kind_from_current_state (&stop_pc);
3355 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3356
3357 if (debug_threads)
3358 {
3359 debug_printf ("step-over for %s executed software breakpoint\n",
3360 target_pid_to_str (ptid_of (current_thread)));
3361 }
3362
3363 if (increment_pc != 0)
3364 {
3365 struct regcache *regcache
3366 = get_thread_regcache (current_thread, 1);
3367
3368 event_child->stop_pc += increment_pc;
3369 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3370
3371 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3372 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3373 }
3374 }
3375
3376 /* If this event was not handled before, and is not a SIGTRAP, we
3377 report it. SIGILL and SIGSEGV are also treated as traps in case
3378 a breakpoint is inserted at the current PC. If this target does
3379 not support internal breakpoints at all, we also report the
3380 SIGTRAP without further processing; it's of no concern to us. */
3381 maybe_internal_trap
3382 = (supports_breakpoints ()
3383 && (WSTOPSIG (w) == SIGTRAP
3384 || ((WSTOPSIG (w) == SIGILL
3385 || WSTOPSIG (w) == SIGSEGV)
3386 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3387
3388 if (maybe_internal_trap)
3389 {
3390 /* Handle anything that requires bookkeeping before deciding to
3391 report the event or continue waiting. */
3392
3393 /* First check if we can explain the SIGTRAP with an internal
3394 breakpoint, or if we should possibly report the event to GDB.
3395 Do this before anything that may remove or insert a
3396 breakpoint. */
3397 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3398
3399 /* We have a SIGTRAP, possibly a step-over dance has just
3400 finished. If so, tweak the state machine accordingly,
3401 reinsert breakpoints and delete any single-step
3402 breakpoints. */
3403 step_over_finished = finish_step_over (event_child);
3404
3405 /* Now invoke the callbacks of any internal breakpoints there. */
3406 check_breakpoints (event_child->stop_pc);
3407
3408 /* Handle tracepoint data collecting. This may overflow the
3409 trace buffer, and cause a tracing stop, removing
3410 breakpoints. */
3411 trace_event = handle_tracepoints (event_child);
3412
3413 if (bp_explains_trap)
3414 {
3415 if (debug_threads)
3416 debug_printf ("Hit a gdbserver breakpoint.\n");
3417 }
3418 }
3419 else
3420 {
3421 /* We have some other signal, possibly a step-over dance was in
3422 progress, and it should be cancelled too. */
3423 step_over_finished = finish_step_over (event_child);
3424 }
3425
3426 /* We have all the data we need. Either report the event to GDB, or
3427 resume threads and keep waiting for more. */
3428
3429 /* If we're collecting a fast tracepoint, finish the collection and
3430 move out of the jump pad before delivering a signal. See
3431 linux_stabilize_threads. */
3432
3433 if (WIFSTOPPED (w)
3434 && WSTOPSIG (w) != SIGTRAP
3435 && supports_fast_tracepoints ()
3436 && agent_loaded_p ())
3437 {
3438 if (debug_threads)
3439 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3440 "to defer or adjust it.\n",
3441 WSTOPSIG (w), lwpid_of (current_thread));
3442
3443 /* Allow debugging the jump pad itself. */
3444 if (current_thread->last_resume_kind != resume_step
3445 && maybe_move_out_of_jump_pad (event_child, &w))
3446 {
3447 enqueue_one_deferred_signal (event_child, &w);
3448
3449 if (debug_threads)
3450 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3451 WSTOPSIG (w), lwpid_of (current_thread));
3452
3453 linux_resume_one_lwp (event_child, 0, 0, NULL);
3454
3455 return ignore_event (ourstatus);
3456 }
3457 }
3458
3459 if (event_child->collecting_fast_tracepoint)
3460 {
3461 if (debug_threads)
3462 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3463 "Check if we're already there.\n",
3464 lwpid_of (current_thread),
3465 event_child->collecting_fast_tracepoint);
3466
3467 trace_event = 1;
3468
3469 event_child->collecting_fast_tracepoint
3470 = linux_fast_tracepoint_collecting (event_child, NULL);
3471
3472 if (event_child->collecting_fast_tracepoint != 1)
3473 {
3474 /* No longer need this breakpoint. */
3475 if (event_child->exit_jump_pad_bkpt != NULL)
3476 {
3477 if (debug_threads)
3478 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3479 "stopping all threads momentarily.\n");
3480
3481 /* Other running threads could hit this breakpoint.
3482 We don't handle moribund locations like GDB does,
3483 instead we always pause all threads when removing
3484 breakpoints, so that any step-over or
3485 decr_pc_after_break adjustment is always taken
3486 care of while the breakpoint is still
3487 inserted. */
3488 stop_all_lwps (1, event_child);
3489
3490 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3491 event_child->exit_jump_pad_bkpt = NULL;
3492
3493 unstop_all_lwps (1, event_child);
3494
3495 gdb_assert (event_child->suspended >= 0);
3496 }
3497 }
3498
3499 if (event_child->collecting_fast_tracepoint == 0)
3500 {
3501 if (debug_threads)
3502 debug_printf ("fast tracepoint finished "
3503 "collecting successfully.\n");
3504
3505 /* We may have a deferred signal to report. */
3506 if (dequeue_one_deferred_signal (event_child, &w))
3507 {
3508 if (debug_threads)
3509 debug_printf ("dequeued one signal.\n");
3510 }
3511 else
3512 {
3513 if (debug_threads)
3514 debug_printf ("no deferred signals.\n");
3515
3516 if (stabilizing_threads)
3517 {
3518 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3519 ourstatus->value.sig = GDB_SIGNAL_0;
3520
3521 if (debug_threads)
3522 {
3523 debug_printf ("linux_wait_1 ret = %s, stopped "
3524 "while stabilizing threads\n",
3525 target_pid_to_str (ptid_of (current_thread)));
3526 debug_exit ();
3527 }
3528
3529 return ptid_of (current_thread);
3530 }
3531 }
3532 }
3533 }
3534
3535 /* Check whether GDB would be interested in this event. */
3536
3537 /* Check if GDB is interested in this syscall. */
3538 if (WIFSTOPPED (w)
3539 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3540 && !gdb_catch_this_syscall_p (event_child))
3541 {
3542 if (debug_threads)
3543 {
3544 debug_printf ("Ignored syscall for LWP %ld.\n",
3545 lwpid_of (current_thread));
3546 }
3547
3548 linux_resume_one_lwp (event_child, event_child->stepping,
3549 0, NULL);
3550 return ignore_event (ourstatus);
3551 }
3552
3553 /* If GDB is not interested in this signal, don't stop other
3554 threads, and don't report it to GDB. Just resume the inferior
3555 right away. We do this for threading-related signals as well as
3556 any that GDB specifically requested we ignore. But never ignore
3557 SIGSTOP if we sent it ourselves, and do not ignore signals when
3558 stepping - they may require special handling to skip the signal
3559 handler. Also never ignore signals that could be caused by a
3560 breakpoint. */
3561 if (WIFSTOPPED (w)
3562 && current_thread->last_resume_kind != resume_step
3563 && (
3564 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3565 (current_process ()->priv->thread_db != NULL
3566 && (WSTOPSIG (w) == __SIGRTMIN
3567 || WSTOPSIG (w) == __SIGRTMIN + 1))
3568 ||
3569 #endif
3570 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3571 && !(WSTOPSIG (w) == SIGSTOP
3572 && current_thread->last_resume_kind == resume_stop)
3573 && !linux_wstatus_maybe_breakpoint (w))))
3574 {
3575 siginfo_t info, *info_p;
3576
3577 if (debug_threads)
3578 debug_printf ("Ignored signal %d for LWP %ld.\n",
3579 WSTOPSIG (w), lwpid_of (current_thread));
3580
3581 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3582 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3583 info_p = &info;
3584 else
3585 info_p = NULL;
3586
3587 if (step_over_finished)
3588 {
3589 /* We cancelled this thread's step-over above. We still
3590 need to unsuspend all other LWPs, and set them back
3591 running again while the signal handler runs. */
3592 unsuspend_all_lwps (event_child);
3593
3594 /* Enqueue the pending signal info so that proceed_all_lwps
3595 doesn't lose it. */
3596 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3597
3598 proceed_all_lwps ();
3599 }
3600 else
3601 {
3602 linux_resume_one_lwp (event_child, event_child->stepping,
3603 WSTOPSIG (w), info_p);
3604 }
3605 return ignore_event (ourstatus);
3606 }
3607
3608 /* Note that all addresses are always "out of the step range" when
3609 there's no range to begin with. */
3610 in_step_range = lwp_in_step_range (event_child);
3611
3612 /* If GDB wanted this thread to single step, and the thread is out
3613 of the step range, we always want to report the SIGTRAP, and let
3614 GDB handle it. Watchpoints should always be reported. So should
3615 signals we can't explain. A SIGTRAP we can't explain could be a
3616 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
3617 do, we'll be able to handle GDB breakpoints on top of internal
3618 breakpoints, by handling the internal breakpoint and still
3619 reporting the event to GDB. If we don't, we're out of luck, GDB
3620 won't see the breakpoint hit. If we see a single-step event but
3621 the thread should be continuing, don't pass the trap to gdb.
3622 That indicates that we had previously finished a single-step but
3623 left the single-step pending -- see
3624 complete_ongoing_step_over. */
3625 report_to_gdb = (!maybe_internal_trap
3626 || (current_thread->last_resume_kind == resume_step
3627 && !in_step_range)
3628 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3629 || (!in_step_range
3630 && !bp_explains_trap
3631 && !trace_event
3632 && !step_over_finished
3633 && !(current_thread->last_resume_kind == resume_continue
3634 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3635 || (gdb_breakpoint_here (event_child->stop_pc)
3636 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3637 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3638 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3639
3640 run_breakpoint_commands (event_child->stop_pc);
3641
3642 /* We found no reason GDB would want us to stop. We either hit one
3643 of our own breakpoints, or finished an internal step GDB
3644 shouldn't know about. */
3645 if (!report_to_gdb)
3646 {
3647 if (debug_threads)
3648 {
3649 if (bp_explains_trap)
3650 debug_printf ("Hit a gdbserver breakpoint.\n");
3651 if (step_over_finished)
3652 debug_printf ("Step-over finished.\n");
3653 if (trace_event)
3654 debug_printf ("Tracepoint event.\n");
3655 if (lwp_in_step_range (event_child))
3656 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3657 paddress (event_child->stop_pc),
3658 paddress (event_child->step_range_start),
3659 paddress (event_child->step_range_end));
3660 }
3661
3662 /* We're not reporting this breakpoint to GDB, so apply the
3663 decr_pc_after_break adjustment to the inferior's regcache
3664 ourselves. */
3665
3666 if (the_low_target.set_pc != NULL)
3667 {
3668 struct regcache *regcache
3669 = get_thread_regcache (current_thread, 1);
3670 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3671 }
3672
3673 /* We may have finished stepping over a breakpoint. If so,
3674 we've stopped and suspended all LWPs momentarily except the
3675 stepping one. This is where we resume them all again. We're
3676 going to keep waiting, so use proceed, which handles stepping
3677 over the next breakpoint. */
3678 if (debug_threads)
3679 debug_printf ("proceeding all threads.\n");
3680
3681 if (step_over_finished)
3682 unsuspend_all_lwps (event_child);
3683
3684 proceed_all_lwps ();
3685 return ignore_event (ourstatus);
3686 }
3687
3688 if (debug_threads)
3689 {
3690 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3691 {
3692 char *str;
3693
3694 str = target_waitstatus_to_string (&event_child->waitstatus);
3695 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3696 lwpid_of (get_lwp_thread (event_child)), str);
3697 xfree (str);
3698 }
3699 if (current_thread->last_resume_kind == resume_step)
3700 {
3701 if (event_child->step_range_start == event_child->step_range_end)
3702 debug_printf ("GDB wanted to single-step, reporting event.\n");
3703 else if (!lwp_in_step_range (event_child))
3704 debug_printf ("Out of step range, reporting event.\n");
3705 }
3706 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3707 debug_printf ("Stopped by watchpoint.\n");
3708 else if (gdb_breakpoint_here (event_child->stop_pc))
3709 debug_printf ("Stopped by GDB breakpoint.\n");
3710 debug_printf ("Hit a non-gdbserver trap event.\n");
3712 }
3713
3714 /* Alright, we're going to report a stop. */
3715
3716 /* Remove single-step breakpoints. */
3717 if (can_software_single_step ())
3718 {
3719 /* Decide whether to remove the single-step breakpoints. If so,
3720 stop all lwps first, so that other threads won't hit a breakpoint
3721 left in stale memory. */
3722 int remove_single_step_breakpoints_p = 0;
3723
3724 if (non_stop)
3725 {
3726 remove_single_step_breakpoints_p
3727 = has_single_step_breakpoints (current_thread);
3728 }
3729 else
3730 {
3731 /* In all-stop, a stop reply cancels all previous resume
3732 requests. Delete all single-step breakpoints. */
3733 struct inferior_list_entry *inf, *tmp;
3734
3735 ALL_INFERIORS (&all_threads, inf, tmp)
3736 {
3737 struct thread_info *thread = (struct thread_info *) inf;
3738
3739 if (has_single_step_breakpoints (thread))
3740 {
3741 remove_single_step_breakpoints_p = 1;
3742 break;
3743 }
3744 }
3745 }
3746
3747 if (remove_single_step_breakpoints_p)
3748 {
3749 /* If we remove single-step breakpoints from memory, stop all lwps,
3750 so that other threads won't hit the breakpoint in the stale
3751 memory. */
3752 stop_all_lwps (0, event_child);
3753
3754 if (non_stop)
3755 {
3756 gdb_assert (has_single_step_breakpoints (current_thread));
3757 delete_single_step_breakpoints (current_thread);
3758 }
3759 else
3760 {
3761 struct inferior_list_entry *inf, *tmp;
3762
3763 ALL_INFERIORS (&all_threads, inf, tmp)
3764 {
3765 struct thread_info *thread = (struct thread_info *) inf;
3766
3767 if (has_single_step_breakpoints (thread))
3768 delete_single_step_breakpoints (thread);
3769 }
3770 }
3771
3772 unstop_all_lwps (0, event_child);
3773 }
3774 }
3775
3776 if (!stabilizing_threads)
3777 {
3778 /* In all-stop, stop all threads. */
3779 if (!non_stop)
3780 stop_all_lwps (0, NULL);
3781
3782 if (step_over_finished)
3783 {
3784 if (!non_stop)
3785 {
3786 /* If we were doing a step-over, all other threads but
3787 the stepping one had been paused in start_step_over,
3788 with their suspend counts incremented. We don't want
3789 to do a full unstop/unpause, because we're in
3790 all-stop mode (so we want threads stopped), but we
3791 still need to unsuspend the other threads, to
3792 decrement their `suspended' count back. */
3793 unsuspend_all_lwps (event_child);
3794 }
3795 else
3796 {
3797 /* If we just finished a step-over, then all threads had
3798 been momentarily paused. In all-stop, that's fine,
3799 we want threads stopped by now anyway. In non-stop,
3800 we need to re-resume threads that GDB wanted to be
3801 running. */
3802 unstop_all_lwps (1, event_child);
3803 }
3804 }
3805
3806 /* If we're not waiting for a specific LWP, choose an event LWP
3807 from among those that have had events. Giving equal priority
3808 to all LWPs that have had events helps prevent
3809 starvation. */
3810 if (ptid_equal (ptid, minus_one_ptid))
3811 {
3812 event_child->status_pending_p = 1;
3813 event_child->status_pending = w;
3814
3815 select_event_lwp (&event_child);
3816
3817 /* current_thread and event_child must stay in sync. */
3818 current_thread = get_lwp_thread (event_child);
3819
3820 event_child->status_pending_p = 0;
3821 w = event_child->status_pending;
3822 }
3823
3825 /* Stabilize threads (move out of jump pads). */
3826 if (!non_stop)
3827 stabilize_threads ();
3828 }
3829 else
3830 {
3831 /* If we just finished a step-over, then all threads had been
3832 momentarily paused. In all-stop, that's fine, we want
3833 threads stopped by now anyway. In non-stop, we need to
3834 re-resume threads that GDB wanted to be running. */
3835 if (step_over_finished)
3836 unstop_all_lwps (1, event_child);
3837 }
3838
3839 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3840 {
3841 /* If the reported event is an exit, fork, vfork or exec, let
3842 GDB know. */
3843 *ourstatus = event_child->waitstatus;
3844 /* Clear the event lwp's waitstatus since we handled it already. */
3845 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3846 }
3847 else
3848 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3849
3850 /* Now that we've selected our final event LWP, un-adjust its PC if
3851 it was a software breakpoint, and the client doesn't know we can
3852 adjust the breakpoint ourselves. */
3853 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3854 && !swbreak_feature)
3855 {
3856 int decr_pc = the_low_target.decr_pc_after_break;
3857
3858 if (decr_pc != 0)
3859 {
3860 struct regcache *regcache
3861 = get_thread_regcache (current_thread, 1);
3862 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3863 }
3864 }
3865
3866 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3867 {
3868 get_syscall_trapinfo (event_child,
3869 &ourstatus->value.syscall_number);
3870 ourstatus->kind = event_child->syscall_state;
3871 }
3872 else if (current_thread->last_resume_kind == resume_stop
3873 && WSTOPSIG (w) == SIGSTOP)
3874 {
3875 /* GDB requested this thread to stop with vCont;t, and it
3876 stopped cleanly, so report the stop as SIG0. The use of
3877 SIGSTOP is an implementation detail. */
3878 ourstatus->value.sig = GDB_SIGNAL_0;
3879 }
3880 else if (current_thread->last_resume_kind == resume_stop
3881 && WSTOPSIG (w) != SIGSTOP)
3882 {
3883 /* GDB requested this thread to stop with vCont;t, but it
3884 stopped for some other reason. */
3885 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3886 }
3887 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3888 {
3889 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3890 }
3891
3892 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3893
3894 if (debug_threads)
3895 {
3896 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3897 target_pid_to_str (ptid_of (current_thread)),
3898 ourstatus->kind, ourstatus->value.sig);
3899 debug_exit ();
3900 }
3901
3902 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3903 return filter_exit_event (event_child, ourstatus);
3904
3905 return ptid_of (current_thread);
3906 }
3907
3908 /* Get rid of any pending event in the pipe. */
3909 static void
3910 async_file_flush (void)
3911 {
3912 int ret;
3913 char buf;
3914
3915 do
3916 ret = read (linux_event_pipe[0], &buf, 1);
3917 while (ret >= 0 || (ret == -1 && errno == EINTR));
3918 }
3919
3920 /* Put something in the pipe, so the event loop wakes up. */
3921 static void
3922 async_file_mark (void)
3923 {
3924 int ret;
3925
3926 async_file_flush ();
3927
3928 do
3929 ret = write (linux_event_pipe[1], "+", 1);
3930 while (ret == 0 || (ret == -1 && errno == EINTR));
3931
3932 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3933 be awakened anyway. */
3934 }
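/* The two helpers above are an instance of the classic "self-pipe
   trick": code that has an event to report writes a byte into a pipe
   that the event loop waits on, turning the event into an ordinary
   file-descriptor wakeup.  A minimal sketch of the setup, assuming
   both pipe ends are made non-blocking (as the async setup elsewhere
   in this file arranges); the names below are illustrative only.  */
#if 0 /* Illustrative sketch, not compiled.  */
#include <fcntl.h>
#include <unistd.h>

static int event_pipe[2];

/* Create the event pipe with both ends non-blocking.  A non-blocking
   read end lets a flush loop drain until EAGAIN; a non-blocking
   write end lets a mark ignore a full pipe, since a full pipe
   already guarantees a wakeup.  */
static int
event_pipe_create (void)
{
  if (pipe (event_pipe) == -1)
    return -1;
  if (fcntl (event_pipe[0], F_SETFL, O_NONBLOCK) == -1
      || fcntl (event_pipe[1], F_SETFL, O_NONBLOCK) == -1)
    return -1;
  return 0;
}
#endif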
3935
3936 static ptid_t
3937 linux_wait (ptid_t ptid,
3938 struct target_waitstatus *ourstatus, int target_options)
3939 {
3940 ptid_t event_ptid;
3941
3942 /* Flush the async file first. */
3943 if (target_is_async_p ())
3944 async_file_flush ();
3945
3946 do
3947 {
3948 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3949 }
3950 while ((target_options & TARGET_WNOHANG) == 0
3951 && ptid_equal (event_ptid, null_ptid)
3952 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3953
3954 /* If at least one stop was reported, there may be more. A single
3955 SIGCHLD can signal more than one child stop. */
3956 if (target_is_async_p ()
3957 && (target_options & TARGET_WNOHANG) != 0
3958 && !ptid_equal (event_ptid, null_ptid))
3959 async_file_mark ();
3960
3961 return event_ptid;
3962 }
3963
3964 /* Send a signal to an LWP. */
3965
3966 static int
3967 kill_lwp (unsigned long lwpid, int signo)
3968 {
3969 int ret;
3970
3971 errno = 0;
3972 ret = syscall (__NR_tkill, lwpid, signo);
3973 if (errno == ENOSYS)
3974 {
3975 /* If tkill fails, then we are not using nptl threads, a
3976 configuration we no longer support. */
3977 perror_with_name (("tkill"));
3978 }
3979 return ret;
3980 }
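/* kill (PID, SIG) lets the kernel deliver SIG to any thread in the
   process, while tkill/tgkill direct the signal at one specific
   kernel thread, which is what stopping an individual LWP requires.
   A minimal sketch of the race-safer tgkill variant (illustrative
   only; glibc provides no wrapper, hence syscall):  */
#if 0 /* Illustrative sketch, not compiled.  */
#include <sys/syscall.h>
#include <signal.h>
#include <unistd.h>

static int
tgkill_sigstop (pid_t tgid, pid_t tid)
{
  /* TGID is the thread-group (process) id; TID is the kernel thread
     id.  Passing both avoids signalling a recycled TID that now
     belongs to another process.  */
  return syscall (__NR_tgkill, tgid, tid, SIGSTOP);
}
#endif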
3981
3982 void
3983 linux_stop_lwp (struct lwp_info *lwp)
3984 {
3985 send_sigstop (lwp);
3986 }
3987
3988 static void
3989 send_sigstop (struct lwp_info *lwp)
3990 {
3991 int pid;
3992
3993 pid = lwpid_of (get_lwp_thread (lwp));
3994
3995 /* If we already have a pending stop signal for this process, don't
3996 send another. */
3997 if (lwp->stop_expected)
3998 {
3999 if (debug_threads)
4000 debug_printf ("Have pending sigstop for lwp %d\n", pid);
4001
4002 return;
4003 }
4004
4005 if (debug_threads)
4006 debug_printf ("Sending sigstop to lwp %d\n", pid);
4007
4008 lwp->stop_expected = 1;
4009 kill_lwp (pid, SIGSTOP);
4010 }
4011
4012 static int
4013 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
4014 {
4015 struct thread_info *thread = (struct thread_info *) entry;
4016 struct lwp_info *lwp = get_thread_lwp (thread);
4017
4018 /* Ignore EXCEPT. */
4019 if (lwp == except)
4020 return 0;
4021
4022 if (lwp->stopped)
4023 return 0;
4024
4025 send_sigstop (lwp);
4026 return 0;
4027 }
4028
4029 /* Increment the suspend count of an LWP, and stop it, if not stopped
4030 yet. */
4031 static int
4032 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
4033 void *except)
4034 {
4035 struct thread_info *thread = (struct thread_info *) entry;
4036 struct lwp_info *lwp = get_thread_lwp (thread);
4037
4038 /* Ignore EXCEPT. */
4039 if (lwp == except)
4040 return 0;
4041
4042 lwp_suspended_inc (lwp);
4043
4044 return send_sigstop_callback (entry, except);
4045 }
4046
4047 static void
4048 mark_lwp_dead (struct lwp_info *lwp, int wstat)
4049 {
4050 /* Store the exit status for later. */
4051 lwp->status_pending_p = 1;
4052 lwp->status_pending = wstat;
4053
4054 /* Store in waitstatus as well, as there's nothing else to process
4055 for this event. */
4056 if (WIFEXITED (wstat))
4057 {
4058 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
4059 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
4060 }
4061 else if (WIFSIGNALED (wstat))
4062 {
4063 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
4064 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
4065 }
4066
4067 /* Prevent trying to stop it. */
4068 lwp->stopped = 1;
4069
4070 /* No further stops are expected from a dead lwp. */
4071 lwp->stop_expected = 0;
4072 }
4073
4074 /* Return true if LWP has exited already, and has a pending exit event
4075 to report to GDB. */
4076
4077 static int
4078 lwp_is_marked_dead (struct lwp_info *lwp)
4079 {
4080 return (lwp->status_pending_p
4081 && (WIFEXITED (lwp->status_pending)
4082 || WIFSIGNALED (lwp->status_pending)));
4083 }
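/* A raw wait status packs the exit/signal/stop information into one
   int; the W* macros from <sys/wait.h> decode it, exactly as
   mark_lwp_dead above classifies it.  A minimal standalone sketch:  */
#if 0 /* Illustrative sketch, not compiled.  */
#include <sys/wait.h>
#include <stdio.h>

static void
print_wait_status (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited, code %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("killed by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    printf ("stopped by signal %d\n", WSTOPSIG (wstat));
}
#endif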
4084
4085 /* Wait for all children to stop for the SIGSTOPs we just queued. */
4086
4087 static void
4088 wait_for_sigstop (void)
4089 {
4090 struct thread_info *saved_thread;
4091 ptid_t saved_tid;
4092 int wstat;
4093 int ret;
4094
4095 saved_thread = current_thread;
4096 if (saved_thread != NULL)
4097 saved_tid = saved_thread->entry.id;
4098 else
4099 saved_tid = null_ptid; /* avoid bogus unused warning */
4100
4101 if (debug_threads)
4102 debug_printf ("wait_for_sigstop: pulling events\n");
4103
4104 /* Passing NULL_PTID as filter indicates we want all events to be
4105 left pending. Eventually this returns when there are no
4106 unwaited-for children left. */
4107 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4108 &wstat, __WALL);
4109 gdb_assert (ret == -1);
4110
4111 if (saved_thread == NULL || linux_thread_alive (saved_tid))
4112 current_thread = saved_thread;
4113 else
4114 {
4115 if (debug_threads)
4116 debug_printf ("Previously current thread died.\n");
4117
4118 /* We can't change the current inferior behind GDB's back,
4119 otherwise, a subsequent command may apply to the wrong
4120 process. */
4121 current_thread = NULL;
4122 }
4123 }
4124
4125 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
4126 move it out, because we need to report the stop event to GDB. For
4127 example, if the user puts a breakpoint in the jump pad, it's
4128 because she wants to debug it. */
4129
4130 static int
4131 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4132 {
4133 struct thread_info *thread = (struct thread_info *) entry;
4134 struct lwp_info *lwp = get_thread_lwp (thread);
4135
4136 if (lwp->suspended != 0)
4137 {
4138 internal_error (__FILE__, __LINE__,
4139 "LWP %ld is suspended, suspended=%d\n",
4140 lwpid_of (thread), lwp->suspended);
4141 }
4142 gdb_assert (lwp->stopped);
4143
4144 /* Allow debugging the jump pad, gdb_collect, etc. */
4145 return (supports_fast_tracepoints ()
4146 && agent_loaded_p ()
4147 && (gdb_breakpoint_here (lwp->stop_pc)
4148 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4149 || thread->last_resume_kind == resume_step)
4150 && linux_fast_tracepoint_collecting (lwp, NULL));
4151 }
4152
4153 static void
4154 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4155 {
4156 struct thread_info *thread = (struct thread_info *) entry;
4157 struct thread_info *saved_thread;
4158 struct lwp_info *lwp = get_thread_lwp (thread);
4159 int *wstat;
4160
4161 if (lwp->suspended != 0)
4162 {
4163 internal_error (__FILE__, __LINE__,
4164 "LWP %ld is suspended, suspended=%d\n",
4165 lwpid_of (thread), lwp->suspended);
4166 }
4167 gdb_assert (lwp->stopped);
4168
4169 /* For gdb_breakpoint_here. */
4170 saved_thread = current_thread;
4171 current_thread = thread;
4172
4173 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4174
4175 /* Allow debugging the jump pad, gdb_collect, etc. */
4176 if (!gdb_breakpoint_here (lwp->stop_pc)
4177 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4178 && thread->last_resume_kind != resume_step
4179 && maybe_move_out_of_jump_pad (lwp, wstat))
4180 {
4181 if (debug_threads)
4182 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4183 lwpid_of (thread));
4184
4185 if (wstat)
4186 {
4187 lwp->status_pending_p = 0;
4188 enqueue_one_deferred_signal (lwp, wstat);
4189
4190 if (debug_threads)
4191 debug_printf ("Signal %d for LWP %ld deferred "
4192 "(in jump pad)\n",
4193 WSTOPSIG (*wstat), lwpid_of (thread));
4194 }
4195
4196 linux_resume_one_lwp (lwp, 0, 0, NULL);
4197 }
4198 else
4199 lwp_suspended_inc (lwp);
4200
4201 current_thread = saved_thread;
4202 }
4203
4204 static int
4205 lwp_running (struct inferior_list_entry *entry, void *data)
4206 {
4207 struct thread_info *thread = (struct thread_info *) entry;
4208 struct lwp_info *lwp = get_thread_lwp (thread);
4209
4210 if (lwp_is_marked_dead (lwp))
4211 return 0;
4212 if (lwp->stopped)
4213 return 0;
4214 return 1;
4215 }
4216
4217 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4218 If SUSPEND, then also increase the suspend count of every LWP,
4219 except EXCEPT. */
4220
4221 static void
4222 stop_all_lwps (int suspend, struct lwp_info *except)
4223 {
4224 /* Should not be called recursively. */
4225 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4226
4227 if (debug_threads)
4228 {
4229 debug_enter ();
4230 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4231 suspend ? "stop-and-suspend" : "stop",
4232 except != NULL
4233 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4234 : "none");
4235 }
4236
4237 stopping_threads = (suspend
4238 ? STOPPING_AND_SUSPENDING_THREADS
4239 : STOPPING_THREADS);
4240
4241 if (suspend)
4242 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4243 else
4244 find_inferior (&all_threads, send_sigstop_callback, except);
4245 wait_for_sigstop ();
4246 stopping_threads = NOT_STOPPING_THREADS;
4247
4248 if (debug_threads)
4249 {
4250 debug_printf ("stop_all_lwps done, setting stopping_threads "
4251 "back to !stopping\n");
4252 debug_exit ();
4253 }
4254 }
4255
4256 /* Enqueue one signal in the chain of signals which need to be
4257 delivered to this process on next resume. */
4258
4259 static void
4260 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4261 {
4262 struct pending_signals *p_sig = XNEW (struct pending_signals);
4263
4264 p_sig->prev = lwp->pending_signals;
4265 p_sig->signal = signal;
4266 if (info == NULL)
4267 memset (&p_sig->info, 0, sizeof (siginfo_t));
4268 else
4269 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4270 lwp->pending_signals = p_sig;
4271 }
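/* PENDING_SIGNALS is kept as a LIFO: each new entry becomes the list
   head and points at the previous head through PREV.  Consumers that
   want FIFO delivery therefore walk to the tail and unlink the oldest
   entry, as linux_resume_one_lwp_throw does further down.  A minimal
   sketch of that dequeue (illustrative only; it assumes this file's
   struct pending_signals):  */
#if 0 /* Illustrative sketch, not compiled.  */
static int
dequeue_oldest_pending_signal (struct lwp_info *lwp)
{
  struct pending_signals **p_sig = &lwp->pending_signals;
  int signal;

  if (*p_sig == NULL)
    return 0;

  /* Walk to the oldest entry, i.e. the tail of the PREV chain.  */
  while ((*p_sig)->prev != NULL)
    p_sig = &(*p_sig)->prev;

  signal = (*p_sig)->signal;
  free (*p_sig);
  *p_sig = NULL;
  return signal;
}
#endif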
4272
4273 /* Install breakpoints for software single stepping. */
4274
4275 static void
4276 install_software_single_step_breakpoints (struct lwp_info *lwp)
4277 {
4278 int i;
4279 CORE_ADDR pc;
4280 struct thread_info *thread = get_lwp_thread (lwp);
4281 struct regcache *regcache = get_thread_regcache (thread, 1);
4282 VEC (CORE_ADDR) *next_pcs = NULL;
4283 struct cleanup *old_chain = make_cleanup_restore_current_thread ();
4284
4285 make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4286
4287 current_thread = thread;
4288 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4289
4290 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4291 set_single_step_breakpoint (pc, current_ptid);
4292
4293 do_cleanups (old_chain);
4294 }
4295
4296 /* Single step via hardware or software single step.
4297 Return 1 if hardware single stepping, 0 if software single stepping
4298 or can't single step. */
4299
4300 static int
4301 single_step (struct lwp_info* lwp)
4302 {
4303 int step = 0;
4304
4305 if (can_hardware_single_step ())
4306 {
4307 step = 1;
4308 }
4309 else if (can_software_single_step ())
4310 {
4311 install_software_single_step_breakpoints (lwp);
4312 step = 0;
4313 }
4314 else
4315 {
4316 if (debug_threads)
4317 debug_printf ("stepping is not implemented on this target\n");
4318 }
4319
4320 return step;
4321 }
4322
4323 /* The signal can be delivered to the inferior if we are not trying to
4324 finish a fast tracepoint collect. Since a signal can be delivered
4325 during the step-over, the program may enter the signal handler and
4326 trap again after returning from it. We can live with the spurious
4327 double traps. */
4328
4329 static int
4330 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4331 {
4332 return !lwp->collecting_fast_tracepoint;
4333 }
4334
4335 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4336 SIGNAL is nonzero, give it that signal. */
4337
4338 static void
4339 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4340 int step, int signal, siginfo_t *info)
4341 {
4342 struct thread_info *thread = get_lwp_thread (lwp);
4343 struct thread_info *saved_thread;
4344 int fast_tp_collecting;
4345 int ptrace_request;
4346 struct process_info *proc = get_thread_process (thread);
4347
4348 /* Note that the target description may not be initialised
4349 (proc->tdesc == NULL) at this point, because the program hasn't
4350 stopped at its first instruction yet. That means GDBserver is
4351 skipping the extra traps from the wrapper program (see option
4352 --wrapper). Code in this function that requires register access
4353 should be guarded by a check on proc->tdesc or similar. */
4354
4355 if (lwp->stopped == 0)
4356 return;
4357
4358 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4359
4360 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4361
4362 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4363
4364 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4365 user used the "jump" command, or "set $pc = foo"). */
4366 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4367 {
4368 /* Collecting 'while-stepping' actions doesn't make sense
4369 anymore. */
4370 release_while_stepping_state_list (thread);
4371 }
4372
4373 /* If we have pending signals or status, and a new signal, enqueue the
4374 signal. Also enqueue the signal if it can't be delivered to the
4375 inferior right now. */
4376 if (signal != 0
4377 && (lwp->status_pending_p
4378 || lwp->pending_signals != NULL
4379 || !lwp_signal_can_be_delivered (lwp)))
4380 {
4381 enqueue_pending_signal (lwp, signal, info);
4382
4383 /* Postpone any pending signal. It was enqueued above. */
4384 signal = 0;
4385 }
4386
4387 if (lwp->status_pending_p)
4388 {
4389 if (debug_threads)
4390 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4391 " has pending status\n",
4392 lwpid_of (thread), step ? "step" : "continue",
4393 lwp->stop_expected ? "expected" : "not expected");
4394 return;
4395 }
4396
4397 saved_thread = current_thread;
4398 current_thread = thread;
4399
4400 /* This bit needs some thinking about. If we get a signal that
4401 we must report while a single-step reinsert is still pending,
4402 we often end up resuming the thread. It might be better to
4403 (ew) allow a stack of pending events; then we could be sure that
4404 the reinsert happened right away and not lose any signals.
4405
4406 Making this stack would also shrink the window in which breakpoints are
4407 uninserted (see comment in linux_wait_for_lwp) but not enough for
4408 complete correctness, so it won't solve that problem. It may be
4409 worthwhile just to solve this one, however. */
4410 if (lwp->bp_reinsert != 0)
4411 {
4412 if (debug_threads)
4413 debug_printf (" pending reinsert at 0x%s\n",
4414 paddress (lwp->bp_reinsert));
4415
4416 if (can_hardware_single_step ())
4417 {
4418 if (fast_tp_collecting == 0)
4419 {
4420 if (step == 0)
4421 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4422 if (lwp->suspended)
4423 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4424 lwp->suspended);
4425 }
4426 }
4427
4428 step = maybe_hw_step (thread);
4429 }
4430
4431 if (fast_tp_collecting == 1)
4432 {
4433 if (debug_threads)
4434 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4435 " (exit-jump-pad-bkpt)\n",
4436 lwpid_of (thread));
4437 }
4438 else if (fast_tp_collecting == 2)
4439 {
4440 if (debug_threads)
4441 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4442 " single-stepping\n",
4443 lwpid_of (thread));
4444
4445 if (can_hardware_single_step ())
4446 step = 1;
4447 else
4448 {
4449 internal_error (__FILE__, __LINE__,
4450 "moving out of jump pad single-stepping"
4451 " not implemented on this target");
4452 }
4453 }
4454
4455 /* If we have while-stepping actions in this thread, set it stepping.
4456 If we have a signal to deliver, it may or may not be set to
4457 SIG_IGN, we don't know. Assume so, and allow collecting
4458 while-stepping into a signal handler. A possible smart thing to
4459 do would be to set an internal breakpoint at the signal return
4460 address, continue, and carry on catching this while-stepping
4461 action only when that breakpoint is hit. A future
4462 enhancement. */
4463 if (thread->while_stepping != NULL)
4464 {
4465 if (debug_threads)
4466 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4467 lwpid_of (thread));
4468
4469 step = single_step (lwp);
4470 }
4471
4472 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4473 {
4474 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4475
4476 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4477
4478 if (debug_threads)
4479 {
4480 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4481 (long) lwp->stop_pc);
4482 }
4483 }
4484
4485 /* If we have pending signals, consume one if it can be delivered to
4486 the inferior. */
4487 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4488 {
4489 struct pending_signals **p_sig;
4490
4491 p_sig = &lwp->pending_signals;
4492 while ((*p_sig)->prev != NULL)
4493 p_sig = &(*p_sig)->prev;
4494
4495 signal = (*p_sig)->signal;
4496 if ((*p_sig)->info.si_signo != 0)
4497 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4498 &(*p_sig)->info);
4499
4500 free (*p_sig);
4501 *p_sig = NULL;
4502 }
4503
4504 if (debug_threads)
4505 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4506 lwpid_of (thread), step ? "step" : "continue", signal,
4507 lwp->stop_expected ? "expected" : "not expected");
4508
4509 if (the_low_target.prepare_to_resume != NULL)
4510 the_low_target.prepare_to_resume (lwp);
4511
4512 regcache_invalidate_thread (thread);
4513 errno = 0;
4514 lwp->stepping = step;
4515 if (step)
4516 ptrace_request = PTRACE_SINGLESTEP;
4517 else if (gdb_catching_syscalls_p (lwp))
4518 ptrace_request = PTRACE_SYSCALL;
4519 else
4520 ptrace_request = PTRACE_CONT;
4521 ptrace (ptrace_request,
4522 lwpid_of (thread),
4523 (PTRACE_TYPE_ARG3) 0,
4524 /* Coerce to a uintptr_t first to avoid potential gcc warning
4525 of coercing an 8 byte integer to a 4 byte pointer. */
4526 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4527
4528 current_thread = saved_thread;
4529 if (errno)
4530 perror_with_name ("resuming thread");
4531
4532 /* Successfully resumed. Clear state that no longer makes sense,
4533 and mark the LWP as running. Must not do this before resuming
4534 otherwise if that fails other code will be confused. E.g., we'd
4535 later try to stop the LWP and hang forever waiting for a stop
4536 status. Note that we must not throw after this is cleared,
4537 otherwise handle_zombie_lwp_error would get confused. */
4538 lwp->stopped = 0;
4539 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4540 }
4541
4542 /* Called when we try to resume a stopped LWP and that errors out. If
4543 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4544 or about to become), discard the error, clear any pending status
4545 the LWP may have, and return true (we'll collect the exit status
4546 soon enough). Otherwise, return false. */
4547
4548 static int
4549 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4550 {
4551 struct thread_info *thread = get_lwp_thread (lp);
4552
4553 /* If we get an error after resuming the LWP successfully, we'd
4554 confuse !T state for the LWP being gone. */
4555 gdb_assert (lp->stopped);
4556
4557 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4558 because even if ptrace failed with ESRCH, the tracee may be "not
4559 yet fully dead", but already refusing ptrace requests. In that
4560 case the tracee has 'R (Running)' state for a little bit
4561 (observed in Linux 3.18). See also the note on ESRCH in the
4562 ptrace(2) man page. Instead, check whether the LWP has any state
4563 other than ptrace-stopped. */
4564
4565 /* Don't assume anything if /proc/PID/status can't be read. */
4566 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4567 {
4568 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4569 lp->status_pending_p = 0;
4570 return 1;
4571 }
4572 return 0;
4573 }
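/* The check above relies on reading the LWP's state field from
   /proc/PID/status.  Roughly what such a check looks like -- a sketch
   only; the real implementation lives in nat/linux-procfs.c, and
   depending on kernel version the trace-stop state is spelled
   "t (tracing stop)" or "T (tracing stop)":  */
#if 0 /* Illustrative sketch, not compiled.  */
#include <stdio.h>
#include <string.h>

static int
pid_is_trace_stopped (int pid)
{
  char path[64], line[256];
  FILE *f;
  int result = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	result = strstr (line, "(tracing stop)") != NULL;
	break;
      }

  fclose (f);
  return result;
}
#endif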
4574
4575 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4576 disappears while we try to resume it. */
4577
4578 static void
4579 linux_resume_one_lwp (struct lwp_info *lwp,
4580 int step, int signal, siginfo_t *info)
4581 {
4582 TRY
4583 {
4584 linux_resume_one_lwp_throw (lwp, step, signal, info);
4585 }
4586 CATCH (ex, RETURN_MASK_ERROR)
4587 {
4588 if (!check_ptrace_stopped_lwp_gone (lwp))
4589 throw_exception (ex);
4590 }
4591 END_CATCH
4592 }
4593
4594 struct thread_resume_array
4595 {
4596 struct thread_resume *resume;
4597 size_t n;
4598 };
4599
4600 /* This function is called once per thread via find_inferior.
4601 ARG is a pointer to a thread_resume_array struct.
4602 We look up the thread specified by ENTRY in ARG, and mark the thread
4603 with a pointer to the appropriate resume request.
4604
4605 This algorithm is O(threads * resume elements), but resume elements
4606 is small (and will remain small at least until GDB supports thread
4607 suspension). */
4608
4609 static int
4610 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4611 {
4612 struct thread_info *thread = (struct thread_info *) entry;
4613 struct lwp_info *lwp = get_thread_lwp (thread);
4614 int ndx;
4615 struct thread_resume_array *r;
4616
4617 r = (struct thread_resume_array *) arg;
4618
4619 for (ndx = 0; ndx < r->n; ndx++)
4620 {
4621 ptid_t ptid = r->resume[ndx].thread;
4622 if (ptid_equal (ptid, minus_one_ptid)
4623 || ptid_equal (ptid, entry->id)
4624 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4625 of PID'. */
4626 || (ptid_get_pid (ptid) == pid_of (thread)
4627 && (ptid_is_pid (ptid)
4628 || ptid_get_lwp (ptid) == -1)))
4629 {
4630 if (r->resume[ndx].kind == resume_stop
4631 && thread->last_resume_kind == resume_stop)
4632 {
4633 if (debug_threads)
4634 debug_printf ("already %s LWP %ld at GDB's request\n",
4635 (thread->last_status.kind
4636 == TARGET_WAITKIND_STOPPED)
4637 ? "stopped"
4638 : "stopping",
4639 lwpid_of (thread));
4640
4641 continue;
4642 }
4643
4644 lwp->resume = &r->resume[ndx];
4645 thread->last_resume_kind = lwp->resume->kind;
4646
4647 lwp->step_range_start = lwp->resume->step_range_start;
4648 lwp->step_range_end = lwp->resume->step_range_end;
4649
4650 /* If we had a deferred signal to report, dequeue one now.
4651 This can happen if LWP gets more than one signal while
4652 trying to get out of a jump pad. */
4653 if (lwp->stopped
4654 && !lwp->status_pending_p
4655 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4656 {
4657 lwp->status_pending_p = 1;
4658
4659 if (debug_threads)
4660 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4661 "leaving status pending.\n",
4662 WSTOPSIG (lwp->status_pending),
4663 lwpid_of (thread));
4664 }
4665
4666 return 0;
4667 }
4668 }
4669
4670 /* No resume action for this thread. */
4671 lwp->resume = NULL;
4672
4673 return 0;
4674 }
4675
4676 /* find_inferior callback for linux_resume.
4677 Set *FLAG_P if this lwp has an interesting status pending. */
4678
4679 static int
4680 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4681 {
4682 struct thread_info *thread = (struct thread_info *) entry;
4683 struct lwp_info *lwp = get_thread_lwp (thread);
4684
4685 /* LWPs which will not be resumed are not interesting, because
4686 we might not wait for them next time through linux_wait. */
4687 if (lwp->resume == NULL)
4688 return 0;
4689
4690 if (thread_still_has_status_pending_p (thread))
4691 * (int *) flag_p = 1;
4692
4693 return 0;
4694 }
4695
4696 /* Return 1 if this lwp that GDB wants running is stopped at an
4697 internal breakpoint that we need to step over. It assumes that any
4698 required STOP_PC adjustment has already been propagated to the
4699 inferior's regcache. */
4700
4701 static int
4702 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4703 {
4704 struct thread_info *thread = (struct thread_info *) entry;
4705 struct lwp_info *lwp = get_thread_lwp (thread);
4706 struct thread_info *saved_thread;
4707 CORE_ADDR pc;
4708 struct process_info *proc = get_thread_process (thread);
4709
4710 /* GDBserver is skipping the extra traps from the wrapper program,
4711 so there is no need to step over. */
4712 if (proc->tdesc == NULL)
4713 return 0;
4714
4715 /* LWPs which will not be resumed are not interesting, because we
4716 might not wait for them next time through linux_wait. */
4717
4718 if (!lwp->stopped)
4719 {
4720 if (debug_threads)
4721 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4722 lwpid_of (thread));
4723 return 0;
4724 }
4725
4726 if (thread->last_resume_kind == resume_stop)
4727 {
4728 if (debug_threads)
4729 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4730 " stopped\n",
4731 lwpid_of (thread));
4732 return 0;
4733 }
4734
4735 gdb_assert (lwp->suspended >= 0);
4736
4737 if (lwp->suspended)
4738 {
4739 if (debug_threads)
4740 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4741 lwpid_of (thread));
4742 return 0;
4743 }
4744
4745 if (lwp->status_pending_p)
4746 {
4747 if (debug_threads)
4748 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4749 " status.\n",
4750 lwpid_of (thread));
4751 return 0;
4752 }
4753
4754 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4755 or we have. */
4756 pc = get_pc (lwp);
4757
4758 /* If the PC has changed since we stopped, then don't do anything,
4759 and let the breakpoint/tracepoint be hit. This happens if, for
4760 instance, GDB handled the decr_pc_after_break subtraction itself,
4761 GDB is OOL stepping this thread, the user has issued a "jump"
4762 command, or the user has poked the thread's registers herself. */
4763 if (pc != lwp->stop_pc)
4764 {
4765 if (debug_threads)
4766 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4767 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4768 lwpid_of (thread),
4769 paddress (lwp->stop_pc), paddress (pc));
4770 return 0;
4771 }
4772
4773 /* On a software single-step target, resume the inferior with the
4774 signal rather than stepping over. */
4775 if (can_software_single_step ()
4776 && lwp->pending_signals != NULL
4777 && lwp_signal_can_be_delivered (lwp))
4778 {
4779 if (debug_threads)
4780 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4781 " signals.\n",
4782 lwpid_of (thread));
4783
4784 return 0;
4785 }
4786
4787 saved_thread = current_thread;
4788 current_thread = thread;
4789
4790 /* We can only step over breakpoints we know about. */
4791 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4792 {
4793 /* Don't step over a breakpoint that GDB expects to hit,
4794 though. If the condition is being evaluated on the target's side
4795 and it evaluates to false, step over this breakpoint as well. */
4796 if (gdb_breakpoint_here (pc)
4797 && gdb_condition_true_at_breakpoint (pc)
4798 && gdb_no_commands_at_breakpoint (pc))
4799 {
4800 if (debug_threads)
4801 debug_printf ("Need step over [LWP %ld]? yes, but found"
4802 " GDB breakpoint at 0x%s; skipping step over\n",
4803 lwpid_of (thread), paddress (pc));
4804
4805 current_thread = saved_thread;
4806 return 0;
4807 }
4808 else
4809 {
4810 if (debug_threads)
4811 debug_printf ("Need step over [LWP %ld]? yes, "
4812 "found breakpoint at 0x%s\n",
4813 lwpid_of (thread), paddress (pc));
4814
4815 /* We've found an lwp that needs stepping over --- return 1 so
4816 that find_inferior stops looking. */
4817 current_thread = saved_thread;
4818
4819 return 1;
4820 }
4821 }
4822
4823 current_thread = saved_thread;
4824
4825 if (debug_threads)
4826 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4827 " at 0x%s\n",
4828 lwpid_of (thread), paddress (pc));
4829
4830 return 0;
4831 }
4832
4833 /* Start a step-over operation on LWP. When LWP is stopped at a
4834 breakpoint, to make progress we need to move the breakpoint out
4835 of the way. If we let other threads run while we do that, they may
4836 pass by the breakpoint location and miss hitting it. To avoid
4837 that, a step-over momentarily stops all threads while LWP is
4838 single-stepped by either hardware or software while the breakpoint
4839 is temporarily uninserted from the inferior. When the single-step
4840 finishes, we reinsert the breakpoint, and let all threads that are
4841 supposed to be running, run again. */
4842
4843 static int
4844 start_step_over (struct lwp_info *lwp)
4845 {
4846 struct thread_info *thread = get_lwp_thread (lwp);
4847 struct thread_info *saved_thread;
4848 CORE_ADDR pc;
4849 int step;
4850
4851 if (debug_threads)
4852 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4853 lwpid_of (thread));
4854
4855 stop_all_lwps (1, lwp);
4856
4857 if (lwp->suspended != 0)
4858 {
4859 internal_error (__FILE__, __LINE__,
4860 "LWP %ld suspended=%d\n", lwpid_of (thread),
4861 lwp->suspended);
4862 }
4863
4864 if (debug_threads)
4865 debug_printf ("Done stopping all threads for step-over.\n");
4866
4867 /* Note, we should always reach here with an already adjusted PC,
4868 either by GDB (if we're resuming due to GDB's request), or by our
4869 caller, if we just finished handling an internal breakpoint GDB
4870 shouldn't care about. */
4871 pc = get_pc (lwp);
4872
4873 saved_thread = current_thread;
4874 current_thread = thread;
4875
4876 lwp->bp_reinsert = pc;
4877 uninsert_breakpoints_at (pc);
4878 uninsert_fast_tracepoint_jumps_at (pc);
4879
4880 step = single_step (lwp);
4881
4882 current_thread = saved_thread;
4883
4884 linux_resume_one_lwp (lwp, step, 0, NULL);
4885
4886 /* Require next event from this LWP. */
4887 step_over_bkpt = thread->entry.id;
4888 return 1;
4889 }
4890
4891 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4892 start_step_over, if still there, and delete any single-step
4893 breakpoints we've set, on non-hardware single-step targets. */
4894
4895 static int
4896 finish_step_over (struct lwp_info *lwp)
4897 {
4898 if (lwp->bp_reinsert != 0)
4899 {
4900 struct thread_info *saved_thread = current_thread;
4901
4902 if (debug_threads)
4903 debug_printf ("Finished step over.\n");
4904
4905 current_thread = get_lwp_thread (lwp);
4906
4907 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4908 may be no breakpoint to reinsert there by now. */
4909 reinsert_breakpoints_at (lwp->bp_reinsert);
4910 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4911
4912 lwp->bp_reinsert = 0;
4913
4914 /* Delete any single-step breakpoints. No longer needed. We
4915 don't have to worry about other threads hitting this trap,
4916 and later not being able to explain it, because we were
4917 stepping over a breakpoint, and we hold all threads but
4918 LWP stopped while doing that. */
4919 if (!can_hardware_single_step ())
4920 {
4921 gdb_assert (has_single_step_breakpoints (current_thread));
4922 delete_single_step_breakpoints (current_thread);
4923 }
4924
4925 step_over_bkpt = null_ptid;
4926 current_thread = saved_thread;
4927 return 1;
4928 }
4929 else
4930 return 0;
4931 }
4932
4933 /* If there's a step over in progress, wait until all threads stop
4934 (that is, until the stepping thread finishes its step), and
4935 unsuspend all lwps. The stepping thread ends with its status
4936 pending, which is processed later when we get back to processing
4937 events. */
4938
4939 static void
4940 complete_ongoing_step_over (void)
4941 {
4942 if (!ptid_equal (step_over_bkpt, null_ptid))
4943 {
4944 struct lwp_info *lwp;
4945 int wstat;
4946 int ret;
4947
4948 if (debug_threads)
4949 debug_printf ("detach: step over in progress, finish it first\n");
4950
4951 /* Passing NULL_PTID as filter indicates we want all events to
4952 be left pending. Eventually this returns when there are no
4953 unwaited-for children left. */
4954 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4955 &wstat, __WALL);
4956 gdb_assert (ret == -1);
4957
4958 lwp = find_lwp_pid (step_over_bkpt);
4959 if (lwp != NULL)
4960 finish_step_over (lwp);
4961 step_over_bkpt = null_ptid;
4962 unsuspend_all_lwps (lwp);
4963 }
4964 }
4965
4966 /* This function is called once per thread. We check the thread's resume
4967 request, which will tell us whether to resume, step, or leave the thread
4968 stopped, and what signal, if any, should be sent to it.
4969
4970 For threads which we aren't explicitly told otherwise, we preserve
4971 the stepping flag; this is used for stepping over gdbserver-placed
4972 breakpoints.
4973
4974 If pending_flags was set in any thread, we queue any needed
4975 signals, since we won't actually resume. We already have a pending
4976 event to report, so we don't need to preserve any step requests;
4977 they should be re-issued if necessary. */
4978
4979 static int
4980 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4981 {
4982 struct thread_info *thread = (struct thread_info *) entry;
4983 struct lwp_info *lwp = get_thread_lwp (thread);
4984 int leave_all_stopped = * (int *) arg;
4985 int leave_pending;
4986
4987 if (lwp->resume == NULL)
4988 return 0;
4989
4990 if (lwp->resume->kind == resume_stop)
4991 {
4992 if (debug_threads)
4993 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4994
4995 if (!lwp->stopped)
4996 {
4997 if (debug_threads)
4998 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4999
5000 /* Stop the thread, and wait for the event asynchronously,
5001 through the event loop. */
5002 send_sigstop (lwp);
5003 }
5004 else
5005 {
5006 if (debug_threads)
5007 debug_printf ("already stopped LWP %ld\n",
5008 lwpid_of (thread));
5009
5010 /* The LWP may have been stopped in an internal event that
5011 was not meant to be notified back to GDB (e.g., gdbserver
5012 breakpoint), so we should be reporting a stop event in
5013 this case too. */
5014
5015 /* If the thread already has a pending SIGSTOP, this is a
5016 no-op. Otherwise, something later will presumably resume
5017 the thread and this will cause it to cancel any pending
5018 operation, due to last_resume_kind == resume_stop. If
5019 the thread already has a pending status to report, we
5020 will still report it the next time we wait - see
5021 status_pending_p_callback. */
5022
5023 /* If we already have a pending signal to report, then
5024 there's no need to queue a SIGSTOP, as this means we're
5025 midway through moving the LWP out of the jumppad, and we
5026 will report the pending signal as soon as that is
5027 finished. */
5028 if (lwp->pending_signals_to_report == NULL)
5029 send_sigstop (lwp);
5030 }
5031
5032 /* For stop requests, we're done. */
5033 lwp->resume = NULL;
5034 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5035 return 0;
5036 }
5037
5038 /* If this thread which is about to be resumed has a pending status,
5039 then don't resume it - we can just report the pending status.
5040 Likewise if it is suspended, because e.g., another thread is
5041 stepping past a breakpoint. Make sure to queue any signals that
5042 would otherwise be sent. In all-stop mode, we make this decision
5043 based on whether *any* thread has a pending status. If there's a
5044 thread that needs the step-over-breakpoint dance, then don't
5045 resume any other thread but that particular one. */
5046 leave_pending = (lwp->suspended
5047 || lwp->status_pending_p
5048 || leave_all_stopped);
5049
5050 /* If we have a new signal, enqueue the signal. */
5051 if (lwp->resume->sig != 0)
5052 {
5053 siginfo_t info, *info_p;
5054
5055 /* If this is the same signal we were previously stopped by,
5056 make sure to queue its siginfo. */
5057 if (WIFSTOPPED (lwp->last_status)
5058 && WSTOPSIG (lwp->last_status) == lwp->resume->sig
5059 && ptrace (PTRACE_GETSIGINFO, lwpid_of (thread),
5060 (PTRACE_TYPE_ARG3) 0, &info) == 0)
5061 info_p = &info;
5062 else
5063 info_p = NULL;
5064
5065 enqueue_pending_signal (lwp, lwp->resume->sig, info_p);
5066 }
5067
5068 if (!leave_pending)
5069 {
5070 if (debug_threads)
5071 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
5072
5073 proceed_one_lwp (entry, NULL);
5074 }
5075 else
5076 {
5077 if (debug_threads)
5078 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
5079 }
5080
5081 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
5082 lwp->resume = NULL;
5083 return 0;
5084 }
5085
5086 static void
5087 linux_resume (struct thread_resume *resume_info, size_t n)
5088 {
5089 struct thread_resume_array array = { resume_info, n };
5090 struct thread_info *need_step_over = NULL;
5091 int any_pending;
5092 int leave_all_stopped;
5093
5094 if (debug_threads)
5095 {
5096 debug_enter ();
5097 debug_printf ("linux_resume:\n");
5098 }
5099
5100 find_inferior (&all_threads, linux_set_resume_request, &array);
5101
5102 /* If there is a thread which would otherwise be resumed, which has
5103 a pending status, then don't resume any threads - we can just
5104 report the pending status. Make sure to queue any signals that
5105 would otherwise be sent. In non-stop mode, we'll apply this
5106 logic to each thread individually. We consume all pending events
5107 before considering whether to start a step-over (in all-stop). */
5108 any_pending = 0;
5109 if (!non_stop)
5110 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
5111
5112 /* If there is a thread which would otherwise be resumed, which is
5113 stopped at a breakpoint that needs stepping over, then don't
5114 resume any threads - have it step over the breakpoint with all
5115 other threads stopped, then resume all threads again. Make sure
5116 to queue any signals that would otherwise be delivered or
5117 queued. */
5118 if (!any_pending && supports_breakpoints ())
5119 need_step_over
5120 = (struct thread_info *) find_inferior (&all_threads,
5121 need_step_over_p, NULL);
5122
5123 leave_all_stopped = (need_step_over != NULL || any_pending);
5124
5125 if (debug_threads)
5126 {
5127 if (need_step_over != NULL)
5128 debug_printf ("Not resuming all, need step over\n");
5129 else if (any_pending)
5130 debug_printf ("Not resuming, all-stop and found "
5131 "an LWP with pending status\n");
5132 else
5133 debug_printf ("Resuming, no pending status or step over needed\n");
5134 }
5135
5136 /* Even if we're leaving threads stopped, queue all signals we'd
5137 otherwise deliver. */
5138 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5139
5140 if (need_step_over)
5141 start_step_over (get_thread_lwp (need_step_over));
5142
5143 if (debug_threads)
5144 {
5145 debug_printf ("linux_resume done\n");
5146 debug_exit ();
5147 }
5148
5149 /* We may have events that were pending that can/should be sent to
5150 the client now. Trigger a linux_wait call. */
5151 if (target_is_async_p ())
5152 async_file_mark ();
5153 }
5154
5155 /* This function is called once per thread. We check the thread's
5156 last resume request, which will tell us whether to resume, step, or
5157 leave the thread stopped. Any signal the client requested to be
5158 delivered has already been enqueued at this point.
5159
5160 If any thread that GDB wants running is stopped at an internal
5161 breakpoint that needs stepping over, we start a step-over operation
5162 on that particular thread, and leave all others stopped. */
5163
5164 static int
5165 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5166 {
5167 struct thread_info *thread = (struct thread_info *) entry;
5168 struct lwp_info *lwp = get_thread_lwp (thread);
5169 int step;
5170
5171 if (lwp == except)
5172 return 0;
5173
5174 if (debug_threads)
5175 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5176
5177 if (!lwp->stopped)
5178 {
5179 if (debug_threads)
5180 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5181 return 0;
5182 }
5183
5184 if (thread->last_resume_kind == resume_stop
5185 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5186 {
5187 if (debug_threads)
5188 debug_printf (" client wants LWP %ld to remain stopped\n",
5189 lwpid_of (thread));
5190 return 0;
5191 }
5192
5193 if (lwp->status_pending_p)
5194 {
5195 if (debug_threads)
5196 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5197 lwpid_of (thread));
5198 return 0;
5199 }
5200
5201 gdb_assert (lwp->suspended >= 0);
5202
5203 if (lwp->suspended)
5204 {
5205 if (debug_threads)
5206 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5207 return 0;
5208 }
5209
5210 if (thread->last_resume_kind == resume_stop
5211 && lwp->pending_signals_to_report == NULL
5212 && lwp->collecting_fast_tracepoint == 0)
5213 {
5214 /* We haven't reported this LWP as stopped yet (otherwise, the
5215 last_status.kind check above would catch it, and we wouldn't
5216 reach here). This LWP may have been momentarily paused by a
5217 stop_all_lwps call while handling, for example, another LWP's
5218 step-over. In that case, the pending expected SIGSTOP signal
5219 that was queued at vCont;t handling time will have already
5220 been consumed by wait_for_sigstop, and so we need to requeue
5221 another one here. Note that if the LWP already has a SIGSTOP
5222 pending, this is a no-op. */
5223
5224 if (debug_threads)
5225 debug_printf ("Client wants LWP %ld to stop. "
5226 "Making sure it has a SIGSTOP pending\n",
5227 lwpid_of (thread));
5228
5229 send_sigstop (lwp);
5230 }
5231
5232 if (thread->last_resume_kind == resume_step)
5233 {
5234 if (debug_threads)
5235 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5236 lwpid_of (thread));
5237
5238 /* If GDB requested resume_step, install the single-step
5239 breakpoints right before the thread is actually resumed, unless
5240 they are already in place. */
5241 if (can_software_single_step ()
5242 && !has_single_step_breakpoints (thread))
5243 install_software_single_step_breakpoints (lwp);
5244
5245 step = maybe_hw_step (thread);
5246 }
5247 else if (lwp->bp_reinsert != 0)
5248 {
5249 if (debug_threads)
5250 debug_printf (" stepping LWP %ld, reinsert set\n",
5251 lwpid_of (thread));
5252
5253 step = maybe_hw_step (thread);
5254 }
5255 else
5256 step = 0;
5257
5258 linux_resume_one_lwp (lwp, step, 0, NULL);
5259 return 0;
5260 }
5261
5262 static int
5263 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5264 {
5265 struct thread_info *thread = (struct thread_info *) entry;
5266 struct lwp_info *lwp = get_thread_lwp (thread);
5267
5268 if (lwp == except)
5269 return 0;
5270
5271 lwp_suspended_decr (lwp);
5272
5273 return proceed_one_lwp (entry, except);
5274 }
5275
5276 /* When we finish a step-over, set threads running again. If there's
5277 another thread that may need a step-over, now's the time to start
5278 it. Eventually, we'll move all threads past their breakpoints. */
5279
5280 static void
5281 proceed_all_lwps (void)
5282 {
5283 struct thread_info *need_step_over;
5284
5285 /* If there is a thread which would otherwise be resumed, which is
5286 stopped at a breakpoint that needs stepping over, then don't
5287 resume any threads - have it step over the breakpoint with all
5288 other threads stopped, then resume all threads again. */
5289
5290 if (supports_breakpoints ())
5291 {
5292 need_step_over
5293 = (struct thread_info *) find_inferior (&all_threads,
5294 need_step_over_p, NULL);
5295
5296 if (need_step_over != NULL)
5297 {
5298 if (debug_threads)
5299 debug_printf ("proceed_all_lwps: found "
5300 "thread %ld needing a step-over\n",
5301 lwpid_of (need_step_over));
5302
5303 start_step_over (get_thread_lwp (need_step_over));
5304 return;
5305 }
5306 }
5307
5308 if (debug_threads)
5309 debug_printf ("Proceeding, no step-over needed\n");
5310
5311 find_inferior (&all_threads, proceed_one_lwp, NULL);
5312 }
5313
5314 /* Stopped LWPs that the client wanted to be running, that don't have
5315 pending statuses, are set to run again, except for EXCEPT, if not
5316 NULL. This undoes a stop_all_lwps call. */
5317
5318 static void
5319 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5320 {
5321 if (debug_threads)
5322 {
5323 debug_enter ();
5324 if (except)
5325 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5326 lwpid_of (get_lwp_thread (except)));
5327 else
5328 debug_printf ("unstopping all lwps\n");
5329 }
5330
5331 if (unsuspend)
5332 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5333 else
5334 find_inferior (&all_threads, proceed_one_lwp, except);
5335
5336 if (debug_threads)
5337 {
5338 debug_printf ("unstop_all_lwps done\n");
5339 debug_exit ();
5340 }
5341 }
5342
5343
5344 #ifdef HAVE_LINUX_REGSETS
5345
5346 #define use_linux_regsets 1
5347
5348 /* Returns true if REGSET has been disabled. */
5349
5350 static int
5351 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5352 {
5353 return (info->disabled_regsets != NULL
5354 && info->disabled_regsets[regset - info->regsets]);
5355 }
5356
5357 /* Disable REGSET. */
5358
5359 static void
5360 disable_regset (struct regsets_info *info, struct regset_info *regset)
5361 {
5362 int dr_offset;
5363
5364 dr_offset = regset - info->regsets;
5365 if (info->disabled_regsets == NULL)
5366 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5367 info->disabled_regsets[dr_offset] = 1;
5368 }
5369
5370 static int
5371 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5372 struct regcache *regcache)
5373 {
5374 struct regset_info *regset;
5375 int saw_general_regs = 0;
5376 int pid;
5377 struct iovec iov;
5378
5379 pid = lwpid_of (current_thread);
5380 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5381 {
5382 void *buf, *data;
5383 int nt_type, res;
5384
5385 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5386 continue;
5387
5388 buf = xmalloc (regset->size);
5389
5390 nt_type = regset->nt_type;
5391 if (nt_type)
5392 {
5393 iov.iov_base = buf;
5394 iov.iov_len = regset->size;
5395 data = (void *) &iov;
5396 }
5397 else
5398 data = buf;
5399
5400 #ifndef __sparc__
5401 res = ptrace (regset->get_request, pid,
5402 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5403 #else
5404 res = ptrace (regset->get_request, pid, data, nt_type);
5405 #endif
5406 if (res < 0)
5407 {
5408 if (errno == EIO)
5409 {
5410 /* If we get EIO on a regset, do not try it again for
5411 this process mode. */
5412 disable_regset (regsets_info, regset);
5413 }
5414 else if (errno == ENODATA)
5415 {
5416 /* ENODATA may be returned if the regset is currently
5417 not "active". This can happen in normal operation,
5418 so suppress the warning in this case. */
5419 }
5420 else if (errno == ESRCH)
5421 {
5422 /* At this point, ESRCH should mean the process is
5423 already gone, in which case we simply ignore attempts
5424 to read its registers. */
5425 }
5426 else
5427 {
5428 char s[256];
5429 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5430 pid);
5431 perror (s);
5432 }
5433 }
5434 else
5435 {
5436 if (regset->type == GENERAL_REGS)
5437 saw_general_regs = 1;
5438 regset->store_function (regcache, buf);
5439 }
5440 free (buf);
5441 }
5442 if (saw_general_regs)
5443 return 0;
5444 else
5445 return 1;
5446 }
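/* When REGSET->NT_TYPE is nonzero, the transfer above goes through
   the modern PTRACE_GETREGSET interface: the register set is selected
   by an NT_* note type and the data moves through a struct iovec,
   whose iov_len the kernel updates to the size actually transferred.
   A minimal sketch for the general-purpose registers (illustrative
   only):  */
#if 0 /* Illustrative sketch, not compiled.  */
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/user.h>
#include <elf.h>

static long
fetch_gp_regset (int tid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif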
5447
5448 static int
5449 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5450 struct regcache *regcache)
5451 {
5452 struct regset_info *regset;
5453 int saw_general_regs = 0;
5454 int pid;
5455 struct iovec iov;
5456
5457 pid = lwpid_of (current_thread);
5458 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5459 {
5460 void *buf, *data;
5461 int nt_type, res;
5462
5463 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5464 || regset->fill_function == NULL)
5465 continue;
5466
5467 buf = xmalloc (regset->size);
5468
5469 /* First fill the buffer with the current register set contents,
5470 in case there are any items in the kernel's regset that are
5471 not in gdbserver's regcache. */
5472
5473 nt_type = regset->nt_type;
5474 if (nt_type)
5475 {
5476 iov.iov_base = buf;
5477 iov.iov_len = regset->size;
5478 data = (void *) &iov;
5479 }
5480 else
5481 data = buf;
5482
5483 #ifndef __sparc__
5484 res = ptrace (regset->get_request, pid,
5485 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5486 #else
5487 res = ptrace (regset->get_request, pid, data, nt_type);
5488 #endif
5489
5490 if (res == 0)
5491 {
5492 /* Then overlay our cached registers on that. */
5493 regset->fill_function (regcache, buf);
5494
5495 /* Only now do we write the register set. */
5496 #ifndef __sparc__
5497 res = ptrace (regset->set_request, pid,
5498 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5499 #else
5500 res = ptrace (regset->set_request, pid, data, nt_type);
5501 #endif
5502 }
5503
5504 if (res < 0)
5505 {
5506 if (errno == EIO)
5507 {
5508 /* If we get EIO on a regset, do not try it again for
5509 this process mode. */
5510 disable_regset (regsets_info, regset);
5511 }
5512 else if (errno == ESRCH)
5513 {
5514 /* At this point, ESRCH should mean the process is
5515 already gone, in which case we simply ignore attempts
5516 to change its registers. See also the related
5517 comment in linux_resume_one_lwp. */
5518 free (buf);
5519 return 0;
5520 }
5521 else
5522 {
5523 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5524 }
5525 }
5526 else if (regset->type == GENERAL_REGS)
5527 saw_general_regs = 1;
5528 free (buf);
5529 }
5530 if (saw_general_regs)
5531 return 0;
5532 else
5533 return 1;
5534 }
5535
5536 #else /* !HAVE_LINUX_REGSETS */
5537
5538 #define use_linux_regsets 0
5539 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5540 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5541
5542 #endif
5543
5544 /* Return 1 if register REGNO is supported by one of the regset ptrace
5545 calls or 0 if it has to be transferred individually. */
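/* For instance, register 10 is tested against bit 2 of byte 1 of the
   bitmap (10 / 8 == 1, 10 % 8 == 2); a NULL REGSET_BITMAP means every
   register is covered by some regset.  */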
5546
5547 static int
5548 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5549 {
5550 unsigned char mask = 1 << (regno % 8);
5551 size_t index = regno / 8;
5552
5553 return (use_linux_regsets
5554 && (regs_info->regset_bitmap == NULL
5555 || (regs_info->regset_bitmap[index] & mask) != 0));
5556 }
5557
5558 #ifdef HAVE_LINUX_USRREGS
5559
5560 static int
5561 register_addr (const struct usrregs_info *usrregs, int regnum)
5562 {
5563 int addr;
5564
5565 if (regnum < 0 || regnum >= usrregs->num_regs)
5566 error ("Invalid register number %d.", regnum);
5567
5568 addr = usrregs->regmap[regnum];
5569
5570 return addr;
5571 }
5572
5573 /* Fetch one register. */
5574 static void
5575 fetch_register (const struct usrregs_info *usrregs,
5576 struct regcache *regcache, int regno)
5577 {
5578 CORE_ADDR regaddr;
5579 int i, size;
5580 char *buf;
5581 int pid;
5582
5583 if (regno >= usrregs->num_regs)
5584 return;
5585 if ((*the_low_target.cannot_fetch_register) (regno))
5586 return;
5587
5588 regaddr = register_addr (usrregs, regno);
5589 if (regaddr == -1)
5590 return;
5591
5592 size = ((register_size (regcache->tdesc, regno)
5593 + sizeof (PTRACE_XFER_TYPE) - 1)
5594 & -sizeof (PTRACE_XFER_TYPE));
5595 buf = (char *) alloca (size);
5596
5597 pid = lwpid_of (current_thread);
5598 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5599 {
5600 errno = 0;
5601 *(PTRACE_XFER_TYPE *) (buf + i) =
5602 ptrace (PTRACE_PEEKUSER, pid,
5603 /* Coerce to a uintptr_t first to avoid potential gcc warning
5604 about coercing an 8 byte integer to a 4 byte pointer. */
5605 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5606 regaddr += sizeof (PTRACE_XFER_TYPE);
5607 if (errno != 0)
5608 error ("reading register %d: %s", regno, strerror (errno));
5609 }
5610
5611 if (the_low_target.supply_ptrace_register)
5612 the_low_target.supply_ptrace_register (regcache, regno, buf);
5613 else
5614 supply_register (regcache, regno, buf);
5615 }
5616
5617 /* Store one register. */
5618 static void
5619 store_register (const struct usrregs_info *usrregs,
5620 struct regcache *regcache, int regno)
5621 {
5622 CORE_ADDR regaddr;
5623 int i, size;
5624 char *buf;
5625 int pid;
5626
5627 if (regno >= usrregs->num_regs)
5628 return;
5629 if ((*the_low_target.cannot_store_register) (regno))
5630 return;
5631
5632 regaddr = register_addr (usrregs, regno);
5633 if (regaddr == -1)
5634 return;
5635
5636 size = ((register_size (regcache->tdesc, regno)
5637 + sizeof (PTRACE_XFER_TYPE) - 1)
5638 & -sizeof (PTRACE_XFER_TYPE));
5639 buf = (char *) alloca (size);
5640 memset (buf, 0, size);
5641
5642 if (the_low_target.collect_ptrace_register)
5643 the_low_target.collect_ptrace_register (regcache, regno, buf);
5644 else
5645 collect_register (regcache, regno, buf);
5646
5647 pid = lwpid_of (current_thread);
5648 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5649 {
5650 errno = 0;
5651 ptrace (PTRACE_POKEUSER, pid,
5652 /* Coerce to a uintptr_t first to avoid potential gcc warning
5653 about coercing an 8 byte integer to a 4 byte pointer. */
5654 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5655 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5656 if (errno != 0)
5657 {
5658 /* At this point, ESRCH should mean the process is
5659 already gone, in which case we simply ignore attempts
5660 to change its registers. See also the related
5661 comment in linux_resume_one_lwp. */
5662 if (errno == ESRCH)
5663 return;
5664
5665 if ((*the_low_target.cannot_store_register) (regno) == 0)
5666 error ("writing register %d: %s", regno, strerror (errno));
5667 }
5668 regaddr += sizeof (PTRACE_XFER_TYPE);
5669 }
5670 }
5671
5672 /* Fetch all registers, or just one, from the child process.
5673 If REGNO is -1, do this for all registers, skipping any that are
5674 assumed to have been retrieved by regsets_fetch_inferior_registers,
5675 unless ALL is non-zero.
5676 Otherwise, REGNO specifies which register (so we can save time). */
5677 static void
5678 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5679 struct regcache *regcache, int regno, int all)
5680 {
5681 struct usrregs_info *usr = regs_info->usrregs;
5682
5683 if (regno == -1)
5684 {
5685 for (regno = 0; regno < usr->num_regs; regno++)
5686 if (all || !linux_register_in_regsets (regs_info, regno))
5687 fetch_register (usr, regcache, regno);
5688 }
5689 else
5690 fetch_register (usr, regcache, regno);
5691 }
5692
5693 /* Store our register values back into the inferior.
5694 If REGNO is -1, do this for all registers, skipping any that are
5695 assumed to have been saved by regsets_store_inferior_registers,
5696 unless ALL is non-zero.
5697 Otherwise, REGNO specifies which register (so we can save time). */
5698 static void
5699 usr_store_inferior_registers (const struct regs_info *regs_info,
5700 struct regcache *regcache, int regno, int all)
5701 {
5702 struct usrregs_info *usr = regs_info->usrregs;
5703
5704 if (regno == -1)
5705 {
5706 for (regno = 0; regno < usr->num_regs; regno++)
5707 if (all || !linux_register_in_regsets (regs_info, regno))
5708 store_register (usr, regcache, regno);
5709 }
5710 else
5711 store_register (usr, regcache, regno);
5712 }
5713
5714 #else /* !HAVE_LINUX_USRREGS */
5715
5716 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5717 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5718
5719 #endif
5720
5721
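/* Fetch registers (all of them if REGNO is -1) from the inferior into
   REGCACHE: first through the target's fetch_register hook if any,
   then through the regset calls, and finally one at a time with
   PTRACE_PEEKUSER for whatever the regsets did not cover.  */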
5722 static void
5723 linux_fetch_registers (struct regcache *regcache, int regno)
5724 {
5725 int use_regsets;
5726 int all = 0;
5727 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5728
5729 if (regno == -1)
5730 {
5731 if (the_low_target.fetch_register != NULL
5732 && regs_info->usrregs != NULL)
5733 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5734 (*the_low_target.fetch_register) (regcache, regno);
5735
5736 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5737 if (regs_info->usrregs != NULL)
5738 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5739 }
5740 else
5741 {
5742 if (the_low_target.fetch_register != NULL
5743 && (*the_low_target.fetch_register) (regcache, regno))
5744 return;
5745
5746 use_regsets = linux_register_in_regsets (regs_info, regno);
5747 if (use_regsets)
5748 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5749 regcache);
5750 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5751 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5752 }
5753 }
5754
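/* Store registers (all of them if REGNO is -1) from REGCACHE back
   into the inferior, preferring the regset calls and falling back on
   PTRACE_POKEUSER where necessary.  */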
5755 static void
5756 linux_store_registers (struct regcache *regcache, int regno)
5757 {
5758 int use_regsets;
5759 int all = 0;
5760 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5761
5762 if (regno == -1)
5763 {
5764 all = regsets_store_inferior_registers (regs_info->regsets_info,
5765 regcache);
5766 if (regs_info->usrregs != NULL)
5767 usr_store_inferior_registers (regs_info, regcache, regno, all);
5768 }
5769 else
5770 {
5771 use_regsets = linux_register_in_regsets (regs_info, regno);
5772 if (use_regsets)
5773 all = regsets_store_inferior_registers (regs_info->regsets_info,
5774 regcache);
5775 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5776 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5777 }
5778 }
5779
5780
5781 /* Copy LEN bytes from the inferior's memory starting at MEMADDR to
5782 debugger memory starting at MYADDR.  Returns 0 on success, else the errno value from the failing read.  */
5783
5784 static int
5785 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5786 {
5787 int pid = lwpid_of (current_thread);
5788 PTRACE_XFER_TYPE *buffer;
5789 CORE_ADDR addr;
5790 int count;
5791 char filename[64];
5792 int i;
5793 int ret;
5794 int fd;
5795
5796 /* Try using /proc.  Don't bother for transfers shorter than three words.  */
5797 if (len >= 3 * sizeof (long))
5798 {
5799 int bytes;
5800
5801 /* We could keep this file open and cache it - possibly one per
5802 thread. That requires some juggling, but is even faster. */
5803 sprintf (filename, "/proc/%d/mem", pid);
5804 fd = open (filename, O_RDONLY | O_LARGEFILE);
5805 if (fd == -1)
5806 goto no_proc;
5807
5808 /* If pread64 is available, use it. It's faster if the kernel
5809 supports it (only one syscall), and it's 64-bit safe even on
5810 32-bit platforms (for instance, SPARC debugging a SPARC64
5811 application). */
5812 #ifdef HAVE_PREAD64
5813 bytes = pread64 (fd, myaddr, len, memaddr);
5814 #else
5815 bytes = -1;
5816 if (lseek (fd, memaddr, SEEK_SET) != -1)
5817 bytes = read (fd, myaddr, len);
5818 #endif
5819
5820 close (fd);
5821 if (bytes == len)
5822 return 0;
5823
5824 /* Some data was read; we'll try to get the rest with ptrace.  */
5825 if (bytes > 0)
5826 {
5827 memaddr += bytes;
5828 myaddr += bytes;
5829 len -= bytes;
5830 }
5831 }
5832
5833 no_proc:
5834 /* Round starting address down to longword boundary. */
5835 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5836 /* Round ending address up; get number of longwords that makes. */
5837 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5838 / sizeof (PTRACE_XFER_TYPE));
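/* For example, with 8-byte transfer words, MEMADDR == 0x1003 and
   LEN == 10 give ADDR == 0x1000 and COUNT == 2, covering the range
   0x1000..0x100f.  */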
5839 /* Allocate buffer of that many longwords. */
5840 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5841
5842 /* Read all the longwords.  */
5843 errno = 0;
5844 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5845 {
5846 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5847 about coercing an 8 byte integer to a 4 byte pointer. */
5848 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5849 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5850 (PTRACE_TYPE_ARG4) 0);
5851 if (errno)
5852 break;
5853 }
5854 ret = errno;
5855
5856 /* Copy appropriate bytes out of the buffer. */
5857 if (i > 0)
5858 {
5859 i *= sizeof (PTRACE_XFER_TYPE);
5860 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5861 memcpy (myaddr,
5862 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5863 i < len ? i : len);
5864 }
5865
5866 return ret;
5867 }
5868
5869 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5870 memory at MEMADDR. On failure (cannot write to the inferior)
5871 returns the value of errno. Always succeeds if LEN is zero. */
5872
5873 static int
5874 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5875 {
5876 int i;
5877 /* Round starting address down to longword boundary. */
5878 CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5879 /* Round ending address up; get number of longwords that makes. */
5880 int count
5881 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5882 / sizeof (PTRACE_XFER_TYPE);
5883
5884 /* Allocate buffer of that many longwords. */
5885 PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5886
5887 int pid = lwpid_of (current_thread);
5888
5889 if (len == 0)
5890 {
5891 /* Zero length write always succeeds. */
5892 return 0;
5893 }
5894
5895 if (debug_threads)
5896 {
5897 /* Dump up to four bytes. */
5898 char str[4 * 2 + 1];
5899 char *p = str;
5900 int dump = len < 4 ? len : 4;
5901
5902 for (i = 0; i < dump; i++)
5903 {
5904 sprintf (p, "%02x", myaddr[i]);
5905 p += 2;
5906 }
5907 *p = '\0';
5908
5909 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5910 str, (long) memaddr, pid);
5911 }
5912
5913 /* Fill start and end extra bytes of buffer with existing memory data. */
5914
5915 errno = 0;
5916 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5917 about coercing an 8 byte integer to a 4 byte pointer. */
5918 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5919 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5920 (PTRACE_TYPE_ARG4) 0);
5921 if (errno)
5922 return errno;
5923
5924 if (count > 1)
5925 {
5926 errno = 0;
5927 buffer[count - 1]
5928 = ptrace (PTRACE_PEEKTEXT, pid,
5929 /* Coerce to a uintptr_t first to avoid potential gcc warning
5930 about coercing an 8 byte integer to a 4 byte pointer. */
5931 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5932 * sizeof (PTRACE_XFER_TYPE)),
5933 (PTRACE_TYPE_ARG4) 0);
5934 if (errno)
5935 return errno;
5936 }
5937
5938 /* Copy data to be written over corresponding part of buffer. */
5939
5940 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5941 myaddr, len);
5942
5943 /* Write the entire buffer. */
5944
5945 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5946 {
5947 errno = 0;
5948 ptrace (PTRACE_POKETEXT, pid,
5949 /* Coerce to a uintptr_t first to avoid potential gcc warning
5950 about coercing an 8 byte integer to a 4 byte pointer. */
5951 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5952 (PTRACE_TYPE_ARG4) buffer[i]);
5953 if (errno)
5954 return errno;
5955 }
5956
5957 return 0;
5958 }
5959
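/* Called when symbol lookup becomes possible; used here to initialize
   libthread_db-based thread debugging for the current process, if that
   has not been done already.  */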
5960 static void
5961 linux_look_up_symbols (void)
5962 {
5963 #ifdef USE_THREAD_DB
5964 struct process_info *proc = current_process ();
5965
5966 if (proc->priv->thread_db != NULL)
5967 return;
5968
5969 thread_db_init ();
5970 #endif
5971 }
5972
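/* Implement the target_ops "request_interrupt" method.  */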
5973 static void
5974 linux_request_interrupt (void)
5975 {
5976 extern unsigned long signal_pid;
5977
5978 /* Send a SIGINT to the process group. This acts just like the user
5979 typed a ^C on the controlling terminal. */
5980 kill (-signal_pid, SIGINT);
5981 }
5982
5983 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5984 to debugger memory starting at MYADDR. */
5985
5986 static int
5987 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5988 {
5989 char filename[PATH_MAX];
5990 int fd, n;
5991 int pid = lwpid_of (current_thread);
5992
5993 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5994
5995 fd = open (filename, O_RDONLY);
5996 if (fd < 0)
5997 return -1;
5998
5999 if (offset != (CORE_ADDR) 0
6000 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6001 n = -1;
6002 else
6003 n = read (fd, myaddr, len);
6004
6005 close (fd);
6006
6007 return n;
6008 }
6009
6010 /* These breakpoint- and watchpoint-related wrapper functions simply
6011 pass the call on if the target has registered a corresponding
6012 function. */
6013
6014 static int
6015 linux_supports_z_point_type (char z_type)
6016 {
6017 return (the_low_target.supports_z_point_type != NULL
6018 && the_low_target.supports_z_point_type (z_type));
6019 }
6020
6021 static int
6022 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
6023 int size, struct raw_breakpoint *bp)
6024 {
6025 if (type == raw_bkpt_type_sw)
6026 return insert_memory_breakpoint (bp);
6027 else if (the_low_target.insert_point != NULL)
6028 return the_low_target.insert_point (type, addr, size, bp);
6029 else
6030 /* Unsupported (see target.h). */
6031 return 1;
6032 }
6033
6034 static int
6035 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
6036 int size, struct raw_breakpoint *bp)
6037 {
6038 if (type == raw_bkpt_type_sw)
6039 return remove_memory_breakpoint (bp);
6040 else if (the_low_target.remove_point != NULL)
6041 return the_low_target.remove_point (type, addr, size, bp);
6042 else
6043 /* Unsupported (see target.h). */
6044 return 1;
6045 }
6046
6047 /* Implement the to_stopped_by_sw_breakpoint target_ops
6048 method. */
6049
6050 static int
6051 linux_stopped_by_sw_breakpoint (void)
6052 {
6053 struct lwp_info *lwp = get_thread_lwp (current_thread);
6054
6055 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
6056 }
6057
6058 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
6059 method. */
6060
6061 static int
6062 linux_supports_stopped_by_sw_breakpoint (void)
6063 {
6064 return USE_SIGTRAP_SIGINFO;
6065 }
6066
6067 /* Implement the to_stopped_by_hw_breakpoint target_ops
6068 method. */
6069
6070 static int
6071 linux_stopped_by_hw_breakpoint (void)
6072 {
6073 struct lwp_info *lwp = get_thread_lwp (current_thread);
6074
6075 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
6076 }
6077
6078 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
6079 method. */
6080
6081 static int
6082 linux_supports_stopped_by_hw_breakpoint (void)
6083 {
6084 return USE_SIGTRAP_SIGINFO;
6085 }
6086
6087 /* Implement the supports_hardware_single_step target_ops method. */
6088
6089 static int
6090 linux_supports_hardware_single_step (void)
6091 {
6092 return can_hardware_single_step ();
6093 }
6094
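/* Implement the supports_software_single_step target_ops method.  */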
6095 static int
6096 linux_supports_software_single_step (void)
6097 {
6098 return can_software_single_step ();
6099 }
6100
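/* Implement the to_stopped_by_watchpoint target_ops method.  */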
6101 static int
6102 linux_stopped_by_watchpoint (void)
6103 {
6104 struct lwp_info *lwp = get_thread_lwp (current_thread);
6105
6106 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
6107 }
6108
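/* Implement the to_stopped_data_address target_ops method.  */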
6109 static CORE_ADDR
6110 linux_stopped_data_address (void)
6111 {
6112 struct lwp_info *lwp = get_thread_lwp (current_thread);
6113
6114 return lwp->stopped_data_address;
6115 }
6116
6117 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6118 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6119 && defined(PT_TEXT_END_ADDR)
6120
6121 /* This is only used for targets that define PT_TEXT_ADDR,
6122 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
6123 the target has different ways of acquiring this information, like
6124 loadmaps. */
6125
6126 /* Under uClinux, programs are loaded at non-zero offsets, which we need
6127 to tell gdb about. */
6128
6129 static int
6130 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
6131 {
6132 unsigned long text, text_end, data;
6133 int pid = lwpid_of (current_thread);
6134
6135 errno = 0;
6136
6137 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6138 (PTRACE_TYPE_ARG4) 0);
6139 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6140 (PTRACE_TYPE_ARG4) 0);
6141 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6142 (PTRACE_TYPE_ARG4) 0);
6143
6144 if (errno == 0)
6145 {
6146 /* Both text and data offsets produced at compile-time (and so
6147 used by gdb) are relative to the beginning of the program,
6148 with the data segment immediately following the text segment.
6149 However, the actual runtime layout in memory may put the data
6150 somewhere else, so when we send gdb a data base-address, we
6151 use the real data base address and subtract the compile-time
6152 data base-address from it (which is just the length of the
6153 text segment). BSS immediately follows data in both
6154 cases. */
6155 *text_p = text;
6156 *data_p = data - (text_end - text);
6157
6158 return 1;
6159 }
6160 return 0;
6161 }
6162 #endif
6163
6164 static int
6165 linux_qxfer_osdata (const char *annex,
6166 unsigned char *readbuf, unsigned const char *writebuf,
6167 CORE_ADDR offset, int len)
6168 {
6169 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6170 }
6171
6172 /* Convert a native/host siginfo object into/from the siginfo in the layout
6173 of the inferior's architecture.  DIRECTION 1 converts from the inferior layout to the native one; 0 converts the other way. */
6174
6175 static void
6176 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6177 {
6178 int done = 0;
6179
6180 if (the_low_target.siginfo_fixup != NULL)
6181 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6182
6183 /* If there was no callback, or the callback didn't do anything,
6184 then just do a straight memcpy. */
6185 if (!done)
6186 {
6187 if (direction == 1)
6188 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6189 else
6190 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6191 }
6192 }
6193
6194 static int
6195 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6196 unsigned const char *writebuf, CORE_ADDR offset, int len)
6197 {
6198 int pid;
6199 siginfo_t siginfo;
6200 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6201
6202 if (current_thread == NULL)
6203 return -1;
6204
6205 pid = lwpid_of (current_thread);
6206
6207 if (debug_threads)
6208 debug_printf ("%s siginfo for lwp %d.\n",
6209 readbuf != NULL ? "Reading" : "Writing",
6210 pid);
6211
6212 if (offset >= sizeof (siginfo))
6213 return -1;
6214
6215 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6216 return -1;
6217
6218 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6219 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6220 inferior with a 64-bit GDBSERVER should look the same as debugging it
6221 with a 32-bit GDBSERVER, we need to convert it. */
6222 siginfo_fixup (&siginfo, inf_siginfo, 0);
6223
6224 if (offset + len > sizeof (siginfo))
6225 len = sizeof (siginfo) - offset;
6226
6227 if (readbuf != NULL)
6228 memcpy (readbuf, inf_siginfo + offset, len);
6229 else
6230 {
6231 memcpy (inf_siginfo + offset, writebuf, len);
6232
6233 /* Convert back to ptrace layout before flushing it out. */
6234 siginfo_fixup (&siginfo, inf_siginfo, 1);
6235
6236 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6237 return -1;
6238 }
6239
6240 return len;
6241 }
6242
6243 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6244 it lets us notice when children change state, and it acts as the
6245 handler for the sigsuspend in my_waitpid.  */
6246
6247 static void
6248 sigchld_handler (int signo)
6249 {
6250 int old_errno = errno;
6251
6252 if (debug_threads)
6253 {
6254 do
6255 {
6256 /* fprintf is not async-signal-safe, so call write
6257 directly. */
6258 if (write (2, "sigchld_handler\n",
6259 sizeof ("sigchld_handler\n") - 1) < 0)
6260 break; /* just ignore */
6261 } while (0);
6262 }
6263
6264 if (target_is_async_p ())
6265 async_file_mark (); /* trigger a linux_wait */
6266
6267 errno = old_errno;
6268 }
6269
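/* Non-stop mode is always supported on GNU/Linux.  */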
6270 static int
6271 linux_supports_non_stop (void)
6272 {
6273 return 1;
6274 }
6275
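/* Enable or disable target async mode, returning the previous
   setting.  SIGCHLD is blocked while the event pipe and its handler
   are set up or torn down, so the signal handler cannot observe a
   half-initialized state.  */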
6276 static int
6277 linux_async (int enable)
6278 {
6279 int previous = target_is_async_p ();
6280
6281 if (debug_threads)
6282 debug_printf ("linux_async (%d), previous=%d\n",
6283 enable, previous);
6284
6285 if (previous != enable)
6286 {
6287 sigset_t mask;
6288 sigemptyset (&mask);
6289 sigaddset (&mask, SIGCHLD);
6290
6291 sigprocmask (SIG_BLOCK, &mask, NULL);
6292
6293 if (enable)
6294 {
6295 if (pipe (linux_event_pipe) == -1)
6296 {
6297 linux_event_pipe[0] = -1;
6298 linux_event_pipe[1] = -1;
6299 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6300
6301 warning ("creating event pipe failed.");
6302 return previous;
6303 }
6304
6305 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6306 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6307
6308 /* Register the event loop handler. */
6309 add_file_handler (linux_event_pipe[0],
6310 handle_target_event, NULL);
6311
6312 /* Always trigger a linux_wait. */
6313 async_file_mark ();
6314 }
6315 else
6316 {
6317 delete_file_handler (linux_event_pipe[0]);
6318
6319 close (linux_event_pipe[0]);
6320 close (linux_event_pipe[1]);
6321 linux_event_pipe[0] = -1;
6322 linux_event_pipe[1] = -1;
6323 }
6324
6325 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6326 }
6327
6328 return previous;
6329 }
6330
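/* Switch the target into or out of non-stop mode by registering with
   or unregistering from the event loop.  Returns 0 on success, -1 if
   async mode could not be changed accordingly.  */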
6331 static int
6332 linux_start_non_stop (int nonstop)
6333 {
6334 /* Register or unregister from event-loop accordingly. */
6335 linux_async (nonstop);
6336
6337 if (target_is_async_p () != (nonstop != 0))
6338 return -1;
6339
6340 return 0;
6341 }
6342
6343 static int
6344 linux_supports_multi_process (void)
6345 {
6346 return 1;
6347 }
6348
6349 /* Check if fork events are supported. */
6350
6351 static int
6352 linux_supports_fork_events (void)
6353 {
6354 return linux_supports_tracefork ();
6355 }
6356
6357 /* Check if vfork events are supported. */
6358
6359 static int
6360 linux_supports_vfork_events (void)
6361 {
6362 return linux_supports_tracefork ();
6363 }
6364
6365 /* Check if exec events are supported. */
6366
6367 static int
6368 linux_supports_exec_events (void)
6369 {
6370 return linux_supports_traceexec ();
6371 }
6372
6373 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6374 options for the specified lwp. */
6375
6376 static int
6377 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6378 void *args)
6379 {
6380 struct thread_info *thread = (struct thread_info *) entry;
6381 struct lwp_info *lwp = get_thread_lwp (thread);
6382
6383 if (!lwp->stopped)
6384 {
6385 /* Stop the lwp so we can modify its ptrace options. */
6386 lwp->must_set_ptrace_flags = 1;
6387 linux_stop_lwp (lwp);
6388 }
6389 else
6390 {
6391 /* Already stopped; go ahead and set the ptrace options. */
6392 struct process_info *proc = find_process_pid (pid_of (thread));
6393 int options = linux_low_ptrace_options (proc->attached);
6394
6395 linux_enable_event_reporting (lwpid_of (thread), options);
6396 lwp->must_set_ptrace_flags = 0;
6397 }
6398
6399 return 0;
6400 }
6401
6402 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6403 ptrace flags for all inferiors. This is in case the new GDB connection
6404 doesn't support the same set of events that the previous one did. */
6405
6406 static void
6407 linux_handle_new_gdb_connection (void)
6408 {
6409 pid_t pid;
6410
6411 /* Request that all the lwps reset their ptrace options. */
6412 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6413 }
6414
6415 static int
6416 linux_supports_disable_randomization (void)
6417 {
6418 #ifdef HAVE_PERSONALITY
6419 return 1;
6420 #else
6421 return 0;
6422 #endif
6423 }
6424
6425 static int
6426 linux_supports_agent (void)
6427 {
6428 return 1;
6429 }
6430
6431 static int
6432 linux_supports_range_stepping (void)
6433 {
6434 if (the_low_target.supports_range_stepping == NULL)
6435 return 0;
6436
6437 return (*the_low_target.supports_range_stepping) ();
6438 }
6439
6440 /* Enumerate spufs IDs for process PID, storing them as 32-bit values into BUF subject to OFFSET and LEN.  Return the number of bytes written, or -1 if the process's fd directory cannot be read. */
6441 static int
6442 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6443 {
6444 int pos = 0;
6445 int written = 0;
6446 char path[128];
6447 DIR *dir;
6448 struct dirent *entry;
6449
6450 sprintf (path, "/proc/%ld/fd", pid);
6451 dir = opendir (path);
6452 if (!dir)
6453 return -1;
6454
6455 rewinddir (dir);
6456 while ((entry = readdir (dir)) != NULL)
6457 {
6458 struct stat st;
6459 struct statfs stfs;
6460 int fd;
6461
6462 fd = atoi (entry->d_name);
6463 if (!fd)
6464 continue;
6465
6466 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6467 if (stat (path, &st) != 0)
6468 continue;
6469 if (!S_ISDIR (st.st_mode))
6470 continue;
6471
6472 if (statfs (path, &stfs) != 0)
6473 continue;
6474 if (stfs.f_type != SPUFS_MAGIC)
6475 continue;
6476
6477 if (pos >= offset && pos + 4 <= offset + len)
6478 {
6479 *(unsigned int *)(buf + pos - offset) = fd;
6480 written += 4;
6481 }
6482 pos += 4;
6483 }
6484
6485 closedir (dir);
6486 return written;
6487 }
6488
6489 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6490 object type, using the /proc file system. */
6491 static int
6492 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6493 unsigned const char *writebuf,
6494 CORE_ADDR offset, int len)
6495 {
6496 long pid = lwpid_of (current_thread);
6497 char buf[128];
6498 int fd = 0;
6499 int ret = 0;
6500
6501 if (!writebuf && !readbuf)
6502 return -1;
6503
6504 if (!*annex)
6505 {
6506 if (!readbuf)
6507 return -1;
6508 else
6509 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6510 }
6511
6512 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6513 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6514 if (fd <= 0)
6515 return -1;
6516
6517 if (offset != 0
6518 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6519 {
6520 close (fd);
6521 return 0;
6522 }
6523
6524 if (writebuf)
6525 ret = write (fd, writebuf, (size_t) len);
6526 else
6527 ret = read (fd, readbuf, (size_t) len);
6528
6529 close (fd);
6530 return ret;
6531 }
6532
6533 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6534 struct target_loadseg
6535 {
6536 /* Core address to which the segment is mapped. */
6537 Elf32_Addr addr;
6538 /* VMA recorded in the program header. */
6539 Elf32_Addr p_vaddr;
6540 /* Size of this segment in memory. */
6541 Elf32_Word p_memsz;
6542 };
6543
6544 # if defined PT_GETDSBT
6545 struct target_loadmap
6546 {
6547 /* Protocol version number, must be zero. */
6548 Elf32_Word version;
6549 /* Pointer to the DSBT table, its size, and the DSBT index. */
6550 unsigned *dsbt_table;
6551 unsigned dsbt_size, dsbt_index;
6552 /* Number of segments in this map. */
6553 Elf32_Word nsegs;
6554 /* The actual memory map. */
6555 struct target_loadseg segs[/*nsegs*/];
6556 };
6557 # define LINUX_LOADMAP PT_GETDSBT
6558 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6559 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6560 # else
6561 struct target_loadmap
6562 {
6563 /* Protocol version number, must be zero. */
6564 Elf32_Half version;
6565 /* Number of segments in this map. */
6566 Elf32_Half nsegs;
6567 /* The actual memory map. */
6568 struct target_loadseg segs[/*nsegs*/];
6569 };
6570 # define LINUX_LOADMAP PTRACE_GETFDPIC
6571 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6572 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6573 # endif
6574
6575 static int
6576 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6577 unsigned char *myaddr, unsigned int len)
6578 {
6579 int pid = lwpid_of (current_thread);
6580 int addr = -1;
6581 struct target_loadmap *data = NULL;
6582 unsigned int actual_length, copy_length;
6583
6584 if (strcmp (annex, "exec") == 0)
6585 addr = (int) LINUX_LOADMAP_EXEC;
6586 else if (strcmp (annex, "interp") == 0)
6587 addr = (int) LINUX_LOADMAP_INTERP;
6588 else
6589 return -1;
6590
6591 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6592 return -1;
6593
6594 if (data == NULL)
6595 return -1;
6596
6597 actual_length = sizeof (struct target_loadmap)
6598 + sizeof (struct target_loadseg) * data->nsegs;
6599
6600 if (offset < 0 || offset > actual_length)
6601 return -1;
6602
6603 copy_length = actual_length - offset < len ? actual_length - offset : len;
6604 memcpy (myaddr, (char *) data + offset, copy_length);
6605 return copy_length;
6606 }
6607 #else
6608 # define linux_read_loadmap NULL
6609 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6610
6611 static void
6612 linux_process_qsupported (char **features, int count)
6613 {
6614 if (the_low_target.process_qsupported != NULL)
6615 the_low_target.process_qsupported (features, count);
6616 }
6617
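/* Catching syscalls requires both an architecture hook to decode the
   syscall (get_syscall_trapinfo) and kernel support for
   PTRACE_O_TRACESYSGOOD.  */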
6618 static int
6619 linux_supports_catch_syscall (void)
6620 {
6621 return (the_low_target.get_syscall_trapinfo != NULL
6622 && linux_supports_tracesysgood ());
6623 }
6624
6625 static int
6626 linux_get_ipa_tdesc_idx (void)
6627 {
6628 if (the_low_target.get_ipa_tdesc_idx == NULL)
6629 return 0;
6630
6631 return (*the_low_target.get_ipa_tdesc_idx) ();
6632 }
6633
6634 static int
6635 linux_supports_tracepoints (void)
6636 {
6637 if (the_low_target.supports_tracepoints == NULL)
6638 return 0;
6639
6640 return (*the_low_target.supports_tracepoints) ();
6641 }
6642
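/* Read the current PC out of REGCACHE via the architecture's get_pc
   hook, or return 0 if the architecture does not provide one.  */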
6643 static CORE_ADDR
6644 linux_read_pc (struct regcache *regcache)
6645 {
6646 if (the_low_target.get_pc == NULL)
6647 return 0;
6648
6649 return (*the_low_target.get_pc) (regcache);
6650 }
6651
6652 static void
6653 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6654 {
6655 gdb_assert (the_low_target.set_pc != NULL);
6656
6657 (*the_low_target.set_pc) (regcache, pc);
6658 }
6659
6660 static int
6661 linux_thread_stopped (struct thread_info *thread)
6662 {
6663 return get_thread_lwp (thread)->stopped;
6664 }
6665
6666 /* This exposes stop-all-threads functionality to other modules. */
6667
6668 static void
6669 linux_pause_all (int freeze)
6670 {
6671 stop_all_lwps (freeze, NULL);
6672 }
6673
6674 /* This exposes unstop-all-threads functionality to other gdbserver
6675 modules. */
6676
6677 static void
6678 linux_unpause_all (int unfreeze)
6679 {
6680 unstop_all_lwps (unfreeze, NULL);
6681 }
6682
6683 static int
6684 linux_prepare_to_access_memory (void)
6685 {
6686 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6687 running LWP. */
6688 if (non_stop)
6689 linux_pause_all (1);
6690 return 0;
6691 }
6692
6693 static void
6694 linux_done_accessing_memory (void)
6695 {
6696 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6697 running LWP. */
6698 if (non_stop)
6699 linux_unpause_all (1);
6700 }
6701
6702 static int
6703 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6704 CORE_ADDR collector,
6705 CORE_ADDR lockaddr,
6706 ULONGEST orig_size,
6707 CORE_ADDR *jump_entry,
6708 CORE_ADDR *trampoline,
6709 ULONGEST *trampoline_size,
6710 unsigned char *jjump_pad_insn,
6711 ULONGEST *jjump_pad_insn_size,
6712 CORE_ADDR *adjusted_insn_addr,
6713 CORE_ADDR *adjusted_insn_addr_end,
6714 char *err)
6715 {
6716 return (*the_low_target.install_fast_tracepoint_jump_pad)
6717 (tpoint, tpaddr, collector, lockaddr, orig_size,
6718 jump_entry, trampoline, trampoline_size,
6719 jjump_pad_insn, jjump_pad_insn_size,
6720 adjusted_insn_addr, adjusted_insn_addr_end,
6721 err);
6722 }
6723
6724 static struct emit_ops *
6725 linux_emit_ops (void)
6726 {
6727 if (the_low_target.emit_ops != NULL)
6728 return (*the_low_target.emit_ops) ();
6729 else
6730 return NULL;
6731 }
6732
6733 static int
6734 linux_get_min_fast_tracepoint_insn_len (void)
6735 {
6736 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6737 }
6738
6739 /* Extract &phdr and num_phdr in the inferior.  Return 0 on success, non-zero on failure. */
6740
6741 static int
6742 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6743 CORE_ADDR *phdr_memaddr, int *num_phdr)
6744 {
6745 char filename[PATH_MAX];
6746 int fd;
6747 const int auxv_size = is_elf64
6748 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6749 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6750
6751 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6752
6753 fd = open (filename, O_RDONLY);
6754 if (fd < 0)
6755 return 1;
6756
6757 *phdr_memaddr = 0;
6758 *num_phdr = 0;
6759 while (read (fd, buf, auxv_size) == auxv_size
6760 && (*phdr_memaddr == 0 || *num_phdr == 0))
6761 {
6762 if (is_elf64)
6763 {
6764 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6765
6766 switch (aux->a_type)
6767 {
6768 case AT_PHDR:
6769 *phdr_memaddr = aux->a_un.a_val;
6770 break;
6771 case AT_PHNUM:
6772 *num_phdr = aux->a_un.a_val;
6773 break;
6774 }
6775 }
6776 else
6777 {
6778 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6779
6780 switch (aux->a_type)
6781 {
6782 case AT_PHDR:
6783 *phdr_memaddr = aux->a_un.a_val;
6784 break;
6785 case AT_PHNUM:
6786 *num_phdr = aux->a_un.a_val;
6787 break;
6788 }
6789 }
6790 }
6791
6792 close (fd);
6793
6794 if (*phdr_memaddr == 0 || *num_phdr == 0)
6795 {
6796 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6797 "phdr_memaddr = %ld, phdr_num = %d",
6798 (long) *phdr_memaddr, *num_phdr);
6799 return 2;
6800 }
6801
6802 return 0;
6803 }
6804
6805 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6806
6807 static CORE_ADDR
6808 get_dynamic (const int pid, const int is_elf64)
6809 {
6810 CORE_ADDR phdr_memaddr, relocation;
6811 int num_phdr, i;
6812 unsigned char *phdr_buf;
6813 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6814
6815 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6816 return 0;
6817
6818 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6819 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6820
6821 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6822 return 0;
6823
6824 /* Compute relocation: it is expected to be 0 for "regular" executables,
6825 non-zero for PIE ones. */
6826 relocation = -1;
6827 for (i = 0; relocation == -1 && i < num_phdr; i++)
6828 if (is_elf64)
6829 {
6830 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6831
6832 if (p->p_type == PT_PHDR)
6833 relocation = phdr_memaddr - p->p_vaddr;
6834 }
6835 else
6836 {
6837 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6838
6839 if (p->p_type == PT_PHDR)
6840 relocation = phdr_memaddr - p->p_vaddr;
6841 }
6842
6843 if (relocation == -1)
6844 {
6845 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
6846 any real-world executable, including PIE executables, always has
6847 PT_PHDR present.  PT_PHDR is missing from some shared libraries and
6848 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6849 provides DT_DEBUG anyway (fpc binaries are statically linked).
6850
6851 Therefore, if DT_DEBUG exists, PT_PHDR does too.
6852
6853 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
6854
6855 return 0;
6856 }
6857
6858 for (i = 0; i < num_phdr; i++)
6859 {
6860 if (is_elf64)
6861 {
6862 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6863
6864 if (p->p_type == PT_DYNAMIC)
6865 return p->p_vaddr + relocation;
6866 }
6867 else
6868 {
6869 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6870
6871 if (p->p_type == PT_DYNAMIC)
6872 return p->p_vaddr + relocation;
6873 }
6874 }
6875
6876 return 0;
6877 }
6878
6879 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6880 can be 0 if the inferior does not yet have the library list initialized.
6881 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6882 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6883
6884 static CORE_ADDR
6885 get_r_debug (const int pid, const int is_elf64)
6886 {
6887 CORE_ADDR dynamic_memaddr;
6888 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6889 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6890 CORE_ADDR map = -1;
6891
6892 dynamic_memaddr = get_dynamic (pid, is_elf64);
6893 if (dynamic_memaddr == 0)
6894 return map;
6895
6896 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6897 {
6898 if (is_elf64)
6899 {
6900 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6901 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6902 union
6903 {
6904 Elf64_Xword map;
6905 unsigned char buf[sizeof (Elf64_Xword)];
6906 }
6907 rld_map;
6908 #endif
6909 #ifdef DT_MIPS_RLD_MAP
6910 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6911 {
6912 if (linux_read_memory (dyn->d_un.d_val,
6913 rld_map.buf, sizeof (rld_map.buf)) == 0)
6914 return rld_map.map;
6915 else
6916 break;
6917 }
6918 #endif /* DT_MIPS_RLD_MAP */
6919 #ifdef DT_MIPS_RLD_MAP_REL
6920 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6921 {
6922 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6923 rld_map.buf, sizeof (rld_map.buf)) == 0)
6924 return rld_map.map;
6925 else
6926 break;
6927 }
6928 #endif /* DT_MIPS_RLD_MAP_REL */
6929
6930 if (dyn->d_tag == DT_DEBUG && map == -1)
6931 map = dyn->d_un.d_val;
6932
6933 if (dyn->d_tag == DT_NULL)
6934 break;
6935 }
6936 else
6937 {
6938 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6939 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6940 union
6941 {
6942 Elf32_Word map;
6943 unsigned char buf[sizeof (Elf32_Word)];
6944 }
6945 rld_map;
6946 #endif
6947 #ifdef DT_MIPS_RLD_MAP
6948 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6949 {
6950 if (linux_read_memory (dyn->d_un.d_val,
6951 rld_map.buf, sizeof (rld_map.buf)) == 0)
6952 return rld_map.map;
6953 else
6954 break;
6955 }
6956 #endif /* DT_MIPS_RLD_MAP */
6957 #ifdef DT_MIPS_RLD_MAP_REL
6958 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6959 {
6960 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6961 rld_map.buf, sizeof (rld_map.buf)) == 0)
6962 return rld_map.map;
6963 else
6964 break;
6965 }
6966 #endif /* DT_MIPS_RLD_MAP_REL */
6967
6968 if (dyn->d_tag == DT_DEBUG && map == -1)
6969 map = dyn->d_un.d_val;
6970
6971 if (dyn->d_tag == DT_NULL)
6972 break;
6973 }
6974
6975 dynamic_memaddr += dyn_size;
6976 }
6977
6978 return map;
6979 }
6980
6981 /* Read one pointer from MEMADDR in the inferior. */
6982
6983 static int
6984 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6985 {
6986 int ret;
6987
6988 /* Go through a union so this works on either big- or little-endian
6989 hosts, when the inferior's pointer size is smaller than the size
6990 of CORE_ADDR.  It is assumed the inferior's endianness is the
6991 same as the superior's. */
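/* For illustration: on a big-endian host with an 8-byte CORE_ADDR,
   reading a 4-byte inferior pointer deposits the bytes at the start
   of the union, i.e. in the most significant half of ADDR.CORE_ADDR;
   reading the value back through ADDR.UI, which also starts at the
   union's first byte, is what yields the correct result.  */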
6992 union
6993 {
6994 CORE_ADDR core_addr;
6995 unsigned int ui;
6996 unsigned char uc;
6997 } addr;
6998
6999 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
7000 if (ret == 0)
7001 {
7002 if (ptr_size == sizeof (CORE_ADDR))
7003 *ptr = addr.core_addr;
7004 else if (ptr_size == sizeof (unsigned int))
7005 *ptr = addr.ui;
7006 else
7007 gdb_assert_not_reached ("unhandled pointer size");
7008 }
7009 return ret;
7010 }
7011
7012 struct link_map_offsets
7013 {
7014 /* Offset and size of r_debug.r_version. */
7015 int r_version_offset;
7016
7017 /* Offset and size of r_debug.r_map. */
7018 int r_map_offset;
7019
7020 /* Offset to l_addr field in struct link_map. */
7021 int l_addr_offset;
7022
7023 /* Offset to l_name field in struct link_map. */
7024 int l_name_offset;
7025
7026 /* Offset to l_ld field in struct link_map. */
7027 int l_ld_offset;
7028
7029 /* Offset to l_next field in struct link_map. */
7030 int l_next_offset;
7031
7032 /* Offset to l_prev field in struct link_map. */
7033 int l_prev_offset;
7034 };
7035
7036 /* Construct qXfer:libraries-svr4:read reply. */
7037
7038 static int
7039 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
7040 unsigned const char *writebuf,
7041 CORE_ADDR offset, int len)
7042 {
7043 char *document;
7044 unsigned document_len;
7045 struct process_info_private *const priv = current_process ()->priv;
7046 char filename[PATH_MAX];
7047 int pid, is_elf64;
7048
7049 static const struct link_map_offsets lmo_32bit_offsets =
7050 {
7051 0, /* r_version offset. */
7052 4, /* r_debug.r_map offset. */
7053 0, /* l_addr offset in link_map. */
7054 4, /* l_name offset in link_map. */
7055 8, /* l_ld offset in link_map. */
7056 12, /* l_next offset in link_map. */
7057 16 /* l_prev offset in link_map. */
7058 };
7059
7060 static const struct link_map_offsets lmo_64bit_offsets =
7061 {
7062 0, /* r_version offset. */
7063 8, /* r_debug.r_map offset. */
7064 0, /* l_addr offset in link_map. */
7065 8, /* l_name offset in link_map. */
7066 16, /* l_ld offset in link_map. */
7067 24, /* l_next offset in link_map. */
7068 32 /* l_prev offset in link_map. */
7069 };
7070 const struct link_map_offsets *lmo;
7071 unsigned int machine;
7072 int ptr_size;
7073 CORE_ADDR lm_addr = 0, lm_prev = 0;
7074 int allocated = 1024;
7075 char *p;
7076 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
7077 int header_done = 0;
7078
7079 if (writebuf != NULL)
7080 return -2;
7081 if (readbuf == NULL)
7082 return -1;
7083
7084 pid = lwpid_of (current_thread);
7085 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
7086 is_elf64 = elf_64_file_p (filename, &machine);
7087 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
7088 ptr_size = is_elf64 ? 8 : 4;
7089
7090 while (annex[0] != '\0')
7091 {
7092 const char *sep;
7093 CORE_ADDR *addrp;
7094 int len;
7095
7096 sep = strchr (annex, '=');
7097 if (sep == NULL)
7098 break;
7099
7100 len = sep - annex;
7101 if (len == 5 && startswith (annex, "start"))
7102 addrp = &lm_addr;
7103 else if (len == 4 && startswith (annex, "prev"))
7104 addrp = &lm_prev;
7105 else
7106 {
7107 annex = strchr (sep, ';');
7108 if (annex == NULL)
7109 break;
7110 annex++;
7111 continue;
7112 }
7113
7114 annex = decode_address_to_semicolon (addrp, sep + 1);
7115 }
7116
7117 if (lm_addr == 0)
7118 {
7119 int r_version = 0;
7120
7121 if (priv->r_debug == 0)
7122 priv->r_debug = get_r_debug (pid, is_elf64);
7123
7124 /* We failed to find DT_DEBUG.  This situation will not change
7125 for this inferior, so do not retry.  Report it to GDB as
7126 E01; see GDB's solib-svr4.c for the reasons. */
7127 if (priv->r_debug == (CORE_ADDR) -1)
7128 return -1;
7129
7130 if (priv->r_debug != 0)
7131 {
7132 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
7133 (unsigned char *) &r_version,
7134 sizeof (r_version)) != 0
7135 || r_version != 1)
7136 {
7137 warning ("unexpected r_debug version %d", r_version);
7138 }
7139 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7140 &lm_addr, ptr_size) != 0)
7141 {
7142 warning ("unable to read r_map from 0x%lx",
7143 (long) priv->r_debug + lmo->r_map_offset);
7144 }
7145 }
7146 }
7147
7148 document = (char *) xmalloc (allocated);
7149 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7150 p = document + strlen (document);
7151
7152 while (lm_addr
7153 && read_one_ptr (lm_addr + lmo->l_name_offset,
7154 &l_name, ptr_size) == 0
7155 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7156 &l_addr, ptr_size) == 0
7157 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7158 &l_ld, ptr_size) == 0
7159 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7160 &l_prev, ptr_size) == 0
7161 && read_one_ptr (lm_addr + lmo->l_next_offset,
7162 &l_next, ptr_size) == 0)
7163 {
7164 unsigned char libname[PATH_MAX];
7165
7166 if (lm_prev != l_prev)
7167 {
7168 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7169 (long) lm_prev, (long) l_prev);
7170 break;
7171 }
7172
7173 /* Ignore the first entry even if it has a valid name, as the first entry
7174 corresponds to the main executable.  The first entry should not be
7175 skipped if the dynamic loader was loaded late by a static executable
7176 (see the solib-svr4.c parameter ignore_first).  But in such a case the
7177 main executable does not have PT_DYNAMIC present, and this function
7178 has already exited above due to a failed get_r_debug. */
7179 if (lm_prev == 0)
7180 {
7181 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7182 p = p + strlen (p);
7183 }
7184 else
7185 {
7186 /* Not checking for error because reading may stop before
7187 we've got PATH_MAX worth of characters. */
7188 libname[0] = '\0';
7189 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7190 libname[sizeof (libname) - 1] = '\0';
7191 if (libname[0] != '\0')
7192 {
7193 /* 6x the size for xml_escape_text below. */
7194 size_t len = 6 * strlen ((char *) libname);
7195 char *name;
7196
7197 if (!header_done)
7198 {
7199 /* Terminate `<library-list-svr4'. */
7200 *p++ = '>';
7201 header_done = 1;
7202 }
7203
7204 while (allocated < p - document + len + 200)
7205 {
7206 /* Expand to guarantee sufficient storage. */
7207 uintptr_t document_len = p - document;
7208
7209 document = (char *) xrealloc (document, 2 * allocated);
7210 allocated *= 2;
7211 p = document + document_len;
7212 }
7213
7214 name = xml_escape_text ((char *) libname);
7215 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7216 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7217 name, (unsigned long) lm_addr,
7218 (unsigned long) l_addr, (unsigned long) l_ld);
7219 free (name);
7220 }
7221 }
7222
7223 lm_prev = lm_addr;
7224 lm_addr = l_next;
7225 }
7226
7227 if (!header_done)
7228 {
7229 /* Empty list; terminate `<library-list-svr4'. */
7230 strcpy (p, "/>");
7231 }
7232 else
7233 strcpy (p, "</library-list-svr4>");
7234
7235 document_len = strlen (document);
7236 if (offset < document_len)
7237 document_len -= offset;
7238 else
7239 document_len = 0;
7240 if (len > document_len)
7241 len = document_len;
7242
7243 memcpy (readbuf, document + offset, len);
7244 xfree (document);
7245
7246 return len;
7247 }
7248
7249 #ifdef HAVE_LINUX_BTRACE
7250
7251 /* See to_disable_btrace target method. */
7252
7253 static int
7254 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7255 {
7256 enum btrace_error err;
7257
7258 err = linux_disable_btrace (tinfo);
7259 return (err == BTRACE_ERR_NONE ? 0 : -1);
7260 }
7261
7262 /* Encode an Intel Processor Trace configuration. */
7263
7264 static void
7265 linux_low_encode_pt_config (struct buffer *buffer,
7266 const struct btrace_data_pt_config *config)
7267 {
7268 buffer_grow_str (buffer, "<pt-config>\n");
7269
7270 switch (config->cpu.vendor)
7271 {
7272 case CV_INTEL:
7273 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7274 "model=\"%u\" stepping=\"%u\"/>\n",
7275 config->cpu.family, config->cpu.model,
7276 config->cpu.stepping);
7277 break;
7278
7279 default:
7280 break;
7281 }
7282
7283 buffer_grow_str (buffer, "</pt-config>\n");
7284 }
7285
7286 /* Encode a raw buffer. */
7287
7288 static void
7289 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7290 unsigned int size)
7291 {
7292 if (size == 0)
7293 return;
7294
7295 /* We use hex encoding - see common/rsp-low.h. */
7296 buffer_grow_str (buffer, "<raw>\n");
7297
7298 while (size-- > 0)
7299 {
7300 char elem[2];
7301
7302 elem[0] = tohex ((*data >> 4) & 0xf);
7303 elem[1] = tohex (*data++ & 0xf);
7304
7305 buffer_grow (buffer, elem, 2);
7306 }
7307
7308 buffer_grow_str (buffer, "</raw>\n");
7309 }
7310
7311 /* See to_read_btrace target method. */
7312
7313 static int
7314 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7315 enum btrace_read_type type)
7316 {
7317 struct btrace_data btrace;
7318 struct btrace_block *block;
7319 enum btrace_error err;
7320 int i;
7321
7322 btrace_data_init (&btrace);
7323
7324 err = linux_read_btrace (&btrace, tinfo, type);
7325 if (err != BTRACE_ERR_NONE)
7326 {
7327 if (err == BTRACE_ERR_OVERFLOW)
7328 buffer_grow_str0 (buffer, "E.Overflow.");
7329 else
7330 buffer_grow_str0 (buffer, "E.Generic Error.");
7331
7332 goto err;
7333 }
7334
7335 switch (btrace.format)
7336 {
7337 case BTRACE_FORMAT_NONE:
7338 buffer_grow_str0 (buffer, "E.No Trace.");
7339 goto err;
7340
7341 case BTRACE_FORMAT_BTS:
7342 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7343 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7344
7345 for (i = 0;
7346 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7347 i++)
7348 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7349 paddress (block->begin), paddress (block->end));
7350
7351 buffer_grow_str0 (buffer, "</btrace>\n");
7352 break;
7353
7354 case BTRACE_FORMAT_PT:
7355 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7356 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7357 buffer_grow_str (buffer, "<pt>\n");
7358
7359 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7360
7361 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7362 btrace.variant.pt.size);
7363
7364 buffer_grow_str (buffer, "</pt>\n");
7365 buffer_grow_str0 (buffer, "</btrace>\n");
7366 break;
7367
7368 default:
7369 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7370 goto err;
7371 }
7372
7373 btrace_data_fini (&btrace);
7374 return 0;
7375
7376 err:
7377 btrace_data_fini (&btrace);
7378 return -1;
7379 }
7380
7381 /* See to_btrace_conf target method. */
7382
7383 static int
7384 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7385 struct buffer *buffer)
7386 {
7387 const struct btrace_config *conf;
7388
7389 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7390 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7391
7392 conf = linux_btrace_conf (tinfo);
7393 if (conf != NULL)
7394 {
7395 switch (conf->format)
7396 {
7397 case BTRACE_FORMAT_NONE:
7398 break;
7399
7400 case BTRACE_FORMAT_BTS:
7401 buffer_xml_printf (buffer, "<bts");
7402 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7403 buffer_xml_printf (buffer, " />\n");
7404 break;
7405
7406 case BTRACE_FORMAT_PT:
7407 buffer_xml_printf (buffer, "<pt");
7408 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7409 buffer_xml_printf (buffer, "/>\n");
7410 break;
7411 }
7412 }
7413
7414 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7415 return 0;
7416 }
7417 #endif /* HAVE_LINUX_BTRACE */
7418
7419 /* See nat/linux-nat.h. */
7420
7421 ptid_t
7422 current_lwp_ptid (void)
7423 {
7424 return ptid_of (current_thread);
7425 }
7426
7427 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7428
7429 static int
7430 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7431 {
7432 if (the_low_target.breakpoint_kind_from_pc != NULL)
7433 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7434 else
7435 return default_breakpoint_kind_from_pc (pcptr);
7436 }
7437
7438 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7439
7440 static const gdb_byte *
7441 linux_sw_breakpoint_from_kind (int kind, int *size)
7442 {
7443 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7444
7445 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7446 }
7447
7448 /* Implementation of the target_ops method
7449 "breakpoint_kind_from_current_state". */
7450
7451 static int
7452 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7453 {
7454 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7455 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7456 else
7457 return linux_breakpoint_kind_from_pc (pcptr);
7458 }
7459
7460 /* Default implementation of linux_target_ops method "set_pc" for
7461 a 32-bit pc register which is literally named "pc". */
7462
7463 void
7464 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7465 {
7466 uint32_t newpc = pc;
7467
7468 supply_register_by_name (regcache, "pc", &newpc);
7469 }
7470
7471 /* Default implementation of linux_target_ops method "get_pc" for
7472 a 32-bit pc register which is literally named "pc". */
7473
7474 CORE_ADDR
7475 linux_get_pc_32bit (struct regcache *regcache)
7476 {
7477 uint32_t pc;
7478
7479 collect_register_by_name (regcache, "pc", &pc);
7480 if (debug_threads)
7481 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7482 return pc;
7483 }
7484
7485 /* Default implementation of linux_target_ops method "set_pc" for
7486 a 64-bit pc register which is literally named "pc". */
7487
7488 void
7489 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7490 {
7491 uint64_t newpc = pc;
7492
7493 supply_register_by_name (regcache, "pc", &newpc);
7494 }
7495
7496 /* Default implementation of linux_target_ops method "get_pc" for
7497 a 64-bit pc register which is literally named "pc". */
7498
7499 CORE_ADDR
7500 linux_get_pc_64bit (struct regcache *regcache)
7501 {
7502 uint64_t pc;
7503
7504 collect_register_by_name (regcache, "pc", &pc);
7505 if (debug_threads)
7506 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7507 return pc;
7508 }
7509
7510
7511 static struct target_ops linux_target_ops = {
7512 linux_create_inferior,
7513 linux_post_create_inferior,
7514 linux_attach,
7515 linux_kill,
7516 linux_detach,
7517 linux_mourn,
7518 linux_join,
7519 linux_thread_alive,
7520 linux_resume,
7521 linux_wait,
7522 linux_fetch_registers,
7523 linux_store_registers,
7524 linux_prepare_to_access_memory,
7525 linux_done_accessing_memory,
7526 linux_read_memory,
7527 linux_write_memory,
7528 linux_look_up_symbols,
7529 linux_request_interrupt,
7530 linux_read_auxv,
7531 linux_supports_z_point_type,
7532 linux_insert_point,
7533 linux_remove_point,
7534 linux_stopped_by_sw_breakpoint,
7535 linux_supports_stopped_by_sw_breakpoint,
7536 linux_stopped_by_hw_breakpoint,
7537 linux_supports_stopped_by_hw_breakpoint,
7538 linux_supports_hardware_single_step,
7539 linux_stopped_by_watchpoint,
7540 linux_stopped_data_address,
7541 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7542 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7543 && defined(PT_TEXT_END_ADDR)
7544 linux_read_offsets,
7545 #else
7546 NULL,
7547 #endif
7548 #ifdef USE_THREAD_DB
7549 thread_db_get_tls_address,
7550 #else
7551 NULL,
7552 #endif
7553 linux_qxfer_spu,
7554 hostio_last_error_from_errno,
7555 linux_qxfer_osdata,
7556 linux_xfer_siginfo,
7557 linux_supports_non_stop,
7558 linux_async,
7559 linux_start_non_stop,
7560 linux_supports_multi_process,
7561 linux_supports_fork_events,
7562 linux_supports_vfork_events,
7563 linux_supports_exec_events,
7564 linux_handle_new_gdb_connection,
7565 #ifdef USE_THREAD_DB
7566 thread_db_handle_monitor_command,
7567 #else
7568 NULL,
7569 #endif
7570 linux_common_core_of_thread,
7571 linux_read_loadmap,
7572 linux_process_qsupported,
7573 linux_supports_tracepoints,
7574 linux_read_pc,
7575 linux_write_pc,
7576 linux_thread_stopped,
7577 NULL,
7578 linux_pause_all,
7579 linux_unpause_all,
7580 linux_stabilize_threads,
7581 linux_install_fast_tracepoint_jump_pad,
7582 linux_emit_ops,
7583 linux_supports_disable_randomization,
7584 linux_get_min_fast_tracepoint_insn_len,
7585 linux_qxfer_libraries_svr4,
7586 linux_supports_agent,
7587 #ifdef HAVE_LINUX_BTRACE
7588 linux_supports_btrace,
7589 linux_enable_btrace,
7590 linux_low_disable_btrace,
7591 linux_low_read_btrace,
7592 linux_low_btrace_conf,
7593 #else
7594 NULL,
7595 NULL,
7596 NULL,
7597 NULL,
7598 NULL,
7599 #endif
7600 linux_supports_range_stepping,
7601 linux_proc_pid_to_exec_file,
7602 linux_mntns_open_cloexec,
7603 linux_mntns_unlink,
7604 linux_mntns_readlink,
7605 linux_breakpoint_kind_from_pc,
7606 linux_sw_breakpoint_from_kind,
7607 linux_proc_tid_get_name,
7608 linux_breakpoint_kind_from_current_state,
7609 linux_supports_software_single_step,
7610 linux_supports_catch_syscall,
7611 linux_get_ipa_tdesc_idx,
7612 };
7613
7614 #ifdef HAVE_LINUX_REGSETS
7615 void
7616 initialize_regsets_info (struct regsets_info *info)
7617 {
7618 for (info->num_regsets = 0;
7619 info->regsets[info->num_regsets].size >= 0;
7620 info->num_regsets++)
7621 ;
7622 }
7623 #endif
7624
7625 void
7626 initialize_low (void)
7627 {
7628 struct sigaction sigchld_action;
7629
7630 memset (&sigchld_action, 0, sizeof (sigchld_action));
7631 set_target_ops (&linux_target_ops);
7632
7633 linux_ptrace_init_warnings ();
7634
7635 sigchld_action.sa_handler = sigchld_handler;
7636 sigemptyset (&sigchld_action.sa_mask);
7637 sigchld_action.sa_flags = SA_RESTART;
7638 sigaction (SIGCHLD, &sigchld_action, NULL);
7639
7640 initialize_low_arch ();
7641
7642 linux_check_ptrace_features ();
7643 }