gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2016 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #include <inttypes.h>
50 #ifndef ELFMAG0
51 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
52 then ELFMAG0 will have been defined. If it didn't get included by
53 gdb_proc_service.h then including it will likely introduce a duplicate
54 definition of elf_fpregset_t. */
55 #include <elf.h>
56 #endif
57 #include "nat/linux-namespaces.h"
58
59 #ifndef SPUFS_MAGIC
60 #define SPUFS_MAGIC 0x23c9b64e
61 #endif
62
63 #ifdef HAVE_PERSONALITY
64 # include <sys/personality.h>
65 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
66 # define ADDR_NO_RANDOMIZE 0x0040000
67 # endif
68 #endif
69
70 #ifndef O_LARGEFILE
71 #define O_LARGEFILE 0
72 #endif
73
74 /* Some targets did not define these ptrace constants from the start,
75 so gdbserver defines them locally here. In the future, these may
76 be removed after they are added to asm/ptrace.h. */
77 #if !(defined(PT_TEXT_ADDR) \
78 || defined(PT_DATA_ADDR) \
79 || defined(PT_TEXT_END_ADDR))
80 #if defined(__mcoldfire__)
81 /* These are still undefined in 3.10 kernels. */
82 #define PT_TEXT_ADDR 49*4
83 #define PT_DATA_ADDR 50*4
84 #define PT_TEXT_END_ADDR 51*4
85 /* BFIN has defined these since at least the 2.6.32 kernels. */
86 #elif defined(BFIN)
87 #define PT_TEXT_ADDR 220
88 #define PT_TEXT_END_ADDR 224
89 #define PT_DATA_ADDR 228
90 /* These are still undefined in 3.10 kernels. */
91 #elif defined(__TMS320C6X__)
92 #define PT_TEXT_ADDR (0x10000*4)
93 #define PT_DATA_ADDR (0x10004*4)
94 #define PT_TEXT_END_ADDR (0x10008*4)
95 #endif
96 #endif
97
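/* A minimal sketch of how such constants are consumed: on no-MMU
   targets, the text/data load addresses of a stopped tracee live in
   the USER area and are fetched with PTRACE_PEEKUSER at these
   offsets.  Illustrative only (hence not compiled); note that error
   checking must go through errno, since -1 is also a valid peeked
   value.  */
#if 0
static long
peek_user_word (pid_t pid, long offset)
{
  long value;

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) offset,
		  (PTRACE_TYPE_ARG4) 0);
  if (value == -1 && errno != 0)
    return -1;		/* Real failure; errno is set.  */
  return value;
}

/* E.g.: long text_start = peek_user_word (pid, PT_TEXT_ADDR);  */
#endif
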
98 #ifdef HAVE_LINUX_BTRACE
99 # include "nat/linux-btrace.h"
100 # include "btrace-common.h"
101 #endif
102
103 #ifndef HAVE_ELF32_AUXV_T
104 /* Copied from glibc's elf.h. */
105 typedef struct
106 {
107 uint32_t a_type; /* Entry type */
108 union
109 {
110 uint32_t a_val; /* Integer value */
111 /* We used to have pointer elements added here. We cannot do that,
112 though, since it does not work when using 32-bit definitions
113 on 64-bit platforms and vice versa. */
114 } a_un;
115 } Elf32_auxv_t;
116 #endif
117
118 #ifndef HAVE_ELF64_AUXV_T
119 /* Copied from glibc's elf.h. */
120 typedef struct
121 {
122 uint64_t a_type; /* Entry type */
123 union
124 {
125 uint64_t a_val; /* Integer value */
126 /* We used to have pointer elements added here. We cannot do that,
127 though, since it does not work when using 32-bit definitions
128 on 64-bit platforms and vice versa. */
129 } a_un;
130 } Elf64_auxv_t;
131 #endif
132
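/* Because the entries above use fixed-width integers rather than
   pointers, an auxv image read from /proc/PID/auxv can be walked
   using the inferior's layout regardless of gdbserver's own word
   size.  A minimal sketch for the 64-bit case (illustrative only;
   AT_NULL terminates the vector):  */
#if 0
static int
scan_auxv_64 (const unsigned char *buf, size_t len,
	      uint64_t type, uint64_t *valp)
{
  const Elf64_auxv_t *aux = (const Elf64_auxv_t *) buf;
  size_t i, n = len / sizeof (Elf64_auxv_t);

  for (i = 0; i < n && aux[i].a_type != AT_NULL; i++)
    if (aux[i].a_type == type)
      {
	*valp = aux[i].a_un.a_val;
	return 1;
      }
  return 0;
}

/* E.g.: scan_auxv_64 (buf, len, AT_ENTRY, &entry_pc);  */
#endif
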
133 /* Does the current host support PTRACE_GETREGSET? */
134 int have_ptrace_getregset = -1;
135
136 /* LWP accessors. */
137
138 /* See nat/linux-nat.h. */
139
140 ptid_t
141 ptid_of_lwp (struct lwp_info *lwp)
142 {
143 return ptid_of (get_lwp_thread (lwp));
144 }
145
146 /* See nat/linux-nat.h. */
147
148 void
149 lwp_set_arch_private_info (struct lwp_info *lwp,
150 struct arch_lwp_info *info)
151 {
152 lwp->arch_private = info;
153 }
154
155 /* See nat/linux-nat.h. */
156
157 struct arch_lwp_info *
158 lwp_arch_private_info (struct lwp_info *lwp)
159 {
160 return lwp->arch_private;
161 }
162
163 /* See nat/linux-nat.h. */
164
165 int
166 lwp_is_stopped (struct lwp_info *lwp)
167 {
168 return lwp->stopped;
169 }
170
171 /* See nat/linux-nat.h. */
172
173 enum target_stop_reason
174 lwp_stop_reason (struct lwp_info *lwp)
175 {
176 return lwp->stop_reason;
177 }
178
179 /* A list of all unknown processes which receive stop signals. Some
180 other process will presumably claim each of these as forked
181 children momentarily. */
182
183 struct simple_pid_list
184 {
185 /* The process ID. */
186 int pid;
187
188 /* The status as reported by waitpid. */
189 int status;
190
191 /* Next in chain. */
192 struct simple_pid_list *next;
193 };
194 struct simple_pid_list *stopped_pids;
195
196 /* Trivial list manipulation functions to keep track of a list of new
197 stopped processes. */
198
199 static void
200 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
201 {
202 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
203
204 new_pid->pid = pid;
205 new_pid->status = status;
206 new_pid->next = *listp;
207 *listp = new_pid;
208 }
209
210 static int
211 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
212 {
213 struct simple_pid_list **p;
214
215 for (p = listp; *p != NULL; p = &(*p)->next)
216 if ((*p)->pid == pid)
217 {
218 struct simple_pid_list *next = (*p)->next;
219
220 *statusp = (*p)->status;
221 xfree (*p);
222 *p = next;
223 return 1;
224 }
225 return 0;
226 }
227
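/* A sketch of how the two halves meet (cf. handle_extended_wait
   below): a stop collected for a PID nobody knows about yet is parked
   on STOPPED_PIDS, and the eventual fork/clone parent claims it from
   the list instead of calling waitpid again.  Illustrative only.  */
#if 0
/* Producer: waitpid returned a stop for an unknown PID.  */
add_to_pid_list (&stopped_pids, unknown_pid, wstat);

/* Consumer: the new child's initial stop may already be parked.  */
if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
  my_waitpid (new_pid, &status, __WALL);
#endif
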
228 enum stopping_threads_kind
229 {
230 /* Not stopping threads presently. */
231 NOT_STOPPING_THREADS,
232
233 /* Stopping threads. */
234 STOPPING_THREADS,
235
236 /* Stopping and suspending threads. */
237 STOPPING_AND_SUSPENDING_THREADS
238 };
239
240 /* This is set while stop_all_lwps is in effect. */
241 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
242
243 /* FIXME make into a target method? */
244 int using_threads = 1;
245
246 /* True if we're presently stabilizing threads (moving them out of
247 jump pads). */
248 static int stabilizing_threads;
249
250 static void linux_resume_one_lwp (struct lwp_info *lwp,
251 int step, int signal, siginfo_t *info);
252 static void linux_resume (struct thread_resume *resume_info, size_t n);
253 static void stop_all_lwps (int suspend, struct lwp_info *except);
254 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
255 static void unsuspend_all_lwps (struct lwp_info *except);
256 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
257 int *wstat, int options);
258 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
259 static struct lwp_info *add_lwp (ptid_t ptid);
260 static void linux_mourn (struct process_info *process);
261 static int linux_stopped_by_watchpoint (void);
262 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
263 static int lwp_is_marked_dead (struct lwp_info *lwp);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
268 static void complete_ongoing_step_over (void);
269 static int linux_low_ptrace_options (int attached);
270
271 /* When the event-loop is doing a step-over, this points at the thread
272 being stepped. */
273 ptid_t step_over_bkpt;
274
275 /* True if the low target can hardware single-step. */
276
277 static int
278 can_hardware_single_step (void)
279 {
280 if (the_low_target.supports_hardware_single_step != NULL)
281 return the_low_target.supports_hardware_single_step ();
282 else
283 return 0;
284 }
285
286 /* True if the low target can software single-step. Such targets
287 implement the GET_NEXT_PCS callback. */
288
289 static int
290 can_software_single_step (void)
291 {
292 return (the_low_target.get_next_pcs != NULL);
293 }
294
295 /* True if the low target supports memory breakpoints. If so, we'll
296 have a GET_PC implementation. */
297
298 static int
299 supports_breakpoints (void)
300 {
301 return (the_low_target.get_pc != NULL);
302 }
303
304 /* Returns true if this target can support fast tracepoints. This
305 does not mean that the in-process agent has been loaded in the
306 inferior. */
307
308 static int
309 supports_fast_tracepoints (void)
310 {
311 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
312 }
313
314 /* True if LWP is stopped in its stepping range. */
315
316 static int
317 lwp_in_step_range (struct lwp_info *lwp)
318 {
319 CORE_ADDR pc = lwp->stop_pc;
320
321 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
322 }
323
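/* For example, with step_range_start == 0x1000 and step_range_end
   == 0x1010, stops at PCs 0x1000 through 0x100f are inside the
   range, while a stop exactly at 0x1010 is not: the range is
   half-open, [start, end).  */
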
324 struct pending_signals
325 {
326 int signal;
327 siginfo_t info;
328 struct pending_signals *prev;
329 };
330
331 /* The read/write ends of the pipe registered as waitable file in the
332 event loop. */
333 static int linux_event_pipe[2] = { -1, -1 };
334
335 /* True if we're currently in async mode. */
336 #define target_is_async_p() (linux_event_pipe[0] != -1)
337
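/* The pipe above is the classic self-pipe pattern: marking an event
   (see async_file_mark below) writes one byte so that the read end
   becomes readable and wakes the event loop's select/poll; consuming
   the event drains the pipe again.  A minimal standalone sketch,
   illustrative only and assuming O_NONBLOCK on both ends:  */
#if 0
static int event_pipe[2] = { -1, -1 };

static void
mark_event (void)
{
  if (write (event_pipe[1], "+", 1) < 0)
    ;				/* Errors ignored; a full pipe still wakes us.  */
}

static void
drain_events (void)
{
  char buf[16];

  while (read (event_pipe[0], buf, sizeof buf) > 0)
    ;				/* Drain until EAGAIN.  */
}
#endif
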
338 static void send_sigstop (struct lwp_info *lwp);
339 static void wait_for_sigstop (void);
340
341 /* Return 1 if HEADER is a 64-bit ELF file, 0 if it is 32-bit ELF, and -1 if it is not ELF at all. */
342
343 static int
344 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
345 {
346 if (header->e_ident[EI_MAG0] == ELFMAG0
347 && header->e_ident[EI_MAG1] == ELFMAG1
348 && header->e_ident[EI_MAG2] == ELFMAG2
349 && header->e_ident[EI_MAG3] == ELFMAG3)
350 {
351 *machine = header->e_machine;
352 return header->e_ident[EI_CLASS] == ELFCLASS64;
353
354 }
355 *machine = EM_NONE;
356 return -1;
357 }
358
359 /* Return 1 if FILE is a 64-bit ELF file,
360 0 if it is a 32-bit ELF file or cannot be fully read,
361 and -1 if it cannot be opened or is not ELF at all. */
362
363 static int
364 elf_64_file_p (const char *file, unsigned int *machine)
365 {
366 Elf64_Ehdr header;
367 int fd;
368
369 fd = open (file, O_RDONLY);
370 if (fd < 0)
371 return -1;
372
373 if (read (fd, &header, sizeof (header)) != sizeof (header))
374 {
375 close (fd);
376 return 0;
377 }
378 close (fd);
379
380 return elf_64_header_p (&header, machine);
381 }
382
383 /* Accepts an integer PID. Returns true if the executable that PID
384 is running is a 64-bit ELF file. */
385
386 int
387 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
388 {
389 char file[PATH_MAX];
390
391 sprintf (file, "/proc/%d/exe", pid);
392 return elf_64_file_p (file, machine);
393 }
394
395 static void
396 delete_lwp (struct lwp_info *lwp)
397 {
398 struct thread_info *thr = get_lwp_thread (lwp);
399
400 if (debug_threads)
401 debug_printf ("deleting %ld\n", lwpid_of (thr));
402
403 remove_thread (thr);
404 free (lwp->arch_private);
405 free (lwp);
406 }
407
408 /* Add a process to the common process list, and set its private
409 data. */
410
411 static struct process_info *
412 linux_add_process (int pid, int attached)
413 {
414 struct process_info *proc;
415
416 proc = add_process (pid, attached);
417 proc->priv = XCNEW (struct process_info_private);
418
419 if (the_low_target.new_process != NULL)
420 proc->priv->arch_private = the_low_target.new_process ();
421
422 return proc;
423 }
424
425 static CORE_ADDR get_pc (struct lwp_info *lwp);
426
427 /* Call the target arch_setup function on the current thread. */
428
429 static void
430 linux_arch_setup (void)
431 {
432 the_low_target.arch_setup ();
433 }
434
435 /* Call the target arch_setup function on THREAD. */
436
437 static void
438 linux_arch_setup_thread (struct thread_info *thread)
439 {
440 struct thread_info *saved_thread;
441
442 saved_thread = current_thread;
443 current_thread = thread;
444
445 linux_arch_setup ();
446
447 current_thread = saved_thread;
448 }
449
450 /* Handle a GNU/Linux extended wait response. If we see a clone,
451 fork, or vfork event, we need to add the new LWP to our list;
452 return 0 if the event should be reported to higher layers, or 1
453 to suppress it. If we see an exec event, we will modify
454 ORIG_EVENT_LWP to point to a new LWP representing the new program. */
455
456 static int
457 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
458 {
459 struct lwp_info *event_lwp = *orig_event_lwp;
460 int event = linux_ptrace_get_extended_event (wstat);
461 struct thread_info *event_thr = get_lwp_thread (event_lwp);
462 struct lwp_info *new_lwp;
463
464 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
465
466 /* All extended events we currently use are mid-syscall. Only
467 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
468 you have to be using PTRACE_SEIZE to get that. */
469 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
470
471 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
472 || (event == PTRACE_EVENT_CLONE))
473 {
474 ptid_t ptid;
475 unsigned long new_pid;
476 int ret, status;
477
478 /* Get the pid of the new lwp. */
479 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
480 &new_pid);
481
482 /* If we haven't already seen the new PID stop, wait for it now. */
483 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
484 {
485 /* The new child has a pending SIGSTOP. We can't affect it until it
486 hits the SIGSTOP, but we're already attached. */
487
488 ret = my_waitpid (new_pid, &status, __WALL);
489
490 if (ret == -1)
491 perror_with_name ("waiting for new child");
492 else if (ret != new_pid)
493 warning ("wait returned unexpected PID %d", ret);
494 else if (!WIFSTOPPED (status))
495 warning ("wait returned unexpected status 0x%x", status);
496 }
497
498 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
499 {
500 struct process_info *parent_proc;
501 struct process_info *child_proc;
502 struct lwp_info *child_lwp;
503 struct thread_info *child_thr;
504 struct target_desc *tdesc;
505
506 ptid = ptid_build (new_pid, new_pid, 0);
507
508 if (debug_threads)
509 {
510 debug_printf ("HEW: Got fork event from LWP %ld, "
511 "new child is %d\n",
512 ptid_get_lwp (ptid_of (event_thr)),
513 ptid_get_pid (ptid));
514 }
515
516 /* Add the new process to the tables and clone the breakpoint
517 lists of the parent. We need to do this even if the new process
518 will be detached, since we will need the process object and the
519 breakpoints to remove any breakpoints from memory when we
520 detach, and the client side will access registers. */
521 child_proc = linux_add_process (new_pid, 0);
522 gdb_assert (child_proc != NULL);
523 child_lwp = add_lwp (ptid);
524 gdb_assert (child_lwp != NULL);
525 child_lwp->stopped = 1;
526 child_lwp->must_set_ptrace_flags = 1;
527 child_lwp->status_pending_p = 0;
528 child_thr = get_lwp_thread (child_lwp);
529 child_thr->last_resume_kind = resume_stop;
530 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
531
532 /* If we're suspending all threads, leave this one suspended
533 too. If the fork/clone parent is stepping over a breakpoint,
534 all other threads have been suspended already. Leave the
535 child suspended too. */
536 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
537 || event_lwp->bp_reinsert != 0)
538 {
539 if (debug_threads)
540 debug_printf ("HEW: leaving child suspended\n");
541 child_lwp->suspended = 1;
542 }
543
544 parent_proc = get_thread_process (event_thr);
545 child_proc->attached = parent_proc->attached;
546
547 if (event_lwp->bp_reinsert != 0
548 && can_software_single_step ()
549 && event == PTRACE_EVENT_VFORK)
550 {
551 struct thread_info *saved_thread = current_thread;
552
553 current_thread = event_thr;
554 /* If we leave the reinsert breakpoints there, the child will
555 hit them, so uninsert the reinsert breakpoints from the parent
556 (and the child). Once the vfork child is done, reinsert
557 them in the parent. */
558 uninsert_reinsert_breakpoints ();
559 current_thread = saved_thread;
560 }
561
562 clone_all_breakpoints (&child_proc->breakpoints,
563 &child_proc->raw_breakpoints,
564 parent_proc->breakpoints);
565
566 tdesc = XNEW (struct target_desc);
567 copy_target_description (tdesc, parent_proc->tdesc);
568 child_proc->tdesc = tdesc;
569
570 /* Clone arch-specific process data. */
571 if (the_low_target.new_fork != NULL)
572 the_low_target.new_fork (parent_proc, child_proc);
573
574 /* Save fork info in the parent thread. */
575 if (event == PTRACE_EVENT_FORK)
576 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
577 else if (event == PTRACE_EVENT_VFORK)
578 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
579
580 event_lwp->waitstatus.value.related_pid = ptid;
581
582 /* The status_pending field contains bits denoting the
583 extended event, so when the pending event is handled,
584 the handler will look at lwp->waitstatus. */
585 event_lwp->status_pending_p = 1;
586 event_lwp->status_pending = wstat;
587
588 /* If the parent thread is doing a step-over with reinsert
589 breakpoints, the list of reinsert breakpoints is cloned
590 from the parent's. Remove them from the child process.
591 In case of vfork, we'll reinsert them once the vforked
592 child is done. */
593 if (event_lwp->bp_reinsert != 0
594 && can_software_single_step ())
595 {
596 struct thread_info *saved_thread = current_thread;
597
598 /* The child process is forked and stopped, so it is safe
599 to access its memory without stopping all other threads
600 from other processes. */
601 current_thread = child_thr;
602 delete_reinsert_breakpoints ();
603 current_thread = saved_thread;
604
605 gdb_assert (has_reinsert_breakpoints (parent_proc));
606 gdb_assert (!has_reinsert_breakpoints (child_proc));
607 }
608
609 /* Report the event. */
610 return 0;
611 }
612
613 if (debug_threads)
614 debug_printf ("HEW: Got clone event "
615 "from LWP %ld, new child is LWP %ld\n",
616 lwpid_of (event_thr), new_pid);
617
618 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
619 new_lwp = add_lwp (ptid);
620
621 /* Either we're going to immediately resume the new thread
622 or leave it stopped. linux_resume_one_lwp is a nop if it
623 thinks the thread is currently running, so set this first
624 before calling linux_resume_one_lwp. */
625 new_lwp->stopped = 1;
626
627 /* If we're suspending all threads, leave this one suspended
628 too. If the fork/clone parent is stepping over a breakpoint,
629 all other threads have been suspended already. Leave the
630 child suspended too. */
631 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS
632 || event_lwp->bp_reinsert != 0)
633 new_lwp->suspended = 1;
634
635 /* Normally we will get the pending SIGSTOP. But in some cases
636 we might get another signal delivered to the group first.
637 If we do get another signal, be sure not to lose it. */
638 if (WSTOPSIG (status) != SIGSTOP)
639 {
640 new_lwp->stop_expected = 1;
641 new_lwp->status_pending_p = 1;
642 new_lwp->status_pending = status;
643 }
644 else if (report_thread_events)
645 {
646 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
647 new_lwp->status_pending_p = 1;
648 new_lwp->status_pending = status;
649 }
650
651 /* Don't report the event. */
652 return 1;
653 }
654 else if (event == PTRACE_EVENT_VFORK_DONE)
655 {
656 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
657
658 if (event_lwp->bp_reinsert != 0 && can_software_single_step ())
659 {
660 struct thread_info *saved_thread = current_thread;
661 struct process_info *proc = get_thread_process (event_thr);
662
663 current_thread = event_thr;
664 reinsert_reinsert_breakpoints ();
665 current_thread = saved_thread;
666
667 gdb_assert (has_reinsert_breakpoints (proc));
668 }
669
670 /* Report the event. */
671 return 0;
672 }
673 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
674 {
675 struct process_info *proc;
676 VEC (int) *syscalls_to_catch;
677 ptid_t event_ptid;
678 pid_t event_pid;
679
680 if (debug_threads)
681 {
682 debug_printf ("HEW: Got exec event from LWP %ld\n",
683 lwpid_of (event_thr));
684 }
685
686 /* Get the event ptid. */
687 event_ptid = ptid_of (event_thr);
688 event_pid = ptid_get_pid (event_ptid);
689
690 /* Save the syscall list from the execing process. */
691 proc = get_thread_process (event_thr);
692 syscalls_to_catch = proc->syscalls_to_catch;
693 proc->syscalls_to_catch = NULL;
694
695 /* Delete the execing process and all its threads. */
696 linux_mourn (proc);
697 current_thread = NULL;
698
699 /* Create a new process/lwp/thread. */
700 proc = linux_add_process (event_pid, 0);
701 event_lwp = add_lwp (event_ptid);
702 event_thr = get_lwp_thread (event_lwp);
703 gdb_assert (current_thread == event_thr);
704 linux_arch_setup_thread (event_thr);
705
706 /* Set the event status. */
707 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
708 event_lwp->waitstatus.value.execd_pathname
709 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
710
711 /* Mark the exec status as pending. */
712 event_lwp->stopped = 1;
713 event_lwp->status_pending_p = 1;
714 event_lwp->status_pending = wstat;
715 event_thr->last_resume_kind = resume_continue;
716 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
717
718 /* Update syscall state in the new lwp, effectively mid-syscall too. */
719 event_lwp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
720
721 /* Restore the list to catch. Don't rely on the client, which is free
722 to avoid sending a new list when the architecture doesn't change.
723 Also, for ANY_SYSCALL, the architecture doesn't really matter. */
724 proc->syscalls_to_catch = syscalls_to_catch;
725
726 /* Report the event. */
727 *orig_event_lwp = event_lwp;
728 return 0;
729 }
730
731 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
732 }
733
734 /* Return the PC as read from the regcache of LWP, without any
735 adjustment. */
736
737 static CORE_ADDR
738 get_pc (struct lwp_info *lwp)
739 {
740 struct thread_info *saved_thread;
741 struct regcache *regcache;
742 CORE_ADDR pc;
743
744 if (the_low_target.get_pc == NULL)
745 return 0;
746
747 saved_thread = current_thread;
748 current_thread = get_lwp_thread (lwp);
749
750 regcache = get_thread_regcache (current_thread, 1);
751 pc = (*the_low_target.get_pc) (regcache);
752
753 if (debug_threads)
754 debug_printf ("pc is 0x%lx\n", (long) pc);
755
756 current_thread = saved_thread;
757 return pc;
758 }
759
760 /* This function should only be called if LWP got a SYSCALL_SIGTRAP.
761 Fill *SYSNO with the syscall nr trapped. Fill *SYSRET with the
762 return code. */
763
764 static void
765 get_syscall_trapinfo (struct lwp_info *lwp, int *sysno, int *sysret)
766 {
767 struct thread_info *saved_thread;
768 struct regcache *regcache;
769
770 if (the_low_target.get_syscall_trapinfo == NULL)
771 {
772 /* If we cannot get the syscall trapinfo, report an unknown
773 system call number and -ENOSYS return value. */
774 *sysno = UNKNOWN_SYSCALL;
775 *sysret = -ENOSYS;
776 return;
777 }
778
779 saved_thread = current_thread;
780 current_thread = get_lwp_thread (lwp);
781
782 regcache = get_thread_regcache (current_thread, 1);
783 (*the_low_target.get_syscall_trapinfo) (regcache, sysno, sysret);
784
785 if (debug_threads)
786 {
787 debug_printf ("get_syscall_trapinfo sysno %d sysret %d\n",
788 *sysno, *sysret);
789 }
790
791 current_thread = saved_thread;
792 }
793
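/* A sketch of the low-target side of this hook: an x86-64 backend,
   for instance, might fill it in by reading the syscall-number and
   return-value registers from the regcache.  The register names and
   helper used here are assumptions for illustration, not a
   definitive implementation.  */
#if 0
static void
example_get_syscall_trapinfo (struct regcache *regcache,
			      int *sysno, int *sysret)
{
  unsigned long long nr = 0, ret = 0;

  /* Assumes "orig_rax"/"rax" register names and the
     collect_register_by_name helper from regcache.h.  */
  collect_register_by_name (regcache, "orig_rax", &nr);
  collect_register_by_name (regcache, "rax", &ret);
  *sysno = (int) nr;
  *sysret = (int) ret;
}
#endif
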
794 static int check_stopped_by_watchpoint (struct lwp_info *child);
795
796 /* Called when the LWP stopped for a signal/trap. If it stopped for a
797 trap, check what caused it (breakpoint, watchpoint, trace, etc.),
798 and save the result in the LWP's stop_reason field. If it stopped
799 for a breakpoint, decrement the PC if necessary on the lwp's
800 architecture. Returns true if we now have the LWP's stop PC. */
801
802 static int
803 save_stop_reason (struct lwp_info *lwp)
804 {
805 CORE_ADDR pc;
806 CORE_ADDR sw_breakpoint_pc;
807 struct thread_info *saved_thread;
808 #if USE_SIGTRAP_SIGINFO
809 siginfo_t siginfo;
810 #endif
811
812 if (the_low_target.get_pc == NULL)
813 return 0;
814
815 pc = get_pc (lwp);
816 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
817
818 /* breakpoint_at reads from the current thread. */
819 saved_thread = current_thread;
820 current_thread = get_lwp_thread (lwp);
821
822 #if USE_SIGTRAP_SIGINFO
823 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
824 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
825 {
826 if (siginfo.si_signo == SIGTRAP)
827 {
828 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
829 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
830 {
831 /* The si_code is ambiguous on this arch -- check debug
832 registers. */
833 if (!check_stopped_by_watchpoint (lwp))
834 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
835 }
836 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
837 {
838 /* If we determine the LWP stopped for a SW breakpoint,
839 trust it. Particularly don't check watchpoint
840 registers, because at least on s390, we'd find
841 stopped-by-watchpoint as long as there's a watchpoint
842 set. */
843 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
844 }
845 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
846 {
847 /* This can indicate either a hardware breakpoint or
848 hardware watchpoint. Check debug registers. */
849 if (!check_stopped_by_watchpoint (lwp))
850 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
851 }
852 else if (siginfo.si_code == TRAP_TRACE)
853 {
854 /* We may have single stepped an instruction that
855 triggered a watchpoint. In that case, on some
856 architectures (such as x86), instead of TRAP_HWBKPT,
857 si_code indicates TRAP_TRACE, and we need to check
858 the debug registers separately. */
859 if (!check_stopped_by_watchpoint (lwp))
860 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
861 }
862 }
863 }
864 #else
865 /* We may have just stepped a breakpoint instruction. E.g., in
866 non-stop mode, GDB first tells the thread A to step a range, and
867 then the user inserts a breakpoint inside the range. In that
868 case we need to report the breakpoint PC. */
869 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
870 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
871 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
872
873 if (hardware_breakpoint_inserted_here (pc))
874 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
875
876 if (lwp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
877 check_stopped_by_watchpoint (lwp);
878 #endif
879
880 if (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
881 {
882 if (debug_threads)
883 {
884 struct thread_info *thr = get_lwp_thread (lwp);
885
886 debug_printf ("CSBB: %s stopped by software breakpoint\n",
887 target_pid_to_str (ptid_of (thr)));
888 }
889
890 /* Back up the PC if necessary. */
891 if (pc != sw_breakpoint_pc)
892 {
893 struct regcache *regcache
894 = get_thread_regcache (current_thread, 1);
895 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
896 }
897
898 /* Update this so we record the correct stop PC below. */
899 pc = sw_breakpoint_pc;
900 }
901 else if (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
902 {
903 if (debug_threads)
904 {
905 struct thread_info *thr = get_lwp_thread (lwp);
906
907 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
908 target_pid_to_str (ptid_of (thr)));
909 }
910 }
911 else if (lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
912 {
913 if (debug_threads)
914 {
915 struct thread_info *thr = get_lwp_thread (lwp);
916
917 debug_printf ("CSBB: %s stopped by hardware watchpoint\n",
918 target_pid_to_str (ptid_of (thr)));
919 }
920 }
921 else if (lwp->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP)
922 {
923 if (debug_threads)
924 {
925 struct thread_info *thr = get_lwp_thread (lwp);
926
927 debug_printf ("CSBB: %s stopped by trace\n",
928 target_pid_to_str (ptid_of (thr)));
929 }
930 }
931
932 lwp->stop_pc = pc;
933 current_thread = saved_thread;
934 return 1;
935 }
936
937 static struct lwp_info *
938 add_lwp (ptid_t ptid)
939 {
940 struct lwp_info *lwp;
941
942 lwp = XCNEW (struct lwp_info);
943
944 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
945
946 if (the_low_target.new_thread != NULL)
947 the_low_target.new_thread (lwp);
948
949 lwp->thread = add_thread (ptid, lwp);
950
951 return lwp;
952 }
953
954 /* Start an inferior process and return its pid.
955 ALLARGS is a vector of program-name and args. */
956
957 static int
958 linux_create_inferior (char *program, char **allargs)
959 {
960 struct lwp_info *new_lwp;
961 int pid;
962 ptid_t ptid;
963 struct cleanup *restore_personality
964 = maybe_disable_address_space_randomization (disable_randomization);
965
966 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
967 pid = vfork ();
968 #else
969 pid = fork ();
970 #endif
971 if (pid < 0)
972 perror_with_name ("fork");
973
974 if (pid == 0)
975 {
976 close_most_fds ();
977 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
978
979 setpgid (0, 0);
980
981 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
982 stdout to stderr so that inferior i/o doesn't corrupt the connection.
983 Also, redirect stdin to /dev/null. */
984 if (remote_connection_is_stdio ())
985 {
986 close (0);
987 open ("/dev/null", O_RDONLY);
988 dup2 (2, 1);
989 if (write (2, "stdin/stdout redirected\n",
990 sizeof ("stdin/stdout redirected\n") - 1) < 0)
991 {
992 /* Errors ignored. */;
993 }
994 }
995
996 execv (program, allargs);
997 if (errno == ENOENT)
998 execvp (program, allargs);
999
1000 fprintf (stderr, "Cannot exec %s: %s.\n", program,
1001 strerror (errno));
1002 fflush (stderr);
1003 _exit (0177);
1004 }
1005
1006 do_cleanups (restore_personality);
1007
1008 linux_add_process (pid, 0);
1009
1010 ptid = ptid_build (pid, pid, 0);
1011 new_lwp = add_lwp (ptid);
1012 new_lwp->must_set_ptrace_flags = 1;
1013
1014 return pid;
1015 }
1016
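/* The launch handshake above in miniature: the child requests tracing
   with PTRACE_TRACEME and execs; the exec delivers a SIGTRAP stop,
   which the parent must collect with waitpid before issuing further
   ptrace requests.  A self-contained sketch ("true" as the inferior
   is just an example), illustrative only:  */
#if 0
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  int status;
  pid_t pid = fork ();

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, (void *) 0, (void *) 0);
      execlp ("true", "true", (char *) 0);
      _exit (127);
    }
  waitpid (pid, &status, 0);	/* SIGTRAP stop at the exec.  */
  printf ("stopped at exec: %d\n", WIFSTOPPED (status));
  ptrace (PTRACE_CONT, pid, (void *) 0, (void *) 0);
  waitpid (pid, &status, 0);	/* Collect the exit status.  */
  return 0;
}
#endif
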
1017 /* Implement the post_create_inferior target_ops method. */
1018
1019 static void
1020 linux_post_create_inferior (void)
1021 {
1022 struct lwp_info *lwp = get_thread_lwp (current_thread);
1023
1024 linux_arch_setup ();
1025
1026 if (lwp->must_set_ptrace_flags)
1027 {
1028 struct process_info *proc = current_process ();
1029 int options = linux_low_ptrace_options (proc->attached);
1030
1031 linux_enable_event_reporting (lwpid_of (current_thread), options);
1032 lwp->must_set_ptrace_flags = 0;
1033 }
1034 }
1035
1036 /* Attach to an inferior process. Returns 0 on success, ERRNO on
1037 error. */
1038
1039 int
1040 linux_attach_lwp (ptid_t ptid)
1041 {
1042 struct lwp_info *new_lwp;
1043 int lwpid = ptid_get_lwp (ptid);
1044
1045 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
1046 != 0)
1047 return errno;
1048
1049 new_lwp = add_lwp (ptid);
1050
1051 /* We need to wait for SIGSTOP before being able to make the next
1052 ptrace call on this LWP. */
1053 new_lwp->must_set_ptrace_flags = 1;
1054
1055 if (linux_proc_pid_is_stopped (lwpid))
1056 {
1057 if (debug_threads)
1058 debug_printf ("Attached to a stopped process\n");
1059
1060 /* The process is definitely stopped. It is in a job control
1061 stop, unless the kernel predates the TASK_STOPPED /
1062 TASK_TRACED distinction, in which case it might be in a
1063 ptrace stop. Make sure it is in a ptrace stop; from there we
1064 can kill it, signal it, et cetera.
1065
1066 First make sure there is a pending SIGSTOP. Since we are
1067 already attached, the process cannot transition from stopped
1068 to running without a PTRACE_CONT; so we know this signal will
1069 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1070 probably already in the queue (unless this kernel is old
1071 enough to use TASK_STOPPED for ptrace stops); but since
1072 SIGSTOP is not an RT signal, it can only be queued once. */
1073 kill_lwp (lwpid, SIGSTOP);
1074
1075 /* Finally, resume the stopped process. This will deliver the
1076 SIGSTOP (or a higher priority signal, just like normal
1077 PTRACE_ATTACH), which we'll catch later on. */
1078 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1079 }
1080
1081 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
1082 brings it to a halt.
1083
1084 There are several cases to consider here:
1085
1086 1) gdbserver has already attached to the process and is being notified
1087 of a new thread that is being created.
1088 In this case we should ignore that SIGSTOP and resume the
1089 process. This is handled below by setting stop_expected = 1,
1090 and the fact that add_thread sets last_resume_kind ==
1091 resume_continue.
1092
1093 2) This is the first thread (the process thread), and we're attaching
1094 to it via attach_inferior.
1095 In this case we want the process thread to stop.
1096 This is handled by having linux_attach set last_resume_kind ==
1097 resume_stop after we return.
1098
1099 If the pid we are attaching to is also the tgid, we attach to and
1100 stop all the existing threads. Otherwise, we attach to pid and
1101 ignore any other threads in the same group as this pid.
1102
1103 3) GDB is connecting to gdbserver and is requesting an enumeration of all
1104 existing threads.
1105 In this case we want the thread to stop.
1106 FIXME: This case is currently not properly handled.
1107 We should wait for the SIGSTOP but don't. Things apparently work
1108 because enough time passes between when we ptrace (ATTACH) and when
1109 gdb makes the next ptrace call on the thread.
1110
1111 On the other hand, if we are currently trying to stop all threads, we
1112 should treat the new thread as if we had sent it a SIGSTOP. This works
1113 because we are guaranteed that the add_lwp call above added us to the
1114 end of the list, and so the new thread has not yet reached
1115 wait_for_sigstop (but will). */
1116 new_lwp->stop_expected = 1;
1117
1118 return 0;
1119 }
1120
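/* The attach handshake reduced to its core.  Unlike the deferred
   handling above, a standalone tracer would typically wait
   synchronously: PTRACE_ATTACH queues a SIGSTOP, and that stop must
   be collected with waitpid before further ptrace requests on the
   LWP are legal.  Illustrative only.  */
#if 0
if (ptrace (PTRACE_ATTACH, lwpid, (void *) 0, (void *) 0) != 0)
  return errno;

if (waitpid (lwpid, &status, __WALL) == lwpid && WIFSTOPPED (status))
  {
    /* PTRACE_GETREGS, PTRACE_CONT, etc. are now permitted.  */
  }
#endif
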
1121 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1122 already attached. Returns true if a new LWP is found, false
1123 otherwise. */
1124
1125 static int
1126 attach_proc_task_lwp_callback (ptid_t ptid)
1127 {
1128 /* Is this a new thread? */
1129 if (find_thread_ptid (ptid) == NULL)
1130 {
1131 int lwpid = ptid_get_lwp (ptid);
1132 int err;
1133
1134 if (debug_threads)
1135 debug_printf ("Found new lwp %d\n", lwpid);
1136
1137 err = linux_attach_lwp (ptid);
1138
1139 /* Be quiet if we simply raced with the thread exiting. EPERM
1140 is returned if the thread's task still exists, and is marked
1141 as exited or zombie, as well as other conditions, so in that
1142 case, confirm the status in /proc/PID/status. */
1143 if (err == ESRCH
1144 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1145 {
1146 if (debug_threads)
1147 {
1148 debug_printf ("Cannot attach to lwp %d: "
1149 "thread is gone (%d: %s)\n",
1150 lwpid, err, strerror (err));
1151 }
1152 }
1153 else if (err != 0)
1154 {
1155 warning (_("Cannot attach to lwp %d: %s"),
1156 lwpid,
1157 linux_ptrace_attach_fail_reason_string (ptid, err));
1158 }
1159
1160 return 1;
1161 }
1162 return 0;
1163 }
1164
1165 static void async_file_mark (void);
1166
1167 /* Attach to PID. If PID is the tgid, attach to it and all
1168 of its threads. */
1169
1170 static int
1171 linux_attach (unsigned long pid)
1172 {
1173 struct process_info *proc;
1174 struct thread_info *initial_thread;
1175 ptid_t ptid = ptid_build (pid, pid, 0);
1176 int err;
1177
1178 /* Attach to PID. We will check for other threads
1179 soon. */
1180 err = linux_attach_lwp (ptid);
1181 if (err != 0)
1182 error ("Cannot attach to process %ld: %s",
1183 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1184
1185 proc = linux_add_process (pid, 1);
1186
1187 /* Don't ignore the initial SIGSTOP if we just attached to this
1188 process. It will be collected by wait shortly. */
1189 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1190 initial_thread->last_resume_kind = resume_stop;
1191
1192 /* We must attach to every LWP. If /proc is mounted, use that to
1193 find them now. On the one hand, the inferior may be using raw
1194 clone instead of using pthreads. On the other hand, even if it
1195 is using pthreads, GDB may not be connected yet (thread_db needs
1196 to do symbol lookups, through qSymbol). Also, thread_db walks
1197 structures in the inferior's address space to find the list of
1198 threads/LWPs, and those structures may well be corrupted. Note
1199 that once thread_db is loaded, we'll still use it to list threads
1200 and associate pthread info with each LWP. */
1201 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1202
1203 /* GDB will shortly read the xml target description for this
1204 process, to figure out the process' architecture. But the target
1205 description is only filled in when the first process/thread in
1206 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1207 that now, otherwise, if GDB is fast enough, it could read the
1208 target description _before_ that initial stop. */
1209 if (non_stop)
1210 {
1211 struct lwp_info *lwp;
1212 int wstat, lwpid;
1213 ptid_t pid_ptid = pid_to_ptid (pid);
1214
1215 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1216 &wstat, __WALL);
1217 gdb_assert (lwpid > 0);
1218
1219 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1220
1221 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1222 {
1223 lwp->status_pending_p = 1;
1224 lwp->status_pending = wstat;
1225 }
1226
1227 initial_thread->last_resume_kind = resume_continue;
1228
1229 async_file_mark ();
1230
1231 gdb_assert (proc->tdesc != NULL);
1232 }
1233
1234 return 0;
1235 }
1236
1237 struct counter
1238 {
1239 int pid;
1240 int count;
1241 };
1242
1243 static int
1244 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1245 {
1246 struct counter *counter = (struct counter *) args;
1247
1248 if (ptid_get_pid (entry->id) == counter->pid)
1249 {
1250 if (++counter->count > 1)
1251 return 1;
1252 }
1253
1254 return 0;
1255 }
1256
1257 static int
1258 last_thread_of_process_p (int pid)
1259 {
1260 struct counter counter = { pid , 0 };
1261
1262 return (find_inferior (&all_threads,
1263 second_thread_of_pid_p, &counter) == NULL);
1264 }
1265
1266 /* Kill LWP. */
1267
1268 static void
1269 linux_kill_one_lwp (struct lwp_info *lwp)
1270 {
1271 struct thread_info *thr = get_lwp_thread (lwp);
1272 int pid = lwpid_of (thr);
1273
1274 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1275 there is no signal context, and ptrace(PTRACE_KILL) (or
1276 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1277 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1278 alternative is to kill with SIGKILL. We only need one SIGKILL
1279 per process, not one for each thread. But since we still
1280 support debugging programs using raw clone without CLONE_THREAD,
1281 we send one for each thread. For years, we used PTRACE_KILL
1282 only, so we're being a bit paranoid about some old kernels where
1283 PTRACE_KILL might work better (dubious if there are any such, but
1284 that's why it's paranoia), so we try SIGKILL first, PTRACE_KILL
1285 second, and so we're fine everywhere. */
1286
1287 errno = 0;
1288 kill_lwp (pid, SIGKILL);
1289 if (debug_threads)
1290 {
1291 int save_errno = errno;
1292
1293 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1294 target_pid_to_str (ptid_of (thr)),
1295 save_errno ? strerror (save_errno) : "OK");
1296 }
1297
1298 errno = 0;
1299 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1300 if (debug_threads)
1301 {
1302 int save_errno = errno;
1303
1304 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1305 target_pid_to_str (ptid_of (thr)),
1306 save_errno ? strerror (save_errno) : "OK");
1307 }
1308 }
1309
1310 /* Kill LWP and wait for it to die. */
1311
1312 static void
1313 kill_wait_lwp (struct lwp_info *lwp)
1314 {
1315 struct thread_info *thr = get_lwp_thread (lwp);
1316 int pid = ptid_get_pid (ptid_of (thr));
1317 int lwpid = ptid_get_lwp (ptid_of (thr));
1318 int wstat;
1319 int res;
1320
1321 if (debug_threads)
1322 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1323
1324 do
1325 {
1326 linux_kill_one_lwp (lwp);
1327
1328 /* Make sure it died. Notes:
1329
1330 - The loop is most likely unnecessary.
1331
1332 - We don't use linux_wait_for_event as that could delete lwps
1333 while we're iterating over them. We're not interested in
1334 any pending status at this point, only in making sure all
1335 wait status on the kernel side are collected until the
1336 process is reaped.
1337
1338 - We don't use __WALL here as the __WALL emulation relies on
1339 SIGCHLD, and killing a stopped process doesn't generate
1340 one, nor an exit status.
1341 */
1342 res = my_waitpid (lwpid, &wstat, 0);
1343 if (res == -1 && errno == ECHILD)
1344 res = my_waitpid (lwpid, &wstat, __WCLONE);
1345 } while (res > 0 && WIFSTOPPED (wstat));
1346
1347 /* Even if it was stopped, the child may have already disappeared.
1348 E.g., if it was killed by SIGKILL. */
1349 if (res < 0 && errno != ECHILD)
1350 perror_with_name ("kill_wait_lwp");
1351 }
1352
1353 /* Callback for `find_inferior'. Kills an lwp of a given process,
1354 except the leader. */
1355
1356 static int
1357 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1358 {
1359 struct thread_info *thread = (struct thread_info *) entry;
1360 struct lwp_info *lwp = get_thread_lwp (thread);
1361 int pid = * (int *) args;
1362
1363 if (ptid_get_pid (entry->id) != pid)
1364 return 0;
1365
1366 /* We avoid killing the first thread here, because of a Linux kernel (at
1367 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1368 the children get a chance to be reaped, it will remain a zombie
1369 forever. */
1370
1371 if (lwpid_of (thread) == pid)
1372 {
1373 if (debug_threads)
1374 debug_printf ("lkop: is last of process %s\n",
1375 target_pid_to_str (entry->id));
1376 return 0;
1377 }
1378
1379 kill_wait_lwp (lwp);
1380 return 0;
1381 }
1382
1383 static int
1384 linux_kill (int pid)
1385 {
1386 struct process_info *process;
1387 struct lwp_info *lwp;
1388
1389 process = find_process_pid (pid);
1390 if (process == NULL)
1391 return -1;
1392
1393 /* If we're killing a running inferior, make sure it is stopped
1394 first, as PTRACE_KILL will not work otherwise. */
1395 stop_all_lwps (0, NULL);
1396
1397 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1398
1399 /* See the comment in linux_kill_one_lwp. We did not kill the first
1400 thread in the list, so do so now. */
1401 lwp = find_lwp_pid (pid_to_ptid (pid));
1402
1403 if (lwp == NULL)
1404 {
1405 if (debug_threads)
1406 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1407 pid);
1408 }
1409 else
1410 kill_wait_lwp (lwp);
1411
1412 the_target->mourn (process);
1413
1414 /* Since we presently can only stop all lwps of all processes, we
1415 need to unstop lwps of other processes. */
1416 unstop_all_lwps (0, NULL);
1417 return 0;
1418 }
1419
1420 /* Get pending signal of THREAD, for detaching purposes. This is the
1421 signal the thread last stopped for, which we need to deliver to the
1422 thread when detaching; otherwise it would be suppressed/lost. */
1423
1424 static int
1425 get_detach_signal (struct thread_info *thread)
1426 {
1427 enum gdb_signal signo = GDB_SIGNAL_0;
1428 int status;
1429 struct lwp_info *lp = get_thread_lwp (thread);
1430
1431 if (lp->status_pending_p)
1432 status = lp->status_pending;
1433 else
1434 {
1435 /* If the thread had been suspended by gdbserver, and it stopped
1436 cleanly, then it'll have stopped with SIGSTOP. But we don't
1437 want to deliver that SIGSTOP. */
1438 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1439 || thread->last_status.value.sig == GDB_SIGNAL_0)
1440 return 0;
1441
1442 /* Otherwise, we may need to deliver the signal we
1443 intercepted. */
1444 status = lp->last_status;
1445 }
1446
1447 if (!WIFSTOPPED (status))
1448 {
1449 if (debug_threads)
1450 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1451 target_pid_to_str (ptid_of (thread)));
1452 return 0;
1453 }
1454
1455 /* Extended wait statuses aren't real SIGTRAPs. */
1456 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1457 {
1458 if (debug_threads)
1459 debug_printf ("GPS: lwp %s had stopped with extended "
1460 "status: no pending signal\n",
1461 target_pid_to_str (ptid_of (thread)));
1462 return 0;
1463 }
1464
1465 signo = gdb_signal_from_host (WSTOPSIG (status));
1466
1467 if (program_signals_p && !program_signals[signo])
1468 {
1469 if (debug_threads)
1470 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1471 target_pid_to_str (ptid_of (thread)),
1472 gdb_signal_to_string (signo));
1473 return 0;
1474 }
1475 else if (!program_signals_p
1476 /* If we have no way to know which signals GDB does not
1477 want to have passed to the program, assume
1478 SIGTRAP/SIGINT, which is GDB's default. */
1479 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1480 {
1481 if (debug_threads)
1482 debug_printf ("GPS: lwp %s had signal %s, "
1483 "but we don't know if we should pass it. "
1484 "Default to not.\n",
1485 target_pid_to_str (ptid_of (thread)),
1486 gdb_signal_to_string (signo));
1487 return 0;
1488 }
1489 else
1490 {
1491 if (debug_threads)
1492 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1493 target_pid_to_str (ptid_of (thread)),
1494 gdb_signal_to_string (signo));
1495
1496 return WSTOPSIG (status);
1497 }
1498 }
1499
1500 static int
1501 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1502 {
1503 struct thread_info *thread = (struct thread_info *) entry;
1504 struct lwp_info *lwp = get_thread_lwp (thread);
1505 int pid = * (int *) args;
1506 int sig;
1507
1508 if (ptid_get_pid (entry->id) != pid)
1509 return 0;
1510
1511 /* If there is a pending SIGSTOP, get rid of it. */
1512 if (lwp->stop_expected)
1513 {
1514 if (debug_threads)
1515 debug_printf ("Sending SIGCONT to %s\n",
1516 target_pid_to_str (ptid_of (thread)));
1517
1518 kill_lwp (lwpid_of (thread), SIGCONT);
1519 lwp->stop_expected = 0;
1520 }
1521
1522 /* Flush any pending changes to the process's registers. */
1523 regcache_invalidate_thread (thread);
1524
1525 /* Pass on any pending signal for this thread. */
1526 sig = get_detach_signal (thread);
1527
1528 /* Finally, let it resume. */
1529 if (the_low_target.prepare_to_resume != NULL)
1530 the_low_target.prepare_to_resume (lwp);
1531 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1532 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1533 error (_("Can't detach %s: %s"),
1534 target_pid_to_str (ptid_of (thread)),
1535 strerror (errno));
1536
1537 delete_lwp (lwp);
1538 return 0;
1539 }
1540
1541 static int
1542 linux_detach (int pid)
1543 {
1544 struct process_info *process;
1545
1546 process = find_process_pid (pid);
1547 if (process == NULL)
1548 return -1;
1549
1550 /* As there's a step over already in progress, let it finish first,
1551 otherwise nesting a stabilize_threads operation on top gets real
1552 messy. */
1553 complete_ongoing_step_over ();
1554
1555 /* Stop all threads before detaching. First, ptrace requires that
1556 the thread is stopped to successfully detach. Second, thread_db
1557 may need to uninstall thread event breakpoints from memory, which
1558 only works with a stopped process anyway. */
1559 stop_all_lwps (0, NULL);
1560
1561 #ifdef USE_THREAD_DB
1562 thread_db_detach (process);
1563 #endif
1564
1565 /* Stabilize threads (move out of jump pads). */
1566 stabilize_threads ();
1567
1568 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1569
1570 the_target->mourn (process);
1571
1572 /* Since we presently can only stop all lwps of all processes, we
1573 need to unstop lwps of other processes. */
1574 unstop_all_lwps (0, NULL);
1575 return 0;
1576 }
1577
1578 /* Remove all LWPs that belong to process PROC from the lwp list. */
1579
1580 static int
1581 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1582 {
1583 struct thread_info *thread = (struct thread_info *) entry;
1584 struct lwp_info *lwp = get_thread_lwp (thread);
1585 struct process_info *process = (struct process_info *) proc;
1586
1587 if (pid_of (thread) == pid_of (process))
1588 delete_lwp (lwp);
1589
1590 return 0;
1591 }
1592
1593 static void
1594 linux_mourn (struct process_info *process)
1595 {
1596 struct process_info_private *priv;
1597
1598 #ifdef USE_THREAD_DB
1599 thread_db_mourn (process);
1600 #endif
1601
1602 find_inferior (&all_threads, delete_lwp_callback, process);
1603
1604 /* Free all private data. */
1605 priv = process->priv;
1606 free (priv->arch_private);
1607 free (priv);
1608 process->priv = NULL;
1609
1610 remove_process (process);
1611 }
1612
1613 static void
1614 linux_join (int pid)
1615 {
1616 int status, ret;
1617
1618 do {
1619 ret = my_waitpid (pid, &status, 0);
1620 if (WIFEXITED (status) || WIFSIGNALED (status))
1621 break;
1622 } while (ret != -1 || errno != ECHILD);
1623 }
1624
1625 /* Return nonzero if the given thread is still alive. */
1626 static int
1627 linux_thread_alive (ptid_t ptid)
1628 {
1629 struct lwp_info *lwp = find_lwp_pid (ptid);
1630
1631 /* We assume we always know if a thread exits. If a whole process
1632 exited but we still haven't been able to report it to GDB, we'll
1633 hold on to the last lwp of the dead process. */
1634 if (lwp != NULL)
1635 return !lwp_is_marked_dead (lwp);
1636 else
1637 return 0;
1638 }
1639
1640 /* Return 1 if this lwp still has an interesting status pending. If
1641 not (e.g., it had stopped for a breakpoint that is gone), return
1642 0. */
1643
1644 static int
1645 thread_still_has_status_pending_p (struct thread_info *thread)
1646 {
1647 struct lwp_info *lp = get_thread_lwp (thread);
1648
1649 if (!lp->status_pending_p)
1650 return 0;
1651
1652 if (thread->last_resume_kind != resume_stop
1653 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1654 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1655 {
1656 struct thread_info *saved_thread;
1657 CORE_ADDR pc;
1658 int discard = 0;
1659
1660 gdb_assert (lp->last_status != 0);
1661
1662 pc = get_pc (lp);
1663
1664 saved_thread = current_thread;
1665 current_thread = thread;
1666
1667 if (pc != lp->stop_pc)
1668 {
1669 if (debug_threads)
1670 debug_printf ("PC of %ld changed\n",
1671 lwpid_of (thread));
1672 discard = 1;
1673 }
1674
1675 #if !USE_SIGTRAP_SIGINFO
1676 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1677 && !(*the_low_target.breakpoint_at) (pc))
1678 {
1679 if (debug_threads)
1680 debug_printf ("previous SW breakpoint of %ld gone\n",
1681 lwpid_of (thread));
1682 discard = 1;
1683 }
1684 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1685 && !hardware_breakpoint_inserted_here (pc))
1686 {
1687 if (debug_threads)
1688 debug_printf ("previous HW breakpoint of %ld gone\n",
1689 lwpid_of (thread));
1690 discard = 1;
1691 }
1692 #endif
1693
1694 current_thread = saved_thread;
1695
1696 if (discard)
1697 {
1698 if (debug_threads)
1699 debug_printf ("discarding pending breakpoint status\n");
1700 lp->status_pending_p = 0;
1701 return 0;
1702 }
1703 }
1704
1705 return 1;
1706 }
1707
1708 /* Returns true if LWP is resumed from the client's perspective. */
1709
1710 static int
1711 lwp_resumed (struct lwp_info *lwp)
1712 {
1713 struct thread_info *thread = get_lwp_thread (lwp);
1714
1715 if (thread->last_resume_kind != resume_stop)
1716 return 1;
1717
1718 /* Did gdb send us a `vCont;t', but we haven't reported the
1719 corresponding stop to gdb yet? If so, the thread is still
1720 resumed/running from gdb's perspective. */
1721 if (thread->last_resume_kind == resume_stop
1722 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1723 return 1;
1724
1725 return 0;
1726 }
1727
1728 /* Return 1 if this lwp has an interesting status pending. */
1729 static int
1730 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1731 {
1732 struct thread_info *thread = (struct thread_info *) entry;
1733 struct lwp_info *lp = get_thread_lwp (thread);
1734 ptid_t ptid = * (ptid_t *) arg;
1735
1736 /* Check if we're only interested in events from a specific process
1737 or a specific LWP. */
1738 if (!ptid_match (ptid_of (thread), ptid))
1739 return 0;
1740
1741 if (!lwp_resumed (lp))
1742 return 0;
1743
1744 if (lp->status_pending_p
1745 && !thread_still_has_status_pending_p (thread))
1746 {
1747 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1748 return 0;
1749 }
1750
1751 return lp->status_pending_p;
1752 }
1753
1754 static int
1755 same_lwp (struct inferior_list_entry *entry, void *data)
1756 {
1757 ptid_t ptid = *(ptid_t *) data;
1758 int lwp;
1759
1760 if (ptid_get_lwp (ptid) != 0)
1761 lwp = ptid_get_lwp (ptid);
1762 else
1763 lwp = ptid_get_pid (ptid);
1764
1765 if (ptid_get_lwp (entry->id) == lwp)
1766 return 1;
1767
1768 return 0;
1769 }
1770
1771 struct lwp_info *
1772 find_lwp_pid (ptid_t ptid)
1773 {
1774 struct inferior_list_entry *thread
1775 = find_inferior (&all_threads, same_lwp, &ptid);
1776
1777 if (thread == NULL)
1778 return NULL;
1779
1780 return get_thread_lwp ((struct thread_info *) thread);
1781 }
1782
1783 /* Return the number of known LWPs in the tgid given by PID. */
1784
1785 static int
1786 num_lwps (int pid)
1787 {
1788 struct inferior_list_entry *inf, *tmp;
1789 int count = 0;
1790
1791 ALL_INFERIORS (&all_threads, inf, tmp)
1792 {
1793 if (ptid_get_pid (inf->id) == pid)
1794 count++;
1795 }
1796
1797 return count;
1798 }
1799
1800 /* The arguments passed to iterate_over_lwps. */
1801
1802 struct iterate_over_lwps_args
1803 {
1804 /* The FILTER argument passed to iterate_over_lwps. */
1805 ptid_t filter;
1806
1807 /* The CALLBACK argument passed to iterate_over_lwps. */
1808 iterate_over_lwps_ftype *callback;
1809
1810 /* The DATA argument passed to iterate_over_lwps. */
1811 void *data;
1812 };
1813
1814 /* Callback for find_inferior used by iterate_over_lwps to filter
1815 calls to the callback supplied to that function. Returning a
1816 nonzero value causes find_inferior to stop iterating and return
1817 the current inferior_list_entry. Returning zero indicates that
1818 find_inferior should continue iterating. */
1819
1820 static int
1821 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1822 {
1823 struct iterate_over_lwps_args *args
1824 = (struct iterate_over_lwps_args *) args_p;
1825
1826 if (ptid_match (entry->id, args->filter))
1827 {
1828 struct thread_info *thr = (struct thread_info *) entry;
1829 struct lwp_info *lwp = get_thread_lwp (thr);
1830
1831 return (*args->callback) (lwp, args->data);
1832 }
1833
1834 return 0;
1835 }
1836
1837 /* See nat/linux-nat.h. */
1838
1839 struct lwp_info *
1840 iterate_over_lwps (ptid_t filter,
1841 iterate_over_lwps_ftype callback,
1842 void *data)
1843 {
1844 struct iterate_over_lwps_args args = {filter, callback, data};
1845 struct inferior_list_entry *entry;
1846
1847 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1848 if (entry == NULL)
1849 return NULL;
1850
1851 return get_thread_lwp ((struct thread_info *) entry);
1852 }
1853
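/* Example use of iterate_over_lwps: the callback runs for every LWP
   matching FILTER, iteration stops at the first nonzero return, and
   that LWP is handed back (NULL if none matched).  The callback below
   is hypothetical, for illustration only.  */
#if 0
static int
is_stopped_lwp (struct lwp_info *lwp, void *data)
{
  return lwp->stopped;
}

/* Find any stopped LWP of process PID:
     struct lwp_info *lp
       = iterate_over_lwps (pid_to_ptid (pid), is_stopped_lwp, NULL);  */
#endif
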
1854 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1855 their exits until all other threads in the group have exited. */
1856
1857 static void
1858 check_zombie_leaders (void)
1859 {
1860 struct process_info *proc, *tmp;
1861
1862 ALL_PROCESSES (proc, tmp)
1863 {
1864 pid_t leader_pid = pid_of (proc);
1865 struct lwp_info *leader_lp;
1866
1867 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1868
1869 if (debug_threads)
1870 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1871 "num_lwps=%d, zombie=%d\n",
1872 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1873 linux_proc_pid_is_zombie (leader_pid));
1874
1875 if (leader_lp != NULL && !leader_lp->stopped
1876 /* Check if there are other threads in the group, as we may
1877 have raced with the inferior simply exiting. */
1878 && !last_thread_of_process_p (leader_pid)
1879 && linux_proc_pid_is_zombie (leader_pid))
1880 {
1881 /* A leader zombie can mean one of two things:
1882
1883 - It exited, and there's an exit status pending
1884 available, or only the leader exited (not the whole
1885 program). In the latter case, we can't waitpid the
1886 leader's exit status until all other threads are gone.
1887
1888 - There are 3 or more threads in the group, and a thread
1889 other than the leader exec'd. On an exec, the Linux
1890 kernel destroys all other threads (except the execing
1891 one) in the thread group, and resets the execing thread's
1892 tid to the tgid. No exit notification is sent for the
1893 execing thread -- from the ptracer's perspective, it
1894 appears as though the execing thread just vanishes.
1895 Until we reap all other threads except the leader and the
1896 execing thread, the leader will be zombie, and the
1897 execing thread will be in `D (disc sleep)'. As soon as
1898 all other threads are reaped, the execing thread changes
1899 	     its tid to the tgid, and the previous (zombie) leader
1900 vanishes, giving place to the "new" leader. We could try
1901 distinguishing the exit and exec cases, by waiting once
1902 more, and seeing if something comes out, but it doesn't
1903 sound useful. The previous leader _does_ go away, and
1904 we'll re-add the new one once we see the exec event
1905 (which is just the same as what would happen if the
1906 previous leader did exit voluntarily before some other
1907 thread execs). */
1908
1909 if (debug_threads)
1910 fprintf (stderr,
1911 "CZL: Thread group leader %d zombie "
1912 "(it exited, or another thread execd).\n",
1913 leader_pid);
1914
1915 delete_lwp (leader_lp);
1916 }
1917 }
1918 }
1919
1920 /* Callback for `find_inferior'. Returns the first LWP that is not
1921 stopped. ARG is a PTID filter. */
1922
1923 static int
1924 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1925 {
1926 struct thread_info *thr = (struct thread_info *) entry;
1927 struct lwp_info *lwp;
1928 ptid_t filter = *(ptid_t *) arg;
1929
1930 if (!ptid_match (ptid_of (thr), filter))
1931 return 0;
1932
1933 lwp = get_thread_lwp (thr);
1934 if (!lwp->stopped)
1935 return 1;
1936
1937 return 0;
1938 }
1939
1940 /* Increment LWP's suspend count. */
1941
1942 static void
1943 lwp_suspended_inc (struct lwp_info *lwp)
1944 {
1945 lwp->suspended++;
1946
1947 if (debug_threads && lwp->suspended > 4)
1948 {
1949 struct thread_info *thread = get_lwp_thread (lwp);
1950
1951 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1952 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1953 }
1954 }
1955
1956 /* Decrement LWP's suspend count. */
1957
1958 static void
1959 lwp_suspended_decr (struct lwp_info *lwp)
1960 {
1961 lwp->suspended--;
1962
1963 if (lwp->suspended < 0)
1964 {
1965 struct thread_info *thread = get_lwp_thread (lwp);
1966
1967 internal_error (__FILE__, __LINE__,
1968 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1969 lwp->suspended);
1970 }
1971 }
1972
1973 /* This function should only be called if the LWP got a SIGTRAP.
1974
1975    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1976    event was handled, 0 otherwise.  */
1977
1978 static int
1979 handle_tracepoints (struct lwp_info *lwp)
1980 {
1981 struct thread_info *tinfo = get_lwp_thread (lwp);
1982 int tpoint_related_event = 0;
1983
1984 gdb_assert (lwp->suspended == 0);
1985
1986 /* If this tracepoint hit causes a tracing stop, we'll immediately
1987 uninsert tracepoints. To do this, we temporarily pause all
1988 threads, unpatch away, and then unpause threads. We need to make
1989 sure the unpausing doesn't resume LWP too. */
1990 lwp_suspended_inc (lwp);
1991
1992 /* And we need to be sure that any all-threads-stopping doesn't try
1993 to move threads out of the jump pads, as it could deadlock the
1994 inferior (LWP could be in the jump pad, maybe even holding the
1995 lock.) */
1996
1997 /* Do any necessary step collect actions. */
1998 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1999
2000 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
2001
2002 /* See if we just hit a tracepoint and do its main collect
2003 actions. */
2004 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
2005
2006 lwp_suspended_decr (lwp);
2007
2008 gdb_assert (lwp->suspended == 0);
2009 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
2010
2011 if (tpoint_related_event)
2012 {
2013 if (debug_threads)
2014 debug_printf ("got a tracepoint event\n");
2015 return 1;
2016 }
2017
2018 return 0;
2019 }
2020
2021 /* Convenience wrapper. Returns true if LWP is presently collecting a
2022 fast tracepoint. */
2023
2024 static int
2025 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
2026 struct fast_tpoint_collect_status *status)
2027 {
2028 CORE_ADDR thread_area;
2029 struct thread_info *thread = get_lwp_thread (lwp);
2030
2031 if (the_low_target.get_thread_area == NULL)
2032 return 0;
2033
2034 /* Get the thread area address. This is used to recognize which
2035 thread is which when tracing with the in-process agent library.
2036 We don't read anything from the address, and treat it as opaque;
2037 it's the address itself that we assume is unique per-thread. */
2038 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
2039 return 0;
2040
2041 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
2042 }
2043
2044 /* The reason we resume in the caller is that we want to be able
2045    to pass lwp->status_pending as WSTAT, and we need to clear
2046    status_pending_p before resuming; otherwise, linux_resume_one_lwp
2047    refuses to resume.  Returns 1 if LWP must move out of the jump pad.  */
2048
2049 static int
2050 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
2051 {
2052 struct thread_info *saved_thread;
2053
2054 saved_thread = current_thread;
2055 current_thread = get_lwp_thread (lwp);
2056
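  /* Deferring only applies when there is no wait status to inspect,
     or when the LWP stopped for a signal other than SIGTRAP; SIGTRAP
     stops are handled by the tracepoint and breakpoint paths
     instead.  */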
2057 if ((wstat == NULL
2058 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
2059 && supports_fast_tracepoints ()
2060 && agent_loaded_p ())
2061 {
2062 struct fast_tpoint_collect_status status;
2063 int r;
2064
2065 if (debug_threads)
2066 debug_printf ("Checking whether LWP %ld needs to move out of the "
2067 "jump pad.\n",
2068 lwpid_of (current_thread));
2069
2070 r = linux_fast_tracepoint_collecting (lwp, &status);
2071
2072 if (wstat == NULL
2073 || (WSTOPSIG (*wstat) != SIGILL
2074 && WSTOPSIG (*wstat) != SIGFPE
2075 && WSTOPSIG (*wstat) != SIGSEGV
2076 && WSTOPSIG (*wstat) != SIGBUS))
2077 {
2078 lwp->collecting_fast_tracepoint = r;
2079
2080 if (r != 0)
2081 {
2082 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
2083 {
2084 /* Haven't executed the original instruction yet.
2085 Set breakpoint there, and wait till it's hit,
2086 then single-step until exiting the jump pad. */
2087 lwp->exit_jump_pad_bkpt
2088 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
2089 }
2090
2091 if (debug_threads)
2092 debug_printf ("Checking whether LWP %ld needs to move out of "
2093 "the jump pad...it does\n",
2094 lwpid_of (current_thread));
2095 current_thread = saved_thread;
2096
2097 return 1;
2098 }
2099 }
2100 else
2101 {
2102 /* If we get a synchronous signal while collecting, *and*
2103 while executing the (relocated) original instruction,
2104 reset the PC to point at the tpoint address, before
2105 reporting to GDB. Otherwise, it's an IPA lib bug: just
2106 report the signal to GDB, and pray for the best. */
2107
2108 lwp->collecting_fast_tracepoint = 0;
2109
2110 if (r != 0
2111 && (status.adjusted_insn_addr <= lwp->stop_pc
2112 && lwp->stop_pc < status.adjusted_insn_addr_end))
2113 {
2114 siginfo_t info;
2115 struct regcache *regcache;
2116
2117 /* The si_addr on a few signals references the address
2118 of the faulting instruction. Adjust that as
2119 well. */
2120 if ((WSTOPSIG (*wstat) == SIGILL
2121 || WSTOPSIG (*wstat) == SIGFPE
2122 || WSTOPSIG (*wstat) == SIGBUS
2123 || WSTOPSIG (*wstat) == SIGSEGV)
2124 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2125 (PTRACE_TYPE_ARG3) 0, &info) == 0
2126 /* Final check just to make sure we don't clobber
2127 the siginfo of non-kernel-sent signals. */
2128 && (uintptr_t) info.si_addr == lwp->stop_pc)
2129 {
2130 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2131 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2132 (PTRACE_TYPE_ARG3) 0, &info);
2133 }
2134
2135 regcache = get_thread_regcache (current_thread, 1);
2136 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2137 lwp->stop_pc = status.tpoint_addr;
2138
2139 /* Cancel any fast tracepoint lock this thread was
2140 holding. */
2141 force_unlock_trace_buffer ();
2142 }
2143
2144 if (lwp->exit_jump_pad_bkpt != NULL)
2145 {
2146 if (debug_threads)
2147 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2148 "stopping all threads momentarily.\n");
2149
2150 stop_all_lwps (1, lwp);
2151
2152 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2153 lwp->exit_jump_pad_bkpt = NULL;
2154
2155 unstop_all_lwps (1, lwp);
2156
2157 gdb_assert (lwp->suspended >= 0);
2158 }
2159 }
2160 }
2161
2162 if (debug_threads)
2163 debug_printf ("Checking whether LWP %ld needs to move out of the "
2164 "jump pad...no\n",
2165 lwpid_of (current_thread));
2166
2167 current_thread = saved_thread;
2168 return 0;
2169 }
2170
2171 /* Enqueue one signal in the "signals to report later when out of the
2172 jump pad" list. */
2173
2174 static void
2175 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2176 {
2177 struct pending_signals *p_sig;
2178 struct thread_info *thread = get_lwp_thread (lwp);
2179
2180 if (debug_threads)
2181 debug_printf ("Deferring signal %d for LWP %ld.\n",
2182 WSTOPSIG (*wstat), lwpid_of (thread));
2183
2184 if (debug_threads)
2185 {
2186 struct pending_signals *sig;
2187
2188 for (sig = lwp->pending_signals_to_report;
2189 sig != NULL;
2190 sig = sig->prev)
2191 debug_printf (" Already queued %d\n",
2192 sig->signal);
2193
2194 debug_printf (" (no more currently queued signals)\n");
2195 }
2196
2197 /* Don't enqueue non-RT signals if they are already in the deferred
2198 queue. (SIGSTOP being the easiest signal to see ending up here
2199 twice) */
2200 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2201 {
2202 struct pending_signals *sig;
2203
2204 for (sig = lwp->pending_signals_to_report;
2205 sig != NULL;
2206 sig = sig->prev)
2207 {
2208 if (sig->signal == WSTOPSIG (*wstat))
2209 {
2210 if (debug_threads)
2211 debug_printf ("Not requeuing already queued non-RT signal %d"
2212 " for LWP %ld\n",
2213 sig->signal,
2214 lwpid_of (thread));
2215 return;
2216 }
2217 }
2218 }
2219
2220 p_sig = XCNEW (struct pending_signals);
2221 p_sig->prev = lwp->pending_signals_to_report;
2222 p_sig->signal = WSTOPSIG (*wstat);
2223
2224 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2225 &p_sig->info);
2226
2227 lwp->pending_signals_to_report = p_sig;
2228 }
2229
2230 /* Dequeue one signal from the "signals to report later when out of
2231 the jump pad" list. */
2232
2233 static int
2234 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2235 {
2236 struct thread_info *thread = get_lwp_thread (lwp);
2237
2238 if (lwp->pending_signals_to_report != NULL)
2239 {
2240 struct pending_signals **p_sig;
2241
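      /* Signals are pushed at the head of the list by
	 enqueue_one_deferred_signal, so walk to the tail here to
	 report them in the order they were intercepted (FIFO).  */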
2242 p_sig = &lwp->pending_signals_to_report;
2243 while ((*p_sig)->prev != NULL)
2244 p_sig = &(*p_sig)->prev;
2245
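      /* Synthesize a wait status as if the signal had just been
	 reported by waitpid: W_STOPCODE (sig) builds a status for
	 which WIFSTOPPED is true and WSTOPSIG yields the queued
	 signal.  */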
2246 *wstat = W_STOPCODE ((*p_sig)->signal);
2247 if ((*p_sig)->info.si_signo != 0)
2248 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2249 &(*p_sig)->info);
2250 free (*p_sig);
2251 *p_sig = NULL;
2252
2253 if (debug_threads)
2254 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2255 WSTOPSIG (*wstat), lwpid_of (thread));
2256
2257 if (debug_threads)
2258 {
2259 struct pending_signals *sig;
2260
2261 for (sig = lwp->pending_signals_to_report;
2262 sig != NULL;
2263 sig = sig->prev)
2264 debug_printf (" Still queued %d\n",
2265 sig->signal);
2266
2267 debug_printf (" (no more queued signals)\n");
2268 }
2269
2270 return 1;
2271 }
2272
2273 return 0;
2274 }
2275
2276 /* Fetch the possibly triggered data watchpoint info and store it in
2277 CHILD.
2278
2279 On some archs, like x86, that use debug registers to set
2280 watchpoints, it's possible that the way to know which watched
2281    address trapped is to check the register that is used to select
2282 which address to watch. Problem is, between setting the watchpoint
2283 and reading back which data address trapped, the user may change
2284 the set of watchpoints, and, as a consequence, GDB changes the
2285 debug registers in the inferior. To avoid reading back a stale
2286    stopped-data-address when that happens, we cache in CHILD the fact
2287 that a watchpoint trapped, and the corresponding data address, as
2288 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2289 registers meanwhile, we have the cached data we can rely on. */
2290
2291 static int
2292 check_stopped_by_watchpoint (struct lwp_info *child)
2293 {
2294 if (the_low_target.stopped_by_watchpoint != NULL)
2295 {
2296 struct thread_info *saved_thread;
2297
2298 saved_thread = current_thread;
2299 current_thread = get_lwp_thread (child);
2300
2301 if (the_low_target.stopped_by_watchpoint ())
2302 {
2303 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2304
2305 if (the_low_target.stopped_data_address != NULL)
2306 child->stopped_data_address
2307 = the_low_target.stopped_data_address ();
2308 else
2309 child->stopped_data_address = 0;
2310 }
2311
2312 current_thread = saved_thread;
2313 }
2314
2315 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2316 }
2317
2318 /* Return the ptrace options that we want to try to enable. */
2319
2320 static int
2321 linux_low_ptrace_options (int attached)
2322 {
2323 int options = 0;
2324
2325 if (!attached)
2326 options |= PTRACE_O_EXITKILL;
2327
2328 if (report_fork_events)
2329 options |= PTRACE_O_TRACEFORK;
2330
2331 if (report_vfork_events)
2332 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2333
2334 if (report_exec_events)
2335 options |= PTRACE_O_TRACEEXEC;
2336
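  /* Always distinguish syscall stops from real SIGTRAPs:
     PTRACE_O_TRACESYSGOOD makes the kernel report syscall stops with
     bit 7 set in the stop signal (SIGTRAP | 0x80), which is what the
     SYSCALL_SIGTRAP checks in this file test for.  */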
2337 options |= PTRACE_O_TRACESYSGOOD;
2338
2339 return options;
2340 }
2341
2342 /* Do low-level handling of the event, and check if we should go on
2343    and pass it to caller code.  Return the affected LWP if so, or
2344 NULL otherwise. */
2345
2346 static struct lwp_info *
2347 linux_low_filter_event (int lwpid, int wstat)
2348 {
2349 struct lwp_info *child;
2350 struct thread_info *thread;
2351 int have_stop_pc = 0;
2352
2353 child = find_lwp_pid (pid_to_ptid (lwpid));
2354
2355 /* Check for stop events reported by a process we didn't already
2356 know about - anything not already in our LWP list.
2357
2358 If we're expecting to receive stopped processes after
2359 fork, vfork, and clone events, then we'll just add the
2360 new one to our list and go back to waiting for the event
2361 to be reported - the stopped process might be returned
2362 from waitpid before or after the event is.
2363
2364 But note the case of a non-leader thread exec'ing after the
2365    leader has exited and gone from our lists (because
2366 check_zombie_leaders deleted it). The non-leader thread
2367 changes its tid to the tgid. */
2368
2369 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2370 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2371 {
2372 ptid_t child_ptid;
2373
2374 /* A multi-thread exec after we had seen the leader exiting. */
2375 if (debug_threads)
2376 {
2377 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2378 "after exec.\n", lwpid);
2379 }
2380
2381 child_ptid = ptid_build (lwpid, lwpid, 0);
2382 child = add_lwp (child_ptid);
2383 child->stopped = 1;
2384 current_thread = child->thread;
2385 }
2386
2387 /* If we didn't find a process, one of two things presumably happened:
2388 - A process we started and then detached from has exited. Ignore it.
2389 - A process we are controlling has forked and the new child's stop
2390 was reported to us by the kernel. Save its PID. */
2391 if (child == NULL && WIFSTOPPED (wstat))
2392 {
2393 add_to_pid_list (&stopped_pids, lwpid, wstat);
2394 return NULL;
2395 }
2396 else if (child == NULL)
2397 return NULL;
2398
2399 thread = get_lwp_thread (child);
2400
2401 child->stopped = 1;
2402
2403 child->last_status = wstat;
2404
2405 /* Check if the thread has exited. */
2406 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2407 {
2408 if (debug_threads)
2409 debug_printf ("LLFE: %d exited.\n", lwpid);
2410
2411 if (finish_step_over (child))
2412 {
2413 /* Unsuspend all other LWPs, and set them back running again. */
2414 unsuspend_all_lwps (child);
2415 }
2416
2417 /* If there is at least one more LWP, then the exit signal was
2418 not the end of the debugged application and should be
2419 ignored, unless GDB wants to hear about thread exits. */
2420 if (report_thread_events
2421 || last_thread_of_process_p (pid_of (thread)))
2422 {
2423 	  /* Events are serialized to the GDB core, and we can't
2424 	     report this one right now, so leave the status pending for
2425 	     the next time we're able to report it.  */
2426 mark_lwp_dead (child, wstat);
2427 return child;
2428 }
2429 else
2430 {
2431 delete_lwp (child);
2432 return NULL;
2433 }
2434 }
2435
2436 gdb_assert (WIFSTOPPED (wstat));
2437
2438 if (WIFSTOPPED (wstat))
2439 {
2440 struct process_info *proc;
2441
2442 /* Architecture-specific setup after inferior is running. */
2443 proc = find_process_pid (pid_of (thread));
2444 if (proc->tdesc == NULL)
2445 {
2446 if (proc->attached)
2447 {
2448 /* This needs to happen after we have attached to the
2449 inferior and it is stopped for the first time, but
2450 before we access any inferior registers. */
2451 linux_arch_setup_thread (thread);
2452 }
2453 else
2454 {
2455 /* The process is started, but GDBserver will do
2456 architecture-specific setup after the program stops at
2457 the first instruction. */
2458 child->status_pending_p = 1;
2459 child->status_pending = wstat;
2460 return child;
2461 }
2462 }
2463 }
2464
2465 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2466 {
2467 struct process_info *proc = find_process_pid (pid_of (thread));
2468 int options = linux_low_ptrace_options (proc->attached);
2469
2470 linux_enable_event_reporting (lwpid, options);
2471 child->must_set_ptrace_flags = 0;
2472 }
2473
2474 /* Always update syscall_state, even if it will be filtered later. */
2475 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SYSCALL_SIGTRAP)
2476 {
2477 child->syscall_state
2478 = (child->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2479 ? TARGET_WAITKIND_SYSCALL_RETURN
2480 : TARGET_WAITKIND_SYSCALL_ENTRY);
2481 }
2482 else
2483 {
2484 /* Almost all other ptrace-stops are known to be outside of system
2485 calls, with further exceptions in handle_extended_wait. */
2486 child->syscall_state = TARGET_WAITKIND_IGNORE;
2487 }
2488
2489 /* Be careful to not overwrite stop_pc until save_stop_reason is
2490 called. */
2491 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2492 && linux_is_extended_waitstatus (wstat))
2493 {
2494 child->stop_pc = get_pc (child);
2495 if (handle_extended_wait (&child, wstat))
2496 {
2497 /* The event has been handled, so just return without
2498 reporting it. */
2499 return NULL;
2500 }
2501 }
2502
2503 if (linux_wstatus_maybe_breakpoint (wstat))
2504 {
2505 if (save_stop_reason (child))
2506 have_stop_pc = 1;
2507 }
2508
2509 if (!have_stop_pc)
2510 child->stop_pc = get_pc (child);
2511
2512 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2513 && child->stop_expected)
2514 {
2515 if (debug_threads)
2516 debug_printf ("Expected stop.\n");
2517 child->stop_expected = 0;
2518
2519 if (thread->last_resume_kind == resume_stop)
2520 {
2521 /* We want to report the stop to the core. Treat the
2522 SIGSTOP as a normal event. */
2523 if (debug_threads)
2524 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2525 target_pid_to_str (ptid_of (thread)));
2526 }
2527 else if (stopping_threads != NOT_STOPPING_THREADS)
2528 {
2529 /* Stopping threads. We don't want this SIGSTOP to end up
2530 pending. */
2531 if (debug_threads)
2532 debug_printf ("LLW: SIGSTOP caught for %s "
2533 "while stopping threads.\n",
2534 target_pid_to_str (ptid_of (thread)));
2535 return NULL;
2536 }
2537 else
2538 {
2539 /* This is a delayed SIGSTOP. Filter out the event. */
2540 if (debug_threads)
2541 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2542 child->stepping ? "step" : "continue",
2543 target_pid_to_str (ptid_of (thread)));
2544
2545 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2546 return NULL;
2547 }
2548 }
2549
2550 child->status_pending_p = 1;
2551 child->status_pending = wstat;
2552 return child;
2553 }
2554
2555 /* Return true if THREAD is doing hardware single step. */
2556
2557 static int
2558 maybe_hw_step (struct thread_info *thread)
2559 {
2560 if (can_hardware_single_step ())
2561 return 1;
2562 else
2563 {
2564 struct process_info *proc = get_thread_process (thread);
2565
2566       /* GDBserver must have inserted reinsert breakpoints for
2567 	 software single step.  */
2568 gdb_assert (has_reinsert_breakpoints (proc));
2569 return 0;
2570 }
2571 }
2572
2573 /* Resume LWPs that are currently stopped without any pending status
2574 to report, but are resumed from the core's perspective. */
2575
2576 static void
2577 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2578 {
2579 struct thread_info *thread = (struct thread_info *) entry;
2580 struct lwp_info *lp = get_thread_lwp (thread);
2581
2582 if (lp->stopped
2583 && !lp->suspended
2584 && !lp->status_pending_p
2585 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2586 {
2587 int step = thread->last_resume_kind == resume_step;
2588
2589 if (debug_threads)
2590 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2591 target_pid_to_str (ptid_of (thread)),
2592 paddress (lp->stop_pc),
2593 step);
2594
2595 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2596 }
2597 }
2598
2599 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2600 match FILTER_PTID (leaving others pending). The PTIDs can be:
2601 minus_one_ptid, to specify any child; a pid PTID, specifying all
2602 lwps of a thread group; or a PTID representing a single lwp. Store
2603 the stop status through the status pointer WSTAT. OPTIONS is
2604 passed to the waitpid call. Return 0 if no event was found and
2605 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2606    were found.  Return the PID of the stopped child otherwise.  */
2607
2608 static int
2609 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2610 int *wstatp, int options)
2611 {
2612 struct thread_info *event_thread;
2613 struct lwp_info *event_child, *requested_child;
2614 sigset_t block_mask, prev_mask;
2615
2616 retry:
2617 /* N.B. event_thread points to the thread_info struct that contains
2618 event_child. Keep them in sync. */
2619 event_thread = NULL;
2620 event_child = NULL;
2621 requested_child = NULL;
2622
2623 /* Check for a lwp with a pending status. */
2624
2625 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2626 {
2627 event_thread = (struct thread_info *)
2628 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2629 if (event_thread != NULL)
2630 event_child = get_thread_lwp (event_thread);
2631 if (debug_threads && event_thread)
2632 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2633 }
2634 else if (!ptid_equal (filter_ptid, null_ptid))
2635 {
2636 requested_child = find_lwp_pid (filter_ptid);
2637
2638 if (stopping_threads == NOT_STOPPING_THREADS
2639 && requested_child->status_pending_p
2640 && requested_child->collecting_fast_tracepoint)
2641 {
2642 enqueue_one_deferred_signal (requested_child,
2643 &requested_child->status_pending);
2644 requested_child->status_pending_p = 0;
2645 requested_child->status_pending = 0;
2646 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2647 }
2648
2649 if (requested_child->suspended
2650 && requested_child->status_pending_p)
2651 {
2652 internal_error (__FILE__, __LINE__,
2653 "requesting an event out of a"
2654 " suspended child?");
2655 }
2656
2657 if (requested_child->status_pending_p)
2658 {
2659 event_child = requested_child;
2660 event_thread = get_lwp_thread (event_child);
2661 }
2662 }
2663
2664 if (event_child != NULL)
2665 {
2666 if (debug_threads)
2667 debug_printf ("Got an event from pending child %ld (%04x)\n",
2668 lwpid_of (event_thread), event_child->status_pending);
2669 *wstatp = event_child->status_pending;
2670 event_child->status_pending_p = 0;
2671 event_child->status_pending = 0;
2672 current_thread = event_thread;
2673 return lwpid_of (event_thread);
2674 }
2675
2676 /* But if we don't find a pending event, we'll have to wait.
2677
2678 We only enter this loop if no process has a pending wait status.
2679 Thus any action taken in response to a wait status inside this
2680 loop is responding as soon as we detect the status, not after any
2681 pending events. */
2682
2683 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2684 all signals while here. */
2685 sigfillset (&block_mask);
2686 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
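  /* Blocking SIGCHLD here and unblocking it only inside sigsuspend
     closes the classic race where a child changes state between our
     last WNOHANG waitpid and the suspend: the SIGCHLD stays pending
     and is delivered the instant sigsuspend unmasks it, instead of
     being lost while we sleep.  */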
2687
2688 /* Always pull all events out of the kernel. We'll randomly select
2689 an event LWP out of all that have events, to prevent
2690 starvation. */
2691 while (event_child == NULL)
2692 {
2693 pid_t ret = 0;
2694
2695       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2696 quirks:
2697
2698 - If the thread group leader exits while other threads in the
2699 thread group still exist, waitpid(TGID, ...) hangs. That
2700 waitpid won't return an exit status until the other threads
2701 in the group are reaped.
2702
2703 - When a non-leader thread execs, that thread just vanishes
2704 without reporting an exit (so we'd hang if we waited for it
2705 explicitly in that case). The exec event is reported to
2706 the TGID pid. */
2707 errno = 0;
2708 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2709
2710 if (debug_threads)
2711 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2712 ret, errno ? strerror (errno) : "ERRNO-OK");
2713
2714 if (ret > 0)
2715 {
2716 if (debug_threads)
2717 {
2718 debug_printf ("LLW: waitpid %ld received %s\n",
2719 (long) ret, status_to_str (*wstatp));
2720 }
2721
2722 /* Filter all events. IOW, leave all events pending. We'll
2723 randomly select an event LWP out of all that have events
2724 below. */
2725 linux_low_filter_event (ret, *wstatp);
2726 /* Retry until nothing comes out of waitpid. A single
2727 SIGCHLD can indicate more than one child stopped. */
2728 continue;
2729 }
2730
2731 /* Now that we've pulled all events out of the kernel, resume
2732 LWPs that don't have an interesting event to report. */
2733 if (stopping_threads == NOT_STOPPING_THREADS)
2734 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2735
2736 /* ... and find an LWP with a status to report to the core, if
2737 any. */
2738 event_thread = (struct thread_info *)
2739 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2740 if (event_thread != NULL)
2741 {
2742 event_child = get_thread_lwp (event_thread);
2743 *wstatp = event_child->status_pending;
2744 event_child->status_pending_p = 0;
2745 event_child->status_pending = 0;
2746 break;
2747 }
2748
2749 /* Check for zombie thread group leaders. Those can't be reaped
2750 until all other threads in the thread group are. */
2751 check_zombie_leaders ();
2752
2753 /* If there are no resumed children left in the set of LWPs we
2754 want to wait for, bail. We can't just block in
2755 waitpid/sigsuspend, because lwps might have been left stopped
2756 in trace-stop state, and we'd be stuck forever waiting for
2757 their status to change (which would only happen if we resumed
2758 them). Even if WNOHANG is set, this return code is preferred
2759 over 0 (below), as it is more detailed. */
2760 if ((find_inferior (&all_threads,
2761 not_stopped_callback,
2762 &wait_ptid) == NULL))
2763 {
2764 if (debug_threads)
2765 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2766 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2767 return -1;
2768 }
2769
2770 /* No interesting event to report to the caller. */
2771 if ((options & WNOHANG))
2772 {
2773 if (debug_threads)
2774 debug_printf ("WNOHANG set, no event found\n");
2775
2776 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2777 return 0;
2778 }
2779
2780 /* Block until we get an event reported with SIGCHLD. */
2781 if (debug_threads)
2782 debug_printf ("sigsuspend'ing\n");
2783
2784 sigsuspend (&prev_mask);
2785 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2786 goto retry;
2787 }
2788
2789 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2790
2791 current_thread = event_thread;
2792
2793 return lwpid_of (event_thread);
2794 }
2795
2796 /* Wait for an event from child(ren) PTID. PTIDs can be:
2797 minus_one_ptid, to specify any child; a pid PTID, specifying all
2798 lwps of a thread group; or a PTID representing a single lwp. Store
2799 the stop status through the status pointer WSTAT. OPTIONS is
2800 passed to the waitpid call. Return 0 if no event was found and
2801 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2802    were found.  Return the PID of the stopped child otherwise.  */
2803
2804 static int
2805 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2806 {
2807 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2808 }
2809
2810 /* Count the LWPs that have had events.  */
2811
2812 static int
2813 count_events_callback (struct inferior_list_entry *entry, void *data)
2814 {
2815 struct thread_info *thread = (struct thread_info *) entry;
2816 struct lwp_info *lp = get_thread_lwp (thread);
2817 int *count = (int *) data;
2818
2819 gdb_assert (count != NULL);
2820
2821 /* Count only resumed LWPs that have an event pending. */
2822 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2823 && lp->status_pending_p)
2824 (*count)++;
2825
2826 return 0;
2827 }
2828
2829 /* Select the LWP (if any) that is currently being single-stepped. */
2830
2831 static int
2832 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2833 {
2834 struct thread_info *thread = (struct thread_info *) entry;
2835 struct lwp_info *lp = get_thread_lwp (thread);
2836
2837 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2838 && thread->last_resume_kind == resume_step
2839 && lp->status_pending_p)
2840 return 1;
2841 else
2842 return 0;
2843 }
2844
2845 /* Select the Nth LWP that has had an event. */
2846
2847 static int
2848 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2849 {
2850 struct thread_info *thread = (struct thread_info *) entry;
2851 struct lwp_info *lp = get_thread_lwp (thread);
2852 int *selector = (int *) data;
2853
2854 gdb_assert (selector != NULL);
2855
2856 /* Select only resumed LWPs that have an event pending. */
2857 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2858 && lp->status_pending_p)
2859 if ((*selector)-- == 0)
2860 return 1;
2861
2862 return 0;
2863 }
2864
2865 /* Select one LWP out of those that have events pending. */
2866
2867 static void
2868 select_event_lwp (struct lwp_info **orig_lp)
2869 {
2870 int num_events = 0;
2871 int random_selector;
2872 struct thread_info *event_thread = NULL;
2873
2874 /* In all-stop, give preference to the LWP that is being
2875 single-stepped. There will be at most one, and it's the LWP that
2876 the core is most interested in. If we didn't do this, then we'd
2877 have to handle pending step SIGTRAPs somehow in case the core
2878 later continues the previously-stepped thread, otherwise we'd
2879 report the pending SIGTRAP, and the core, not having stepped the
2880 thread, wouldn't understand what the trap was for, and therefore
2881 would report it to the user as a random signal. */
2882 if (!non_stop)
2883 {
2884 event_thread
2885 = (struct thread_info *) find_inferior (&all_threads,
2886 select_singlestep_lwp_callback,
2887 NULL);
2888 if (event_thread != NULL)
2889 {
2890 if (debug_threads)
2891 debug_printf ("SEL: Select single-step %s\n",
2892 target_pid_to_str (ptid_of (event_thread)));
2893 }
2894 }
2895 if (event_thread == NULL)
2896 {
2897 /* No single-stepping LWP. Select one at random, out of those
2898 which have had events. */
2899
2900 /* First see how many events we have. */
2901 find_inferior (&all_threads, count_events_callback, &num_events);
2902 gdb_assert (num_events > 0);
2903
2904 /* Now randomly pick a LWP out of those that have had
2905 events. */
2906 random_selector = (int)
2907 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
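      /* rand () is uniform over [0, RAND_MAX], so the expression
	 above maps it to an integer uniform over [0, num_events).
	 Dividing by RAND_MAX + 1.0 rather than RAND_MAX guarantees
	 the result is strictly less than num_events.  */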
2908
2909 if (debug_threads && num_events > 1)
2910 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2911 num_events, random_selector);
2912
2913 event_thread
2914 = (struct thread_info *) find_inferior (&all_threads,
2915 select_event_lwp_callback,
2916 &random_selector);
2917 }
2918
2919 if (event_thread != NULL)
2920 {
2921 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2922
2923 /* Switch the event LWP. */
2924 *orig_lp = event_lp;
2925 }
2926 }
2927
2928 /* Decrement the suspend count of an LWP. */
2929
2930 static int
2931 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2932 {
2933 struct thread_info *thread = (struct thread_info *) entry;
2934 struct lwp_info *lwp = get_thread_lwp (thread);
2935
2936 /* Ignore EXCEPT. */
2937 if (lwp == except)
2938 return 0;
2939
2940 lwp_suspended_decr (lwp);
2941 return 0;
2942 }
2943
2944 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2945    non-NULL.  */
2946
2947 static void
2948 unsuspend_all_lwps (struct lwp_info *except)
2949 {
2950 find_inferior (&all_threads, unsuspend_one_lwp, except);
2951 }
2952
2953 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2954 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2955 void *data);
2956 static int lwp_running (struct inferior_list_entry *entry, void *data);
2957 static ptid_t linux_wait_1 (ptid_t ptid,
2958 struct target_waitstatus *ourstatus,
2959 int target_options);
2960
2961 /* Stabilize threads (move out of jump pads).
2962
2963 If a thread is midway collecting a fast tracepoint, we need to
2964 finish the collection and move it out of the jump pad before
2965 reporting the signal.
2966
2967 This avoids recursion while collecting (when a signal arrives
2968 midway, and the signal handler itself collects), which would trash
2969 the trace buffer. In case the user set a breakpoint in a signal
2970 handler, this avoids the backtrace showing the jump pad, etc..
2971 Most importantly, there are certain things we can't do safely if
2972    threads are stopped in a jump pad (or in its callees).  For
2973 example:
2974
2975    - starting a new trace run.  A thread still collecting the
2976    previous run could trash the trace buffer when resumed.  The trace
2977    buffer control structures would have been reset but the thread had
2978    no way to tell.  The thread could even be midway through memcpy'ing
2979    to the buffer, which would mean that when resumed, it would clobber
2980    the trace buffer that had been set up for a new run.
2981
2982 - we can't rewrite/reuse the jump pads for new tracepoints
2983    safely.  Say you do tstart while a thread is stopped midway through
2984 collecting. When the thread is later resumed, it finishes the
2985 collection, and returns to the jump pad, to execute the original
2986 instruction that was under the tracepoint jump at the time the
2987 older run had been started. If the jump pad had been rewritten
2988 since for something else in the new run, the thread would now
2989 execute the wrong / random instructions. */
2990
2991 static void
2992 linux_stabilize_threads (void)
2993 {
2994 struct thread_info *saved_thread;
2995 struct thread_info *thread_stuck;
2996
2997 thread_stuck
2998 = (struct thread_info *) find_inferior (&all_threads,
2999 stuck_in_jump_pad_callback,
3000 NULL);
3001 if (thread_stuck != NULL)
3002 {
3003 if (debug_threads)
3004 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
3005 lwpid_of (thread_stuck));
3006 return;
3007 }
3008
3009 saved_thread = current_thread;
3010
3011 stabilizing_threads = 1;
3012
3013 /* Kick 'em all. */
3014 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
3015
3016 /* Loop until all are stopped out of the jump pads. */
3017 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
3018 {
3019 struct target_waitstatus ourstatus;
3020 struct lwp_info *lwp;
3021 int wstat;
3022
3023       /* Note that we go through the full wait event loop.  While
3024 moving threads out of jump pad, we need to be able to step
3025 over internal breakpoints and such. */
3026 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
3027
3028 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
3029 {
3030 lwp = get_thread_lwp (current_thread);
3031
3032 /* Lock it. */
3033 lwp_suspended_inc (lwp);
3034
3035 if (ourstatus.value.sig != GDB_SIGNAL_0
3036 || current_thread->last_resume_kind == resume_stop)
3037 {
3038 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
3039 enqueue_one_deferred_signal (lwp, &wstat);
3040 }
3041 }
3042 }
3043
3044 unsuspend_all_lwps (NULL);
3045
3046 stabilizing_threads = 0;
3047
3048 current_thread = saved_thread;
3049
3050 if (debug_threads)
3051 {
3052 thread_stuck
3053 = (struct thread_info *) find_inferior (&all_threads,
3054 stuck_in_jump_pad_callback,
3055 NULL);
3056 if (thread_stuck != NULL)
3057 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
3058 lwpid_of (thread_stuck));
3059 }
3060 }
3061
3062 /* Convenience function that is called when the kernel reports an
3063 event that is not passed out to GDB. */
3064
3065 static ptid_t
3066 ignore_event (struct target_waitstatus *ourstatus)
3067 {
3068 /* If we got an event, there may still be others, as a single
3069 SIGCHLD can indicate more than one child stopped. This forces
3070 another target_wait call. */
3071 async_file_mark ();
3072
3073 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3074 return null_ptid;
3075 }
3076
3077 /* Convenience function that is called when the kernel reports an exit
3078 event. This decides whether to report the event to GDB as a
3079 process exit event, a thread exit event, or to suppress the
3080 event. */
3081
3082 static ptid_t
3083 filter_exit_event (struct lwp_info *event_child,
3084 struct target_waitstatus *ourstatus)
3085 {
3086 struct thread_info *thread = get_lwp_thread (event_child);
3087 ptid_t ptid = ptid_of (thread);
3088
3089 if (!last_thread_of_process_p (pid_of (thread)))
3090 {
3091 if (report_thread_events)
3092 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3093 else
3094 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3095
3096 delete_lwp (event_child);
3097 }
3098 return ptid;
3099 }
3100
3101 /* Returns 1 if GDB is interested in any event_child syscalls. */
3102
3103 static int
3104 gdb_catching_syscalls_p (struct lwp_info *event_child)
3105 {
3106 struct thread_info *thread = get_lwp_thread (event_child);
3107 struct process_info *proc = get_thread_process (thread);
3108
3109 return !VEC_empty (int, proc->syscalls_to_catch);
3110 }
3111
3112 /* Returns 1 if GDB is interested in the event_child syscall.
3113 Only to be called when stopped reason is SYSCALL_SIGTRAP. */
3114
3115 static int
3116 gdb_catch_this_syscall_p (struct lwp_info *event_child)
3117 {
3118 int i, iter;
3119 int sysno, sysret;
3120 struct thread_info *thread = get_lwp_thread (event_child);
3121 struct process_info *proc = get_thread_process (thread);
3122
3123 if (VEC_empty (int, proc->syscalls_to_catch))
3124 return 0;
3125
3126 if (VEC_index (int, proc->syscalls_to_catch, 0) == ANY_SYSCALL)
3127 return 1;
3128
3129 get_syscall_trapinfo (event_child, &sysno, &sysret);
3130 for (i = 0;
3131 VEC_iterate (int, proc->syscalls_to_catch, i, iter);
3132 i++)
3133 if (iter == sysno)
3134 return 1;
3135
3136 return 0;
3137 }
3138
3139 /* Wait for process, returns status. */
3140
3141 static ptid_t
3142 linux_wait_1 (ptid_t ptid,
3143 struct target_waitstatus *ourstatus, int target_options)
3144 {
3145 int w;
3146 struct lwp_info *event_child;
3147 int options;
3148 int pid;
3149 int step_over_finished;
3150 int bp_explains_trap;
3151 int maybe_internal_trap;
3152 int report_to_gdb;
3153 int trace_event;
3154 int in_step_range;
3155 int any_resumed;
3156
3157 if (debug_threads)
3158 {
3159 debug_enter ();
3160 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
3161 }
3162
3163 /* Translate generic target options into linux options. */
3164 options = __WALL;
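  /* __WALL waits for both "clone" and "non-clone" children alike,
     which is needed to get events from every LWP regardless of the
     flags it was created with.  */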
3165 if (target_options & TARGET_WNOHANG)
3166 options |= WNOHANG;
3167
3168 bp_explains_trap = 0;
3169 trace_event = 0;
3170 in_step_range = 0;
3171 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3172
3173 /* Find a resumed LWP, if any. */
3174 if (find_inferior (&all_threads,
3175 status_pending_p_callback,
3176 &minus_one_ptid) != NULL)
3177 any_resumed = 1;
3178 else if ((find_inferior (&all_threads,
3179 not_stopped_callback,
3180 &minus_one_ptid) != NULL))
3181 any_resumed = 1;
3182 else
3183 any_resumed = 0;
3184
3185 if (ptid_equal (step_over_bkpt, null_ptid))
3186 pid = linux_wait_for_event (ptid, &w, options);
3187 else
3188 {
3189 if (debug_threads)
3190 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
3191 target_pid_to_str (step_over_bkpt));
3192 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
3193 }
3194
3195 if (pid == 0 || (pid == -1 && !any_resumed))
3196 {
3197 gdb_assert (target_options & TARGET_WNOHANG);
3198
3199 if (debug_threads)
3200 {
3201 debug_printf ("linux_wait_1 ret = null_ptid, "
3202 "TARGET_WAITKIND_IGNORE\n");
3203 debug_exit ();
3204 }
3205
3206 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3207 return null_ptid;
3208 }
3209 else if (pid == -1)
3210 {
3211 if (debug_threads)
3212 {
3213 debug_printf ("linux_wait_1 ret = null_ptid, "
3214 "TARGET_WAITKIND_NO_RESUMED\n");
3215 debug_exit ();
3216 }
3217
3218 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3219 return null_ptid;
3220 }
3221
3222 event_child = get_thread_lwp (current_thread);
3223
3224 /* linux_wait_for_event only returns an exit status for the last
3225 child of a process. Report it. */
3226 if (WIFEXITED (w) || WIFSIGNALED (w))
3227 {
3228 if (WIFEXITED (w))
3229 {
3230 ourstatus->kind = TARGET_WAITKIND_EXITED;
3231 ourstatus->value.integer = WEXITSTATUS (w);
3232
3233 if (debug_threads)
3234 {
3235 debug_printf ("linux_wait_1 ret = %s, exited with "
3236 "retcode %d\n",
3237 target_pid_to_str (ptid_of (current_thread)),
3238 WEXITSTATUS (w));
3239 debug_exit ();
3240 }
3241 }
3242 else
3243 {
3244 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3245 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3246
3247 if (debug_threads)
3248 {
3249 debug_printf ("linux_wait_1 ret = %s, terminated with "
3250 "signal %d\n",
3251 target_pid_to_str (ptid_of (current_thread)),
3252 WTERMSIG (w));
3253 debug_exit ();
3254 }
3255 }
3256
3257 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3258 return filter_exit_event (event_child, ourstatus);
3259
3260 return ptid_of (current_thread);
3261 }
3262
3263   /* If a step-over executes a breakpoint instruction, then in the
3264      case of a hardware single step a gdb/gdbserver breakpoint had
3265      been planted on top of a permanent breakpoint, while in the case
3266      of a software single step it may just mean that gdbserver hit
3267      the reinsert breakpoint.  The PC has been adjusted by
3268      save_stop_reason to point at the breakpoint address.
3269      So, in the case of hardware single step, advance the PC manually
3270      past the breakpoint, and in the case of software single step
3271      advance only if it's not the reinsert breakpoint we are hitting.
3272      This avoids the program trapping the permanent breakpoint
3273      forever.  */
3274 if (!ptid_equal (step_over_bkpt, null_ptid)
3275 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3276 && (event_child->stepping
3277 || !reinsert_breakpoint_inserted_here (event_child->stop_pc)))
3278 {
3279 int increment_pc = 0;
3280 int breakpoint_kind = 0;
3281 CORE_ADDR stop_pc = event_child->stop_pc;
3282
3283 breakpoint_kind =
3284 the_target->breakpoint_kind_from_current_state (&stop_pc);
3285 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3286
3287 if (debug_threads)
3288 {
3289 debug_printf ("step-over for %s executed software breakpoint\n",
3290 target_pid_to_str (ptid_of (current_thread)));
3291 }
3292
3293 if (increment_pc != 0)
3294 {
3295 struct regcache *regcache
3296 = get_thread_regcache (current_thread, 1);
3297
3298 event_child->stop_pc += increment_pc;
3299 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3300
3301 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3302 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3303 }
3304 }
3305
3306 /* If this event was not handled before, and is not a SIGTRAP, we
3307 report it. SIGILL and SIGSEGV are also treated as traps in case
3308 a breakpoint is inserted at the current PC. If this target does
3309 not support internal breakpoints at all, we also report the
3310 SIGTRAP without further processing; it's of no concern to us. */
3311 maybe_internal_trap
3312 = (supports_breakpoints ()
3313 && (WSTOPSIG (w) == SIGTRAP
3314 || ((WSTOPSIG (w) == SIGILL
3315 || WSTOPSIG (w) == SIGSEGV)
3316 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3317
3318 if (maybe_internal_trap)
3319 {
3320 /* Handle anything that requires bookkeeping before deciding to
3321 report the event or continue waiting. */
3322
3323 /* First check if we can explain the SIGTRAP with an internal
3324 breakpoint, or if we should possibly report the event to GDB.
3325 Do this before anything that may remove or insert a
3326 breakpoint. */
3327 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3328
3329 /* We have a SIGTRAP, possibly a step-over dance has just
3330 finished. If so, tweak the state machine accordingly,
3331 reinsert breakpoints and delete any reinsert (software
3332 single-step) breakpoints. */
3333 step_over_finished = finish_step_over (event_child);
3334
3335 /* Now invoke the callbacks of any internal breakpoints there. */
3336 check_breakpoints (event_child->stop_pc);
3337
3338 /* Handle tracepoint data collecting. This may overflow the
3339 trace buffer, and cause a tracing stop, removing
3340 breakpoints. */
3341 trace_event = handle_tracepoints (event_child);
3342
3343 if (bp_explains_trap)
3344 {
3345 if (debug_threads)
3346 debug_printf ("Hit a gdbserver breakpoint.\n");
3347 }
3348 }
3349 else
3350 {
3351 /* We have some other signal, possibly a step-over dance was in
3352 progress, and it should be cancelled too. */
3353 step_over_finished = finish_step_over (event_child);
3354 }
3355
3356 /* We have all the data we need. Either report the event to GDB, or
3357 resume threads and keep waiting for more. */
3358
3359 /* If we're collecting a fast tracepoint, finish the collection and
3360 move out of the jump pad before delivering a signal. See
3361 linux_stabilize_threads. */
3362
3363 if (WIFSTOPPED (w)
3364 && WSTOPSIG (w) != SIGTRAP
3365 && supports_fast_tracepoints ()
3366 && agent_loaded_p ())
3367 {
3368 if (debug_threads)
3369 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3370 "to defer or adjust it.\n",
3371 WSTOPSIG (w), lwpid_of (current_thread));
3372
3373 /* Allow debugging the jump pad itself. */
3374 if (current_thread->last_resume_kind != resume_step
3375 && maybe_move_out_of_jump_pad (event_child, &w))
3376 {
3377 enqueue_one_deferred_signal (event_child, &w);
3378
3379 if (debug_threads)
3380 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3381 WSTOPSIG (w), lwpid_of (current_thread));
3382
3383 linux_resume_one_lwp (event_child, 0, 0, NULL);
3384
3385 return ignore_event (ourstatus);
3386 }
3387 }
3388
3389 if (event_child->collecting_fast_tracepoint)
3390 {
3391 if (debug_threads)
3392 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3393 "Check if we're already there.\n",
3394 lwpid_of (current_thread),
3395 event_child->collecting_fast_tracepoint);
3396
3397 trace_event = 1;
3398
3399 event_child->collecting_fast_tracepoint
3400 = linux_fast_tracepoint_collecting (event_child, NULL);
3401
3402 if (event_child->collecting_fast_tracepoint != 1)
3403 {
3404 /* No longer need this breakpoint. */
3405 if (event_child->exit_jump_pad_bkpt != NULL)
3406 {
3407 if (debug_threads)
3408 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3409 "stopping all threads momentarily.\n");
3410
3411 /* Other running threads could hit this breakpoint.
3412 We don't handle moribund locations like GDB does,
3413 instead we always pause all threads when removing
3414 breakpoints, so that any step-over or
3415 decr_pc_after_break adjustment is always taken
3416 care of while the breakpoint is still
3417 inserted. */
3418 stop_all_lwps (1, event_child);
3419
3420 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3421 event_child->exit_jump_pad_bkpt = NULL;
3422
3423 unstop_all_lwps (1, event_child);
3424
3425 gdb_assert (event_child->suspended >= 0);
3426 }
3427 }
3428
3429 if (event_child->collecting_fast_tracepoint == 0)
3430 {
3431 if (debug_threads)
3432 debug_printf ("fast tracepoint finished "
3433 "collecting successfully.\n");
3434
3435 /* We may have a deferred signal to report. */
3436 if (dequeue_one_deferred_signal (event_child, &w))
3437 {
3438 if (debug_threads)
3439 debug_printf ("dequeued one signal.\n");
3440 }
3441 else
3442 {
3443 if (debug_threads)
3444 debug_printf ("no deferred signals.\n");
3445
3446 if (stabilizing_threads)
3447 {
3448 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3449 ourstatus->value.sig = GDB_SIGNAL_0;
3450
3451 if (debug_threads)
3452 {
3453 debug_printf ("linux_wait_1 ret = %s, stopped "
3454 "while stabilizing threads\n",
3455 target_pid_to_str (ptid_of (current_thread)));
3456 debug_exit ();
3457 }
3458
3459 return ptid_of (current_thread);
3460 }
3461 }
3462 }
3463 }
3464
3465 /* Check whether GDB would be interested in this event. */
3466
3467 /* Check if GDB is interested in this syscall. */
3468 if (WIFSTOPPED (w)
3469 && WSTOPSIG (w) == SYSCALL_SIGTRAP
3470 && !gdb_catch_this_syscall_p (event_child))
3471 {
3472 if (debug_threads)
3473 {
3474 debug_printf ("Ignored syscall for LWP %ld.\n",
3475 lwpid_of (current_thread));
3476 }
3477
3478 linux_resume_one_lwp (event_child, event_child->stepping,
3479 0, NULL);
3480 return ignore_event (ourstatus);
3481 }
3482
3483 /* If GDB is not interested in this signal, don't stop other
3484 threads, and don't report it to GDB. Just resume the inferior
3485 right away. We do this for threading-related signals as well as
3486 any that GDB specifically requested we ignore. But never ignore
3487 SIGSTOP if we sent it ourselves, and do not ignore signals when
3488 stepping - they may require special handling to skip the signal
3489 handler. Also never ignore signals that could be caused by a
3490 breakpoint. */
3491 if (WIFSTOPPED (w)
3492 && current_thread->last_resume_kind != resume_step
3493 && (
3494 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3495 (current_process ()->priv->thread_db != NULL
3496 && (WSTOPSIG (w) == __SIGRTMIN
3497 || WSTOPSIG (w) == __SIGRTMIN + 1))
3498 ||
3499 #endif
3500 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3501 && !(WSTOPSIG (w) == SIGSTOP
3502 && current_thread->last_resume_kind == resume_stop)
3503 && !linux_wstatus_maybe_breakpoint (w))))
3504 {
3505 siginfo_t info, *info_p;
3506
3507 if (debug_threads)
3508 debug_printf ("Ignored signal %d for LWP %ld.\n",
3509 WSTOPSIG (w), lwpid_of (current_thread));
3510
3511 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3512 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3513 info_p = &info;
3514 else
3515 info_p = NULL;
3516
3517 if (step_over_finished)
3518 {
3519 /* We cancelled this thread's step-over above. We still
3520 need to unsuspend all other LWPs, and set them back
3521 running again while the signal handler runs. */
3522 unsuspend_all_lwps (event_child);
3523
3524 /* Enqueue the pending signal info so that proceed_all_lwps
3525 doesn't lose it. */
3526 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3527
3528 proceed_all_lwps ();
3529 }
3530 else
3531 {
3532 linux_resume_one_lwp (event_child, event_child->stepping,
3533 WSTOPSIG (w), info_p);
3534 }
3535 return ignore_event (ourstatus);
3536 }
3537
3538 /* Note that all addresses are always "out of the step range" when
3539 there's no range to begin with. */
3540 in_step_range = lwp_in_step_range (event_child);
3541
3542 /* If GDB wanted this thread to single step, and the thread is out
3543 of the step range, we always want to report the SIGTRAP, and let
3544 GDB handle it. Watchpoints should always be reported. So should
3545 signals we can't explain. A SIGTRAP we can't explain could be a
3546      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3547      we do, we'll be able to handle GDB breakpoints on top of internal
3548 breakpoints, by handling the internal breakpoint and still
3549 reporting the event to GDB. If we don't, we're out of luck, GDB
3550 won't see the breakpoint hit. If we see a single-step event but
3551 the thread should be continuing, don't pass the trap to gdb.
3552 That indicates that we had previously finished a single-step but
3553 left the single-step pending -- see
3554 complete_ongoing_step_over. */
3555 report_to_gdb = (!maybe_internal_trap
3556 || (current_thread->last_resume_kind == resume_step
3557 && !in_step_range)
3558 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3559 || (!in_step_range
3560 && !bp_explains_trap
3561 && !trace_event
3562 && !step_over_finished
3563 && !(current_thread->last_resume_kind == resume_continue
3564 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3565 || (gdb_breakpoint_here (event_child->stop_pc)
3566 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3567 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3568 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3569
3570 run_breakpoint_commands (event_child->stop_pc);
3571
3572 /* We found no reason GDB would want us to stop. We either hit one
3573 of our own breakpoints, or finished an internal step GDB
3574 shouldn't know about. */
3575 if (!report_to_gdb)
3576 {
3577 if (debug_threads)
3578 {
3579 if (bp_explains_trap)
3580 debug_printf ("Hit a gdbserver breakpoint.\n");
3581 if (step_over_finished)
3582 debug_printf ("Step-over finished.\n");
3583 if (trace_event)
3584 debug_printf ("Tracepoint event.\n");
3585 if (lwp_in_step_range (event_child))
3586 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3587 paddress (event_child->stop_pc),
3588 paddress (event_child->step_range_start),
3589 paddress (event_child->step_range_end));
3590 }
3591
3592 /* We're not reporting this breakpoint to GDB, so apply the
3593 decr_pc_after_break adjustment to the inferior's regcache
3594 ourselves. */
3595
3596 if (the_low_target.set_pc != NULL)
3597 {
3598 struct regcache *regcache
3599 = get_thread_regcache (current_thread, 1);
3600 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3601 }
3602
3603 /* We may have finished stepping over a breakpoint. If so,
3604 we've stopped and suspended all LWPs momentarily except the
3605 stepping one. This is where we resume them all again. We're
3606 going to keep waiting, so use proceed, which handles stepping
3607 over the next breakpoint. */
3608 if (debug_threads)
3609 debug_printf ("proceeding all threads.\n");
3610
3611 if (step_over_finished)
3612 unsuspend_all_lwps (event_child);
3613
3614 proceed_all_lwps ();
3615 return ignore_event (ourstatus);
3616 }
3617
3618 if (debug_threads)
3619 {
3620 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3621 {
3622 char *str;
3623
3624 str = target_waitstatus_to_string (&event_child->waitstatus);
3625 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3626 lwpid_of (get_lwp_thread (event_child)), str);
3627 xfree (str);
3628 }
3629 if (current_thread->last_resume_kind == resume_step)
3630 {
3631 if (event_child->step_range_start == event_child->step_range_end)
3632 debug_printf ("GDB wanted to single-step, reporting event.\n");
3633 else if (!lwp_in_step_range (event_child))
3634 debug_printf ("Out of step range, reporting event.\n");
3635 }
3636 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3637 debug_printf ("Stopped by watchpoint.\n");
3638 else if (gdb_breakpoint_here (event_child->stop_pc))
3639 debug_printf ("Stopped by GDB breakpoint.\n");
3640       debug_printf ("Hit a non-gdbserver trap event.\n");
3642 }
3643
3644 /* Alright, we're going to report a stop. */
3645
3646 if (!stabilizing_threads)
3647 {
3648 /* In all-stop, stop all threads. */
3649 if (!non_stop)
3650 stop_all_lwps (0, NULL);
3651
3652 /* If we're not waiting for a specific LWP, choose an event LWP
3653 from among those that have had events. Giving equal priority
3654 to all LWPs that have had events helps prevent
3655 starvation. */
3656 if (ptid_equal (ptid, minus_one_ptid))
3657 {
3658 event_child->status_pending_p = 1;
3659 event_child->status_pending = w;
3660
3661 select_event_lwp (&event_child);
3662
3663 /* current_thread and event_child must stay in sync. */
3664 current_thread = get_lwp_thread (event_child);
3665
3666 event_child->status_pending_p = 0;
3667 w = event_child->status_pending;
3668 }
3669
3670 if (step_over_finished)
3671 {
3672 if (!non_stop)
3673 {
3674 /* If we were doing a step-over, all other threads but
3675 the stepping one had been paused in start_step_over,
3676 with their suspend counts incremented. We don't want
3677 to do a full unstop/unpause, because we're in
3678 all-stop mode (so we want threads stopped), but we
3679 still need to unsuspend the other threads, to
3680 decrement their `suspended' count back. */
3681 unsuspend_all_lwps (event_child);
3682 }
3683 else
3684 {
3685 /* If we just finished a step-over, then all threads had
3686 been momentarily paused. In all-stop, that's fine,
3687 we want threads stopped by now anyway. In non-stop,
3688 we need to re-resume threads that GDB wanted to be
3689 running. */
3690 unstop_all_lwps (1, event_child);
3691 }
3692 }
3693
3694 /* Stabilize threads (move out of jump pads). */
3695 if (!non_stop)
3696 stabilize_threads ();
3697 }
3698 else
3699 {
3700 /* If we just finished a step-over, then all threads had been
3701 momentarily paused. In all-stop, that's fine, we want
3702 threads stopped by now anyway. In non-stop, we need to
3703 re-resume threads that GDB wanted to be running. */
3704 if (step_over_finished)
3705 unstop_all_lwps (1, event_child);
3706 }
3707
3708 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3709 {
3710 /* If the reported event is an exit, fork, vfork or exec, let
3711 GDB know. */
3712 *ourstatus = event_child->waitstatus;
3713 /* Clear the event lwp's waitstatus since we handled it already. */
3714 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3715 }
3716 else
3717 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3718
3719 /* Now that we've selected our final event LWP, un-adjust its PC if
3720 it was a software breakpoint, and the client doesn't know we can
3721 adjust the breakpoint ourselves. */
3722 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3723 && !swbreak_feature)
3724 {
3725 int decr_pc = the_low_target.decr_pc_after_break;
3726
3727 if (decr_pc != 0)
3728 {
3729 struct regcache *regcache
3730 = get_thread_regcache (current_thread, 1);
3731 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3732 }
3733 }
3734
3735 if (WSTOPSIG (w) == SYSCALL_SIGTRAP)
3736 {
3737 int sysret;
3738
3739 get_syscall_trapinfo (event_child,
3740 &ourstatus->value.syscall_number, &sysret);
3741 ourstatus->kind = event_child->syscall_state;
3742 }
3743 else if (current_thread->last_resume_kind == resume_stop
3744 && WSTOPSIG (w) == SIGSTOP)
3745 {
3746 /* A thread that GDB requested to stop with vCont;t stopped
3747 cleanly, so report it as GDB_SIGNAL_0. The use of
3748 SIGSTOP is an implementation detail. */
3749 ourstatus->value.sig = GDB_SIGNAL_0;
3750 }
3751 else if (current_thread->last_resume_kind == resume_stop
3752 && WSTOPSIG (w) != SIGSTOP)
3753 {
3754 /* A thread that GDB requested to stop with vCont;t, but that
3755 stopped for some other reason. */
3756 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3757 }
3758 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3759 {
3760 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3761 }
3762
3763 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3764
3765 if (debug_threads)
3766 {
3767 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3768 target_pid_to_str (ptid_of (current_thread)),
3769 ourstatus->kind, ourstatus->value.sig);
3770 debug_exit ();
3771 }
3772
3773 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3774 return filter_exit_event (event_child, ourstatus);
3775
3776 return ptid_of (current_thread);
3777 }
3778
3779 /* Get rid of any pending event in the pipe. */
3780 static void
3781 async_file_flush (void)
3782 {
3783 int ret;
3784 char buf;
3785
3786 do
3787 ret = read (linux_event_pipe[0], &buf, 1);
3788 while (ret >= 0 || (ret == -1 && errno == EINTR));
3789 }
3790
3791 /* Put something in the pipe, so the event loop wakes up. */
3792 static void
3793 async_file_mark (void)
3794 {
3795 int ret;
3796
3797 async_file_flush ();
3798
3799 do
3800 ret = write (linux_event_pipe[1], "+", 1);
3801 while (ret == 0 || (ret == -1 && errno == EINTR));
3802
3803 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3804 be awakened anyway. */
3805 }
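
/* Illustrative sketch only (assumed setup, not gdbserver code): the
   event pipe drained and written by async_file_flush/async_file_mark
   above follows the usual self-pipe trick, with both ends made
   non-blocking so neither the drain loop nor the wake-up write can
   ever block.  The helper name is hypothetical.  */
#if 0
static int
example_make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  /* A non-blocking read end makes the flush loop stop on EAGAIN; a
     non-blocking write end makes writing to a full pipe fail with
     EAGAIN, which async_file_mark deliberately ignores.  */
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}
#endif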
3806
3807 static ptid_t
3808 linux_wait (ptid_t ptid,
3809 struct target_waitstatus *ourstatus, int target_options)
3810 {
3811 ptid_t event_ptid;
3812
3813 /* Flush the async file first. */
3814 if (target_is_async_p ())
3815 async_file_flush ();
3816
3817 do
3818 {
3819 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3820 }
3821 while ((target_options & TARGET_WNOHANG) == 0
3822 && ptid_equal (event_ptid, null_ptid)
3823 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3824
3825 /* If at least one stop was reported, there may be more. A single
3826 SIGCHLD can signal more than one child stop. */
3827 if (target_is_async_p ()
3828 && (target_options & TARGET_WNOHANG) != 0
3829 && !ptid_equal (event_ptid, null_ptid))
3830 async_file_mark ();
3831
3832 return event_ptid;
3833 }
3834
3835 /* Send a signal to an LWP. */
3836
3837 static int
3838 kill_lwp (unsigned long lwpid, int signo)
3839 {
3840 int ret;
3841
3842 errno = 0;
3843 ret = syscall (__NR_tkill, lwpid, signo);
3844 if (errno == ENOSYS)
3845 {
3846 /* If tkill fails, then we are not using nptl threads, a
3847 configuration we no longer support. */
3848 perror_with_name (("tkill"));
3849 }
3850 return ret;
3851 }
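
/* Illustrative sketch only: kernels 2.5.75 and later also provide
   tgkill, which additionally checks the thread group ID and so avoids
   signaling an unrelated thread if a TID is reused.  gdbserver itself
   uses tkill above; this hypothetical variant is shown for contrast.  */
#if 0
static int
example_tgkill (int tgid, int tid, int signo)
{
  return syscall (__NR_tgkill, tgid, tid, signo);
}
#endif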
3852
3853 void
3854 linux_stop_lwp (struct lwp_info *lwp)
3855 {
3856 send_sigstop (lwp);
3857 }
3858
3859 static void
3860 send_sigstop (struct lwp_info *lwp)
3861 {
3862 int pid;
3863
3864 pid = lwpid_of (get_lwp_thread (lwp));
3865
3866 /* If we already have a pending stop signal for this process, don't
3867 send another. */
3868 if (lwp->stop_expected)
3869 {
3870 if (debug_threads)
3871 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3872
3873 return;
3874 }
3875
3876 if (debug_threads)
3877 debug_printf ("Sending sigstop to lwp %d\n", pid);
3878
3879 lwp->stop_expected = 1;
3880 kill_lwp (pid, SIGSTOP);
3881 }
3882
3883 static int
3884 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3885 {
3886 struct thread_info *thread = (struct thread_info *) entry;
3887 struct lwp_info *lwp = get_thread_lwp (thread);
3888
3889 /* Ignore EXCEPT. */
3890 if (lwp == except)
3891 return 0;
3892
3893 if (lwp->stopped)
3894 return 0;
3895
3896 send_sigstop (lwp);
3897 return 0;
3898 }
3899
3900 /* Increment the suspend count of an LWP, and stop it, if not stopped
3901 yet. */
3902 static int
3903 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3904 void *except)
3905 {
3906 struct thread_info *thread = (struct thread_info *) entry;
3907 struct lwp_info *lwp = get_thread_lwp (thread);
3908
3909 /* Ignore EXCEPT. */
3910 if (lwp == except)
3911 return 0;
3912
3913 lwp_suspended_inc (lwp);
3914
3915 return send_sigstop_callback (entry, except);
3916 }
3917
3918 static void
3919 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3920 {
3921 /* Store the exit status for later. */
3922 lwp->status_pending_p = 1;
3923 lwp->status_pending = wstat;
3924
3925 /* Store in waitstatus as well, as there's nothing else to process
3926 for this event. */
3927 if (WIFEXITED (wstat))
3928 {
3929 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3930 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3931 }
3932 else if (WIFSIGNALED (wstat))
3933 {
3934 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3935 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3936 }
3937
3938 /* Prevent trying to stop it. */
3939 lwp->stopped = 1;
3940
3941 /* No further stops are expected from a dead lwp. */
3942 lwp->stop_expected = 0;
3943 }
3944
3945 /* Return true if LWP has exited already, and has a pending exit event
3946 to report to GDB. */
3947
3948 static int
3949 lwp_is_marked_dead (struct lwp_info *lwp)
3950 {
3951 return (lwp->status_pending_p
3952 && (WIFEXITED (lwp->status_pending)
3953 || WIFSIGNALED (lwp->status_pending)));
3954 }
3955
3956 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3957
3958 static void
3959 wait_for_sigstop (void)
3960 {
3961 struct thread_info *saved_thread;
3962 ptid_t saved_tid;
3963 int wstat;
3964 int ret;
3965
3966 saved_thread = current_thread;
3967 if (saved_thread != NULL)
3968 saved_tid = saved_thread->entry.id;
3969 else
3970 saved_tid = null_ptid; /* avoid bogus unused warning */
3971
3972 if (debug_threads)
3973 debug_printf ("wait_for_sigstop: pulling events\n");
3974
3975 /* Passing NULL_PTID as filter indicates we want all events to be
3976 left pending. Eventually this returns when there are no
3977 unwaited-for children left. */
3978 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3979 &wstat, __WALL);
3980 gdb_assert (ret == -1);
3981
3982 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3983 current_thread = saved_thread;
3984 else
3985 {
3986 if (debug_threads)
3987 debug_printf ("Previously current thread died.\n");
3988
3989 /* We can't change the current inferior behind GDB's back,
3990 otherwise, a subsequent command may apply to the wrong
3991 process. */
3992 current_thread = NULL;
3993 }
3994 }
3995
3996 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3997 move it out, because we need to report the stop event to GDB. For
3998 example, if the user puts a breakpoint in the jump pad, it's
3999 because she wants to debug it. */
4000
4001 static int
4002 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
4003 {
4004 struct thread_info *thread = (struct thread_info *) entry;
4005 struct lwp_info *lwp = get_thread_lwp (thread);
4006
4007 if (lwp->suspended != 0)
4008 {
4009 internal_error (__FILE__, __LINE__,
4010 "LWP %ld is suspended, suspended=%d\n",
4011 lwpid_of (thread), lwp->suspended);
4012 }
4013 gdb_assert (lwp->stopped);
4014
4015 /* Allow debugging the jump pad, gdb_collect, etc. */
4016 return (supports_fast_tracepoints ()
4017 && agent_loaded_p ()
4018 && (gdb_breakpoint_here (lwp->stop_pc)
4019 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
4020 || thread->last_resume_kind == resume_step)
4021 && linux_fast_tracepoint_collecting (lwp, NULL));
4022 }
4023
4024 static void
4025 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
4026 {
4027 struct thread_info *thread = (struct thread_info *) entry;
4028 struct thread_info *saved_thread;
4029 struct lwp_info *lwp = get_thread_lwp (thread);
4030 int *wstat;
4031
4032 if (lwp->suspended != 0)
4033 {
4034 internal_error (__FILE__, __LINE__,
4035 "LWP %ld is suspended, suspended=%d\n",
4036 lwpid_of (thread), lwp->suspended);
4037 }
4038 gdb_assert (lwp->stopped);
4039
4040 /* For gdb_breakpoint_here. */
4041 saved_thread = current_thread;
4042 current_thread = thread;
4043
4044 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
4045
4046 /* Allow debugging the jump pad, gdb_collect, etc. */
4047 if (!gdb_breakpoint_here (lwp->stop_pc)
4048 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
4049 && thread->last_resume_kind != resume_step
4050 && maybe_move_out_of_jump_pad (lwp, wstat))
4051 {
4052 if (debug_threads)
4053 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
4054 lwpid_of (thread));
4055
4056 if (wstat)
4057 {
4058 lwp->status_pending_p = 0;
4059 enqueue_one_deferred_signal (lwp, wstat);
4060
4061 if (debug_threads)
4062 debug_printf ("Signal %d for LWP %ld deferred "
4063 "(in jump pad)\n",
4064 WSTOPSIG (*wstat), lwpid_of (thread));
4065 }
4066
4067 linux_resume_one_lwp (lwp, 0, 0, NULL);
4068 }
4069 else
4070 lwp_suspended_inc (lwp);
4071
4072 current_thread = saved_thread;
4073 }
4074
4075 static int
4076 lwp_running (struct inferior_list_entry *entry, void *data)
4077 {
4078 struct thread_info *thread = (struct thread_info *) entry;
4079 struct lwp_info *lwp = get_thread_lwp (thread);
4080
4081 if (lwp_is_marked_dead (lwp))
4082 return 0;
4083 if (lwp->stopped)
4084 return 0;
4085 return 1;
4086 }
4087
4088 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
4089 If SUSPEND, then also increase the suspend count of every LWP,
4090 except EXCEPT. */
4091
4092 static void
4093 stop_all_lwps (int suspend, struct lwp_info *except)
4094 {
4095 /* Should not be called recursively. */
4096 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
4097
4098 if (debug_threads)
4099 {
4100 debug_enter ();
4101 debug_printf ("stop_all_lwps (%s, except=%s)\n",
4102 suspend ? "stop-and-suspend" : "stop",
4103 except != NULL
4104 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
4105 : "none");
4106 }
4107
4108 stopping_threads = (suspend
4109 ? STOPPING_AND_SUSPENDING_THREADS
4110 : STOPPING_THREADS);
4111
4112 if (suspend)
4113 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
4114 else
4115 find_inferior (&all_threads, send_sigstop_callback, except);
4116 wait_for_sigstop ();
4117 stopping_threads = NOT_STOPPING_THREADS;
4118
4119 if (debug_threads)
4120 {
4121 debug_printf ("stop_all_lwps done, setting stopping_threads "
4122 "back to !stopping\n");
4123 debug_exit ();
4124 }
4125 }
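
/* Illustrative only: how callers pair the two modes of stop_all_lwps.
   A plain stop is undone by unstop_all_lwps (0, except); a
   stop-and-suspend, as used by the step-over machinery, must also
   decrement the suspend counts again, e.g.:

     stop_all_lwps (1, lwp);     // stop others, bump their `suspended'
     ... single-step LWP past the breakpoint ...
     unstop_all_lwps (1, lwp);   // unsuspend them and set them running

   Keeping these counts balanced is what the gdb_assert
   (lwp->suspended >= 0) checks elsewhere in this file enforce.  */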
4126
4127 /* Enqueue one signal in the chain of signals which need to be
4128 delivered to this process on next resume. */
4129
4130 static void
4131 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
4132 {
4133 struct pending_signals *p_sig = XNEW (struct pending_signals);
4134
4135 p_sig->prev = lwp->pending_signals;
4136 p_sig->signal = signal;
4137 if (info == NULL)
4138 memset (&p_sig->info, 0, sizeof (siginfo_t));
4139 else
4140 memcpy (&p_sig->info, info, sizeof (siginfo_t));
4141 lwp->pending_signals = p_sig;
4142 }
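
/* Illustrative sketch only: each new entry's `prev' above points at
   the previously queued signal, so the list head is the newest entry.
   Consumers (see linux_resume_one_lwp_throw below) walk to the tail
   and deliver from there, which yields FIFO delivery order.  The
   helper name is hypothetical.  */
#if 0
static int
example_oldest_pending_signal (struct lwp_info *lwp)
{
  struct pending_signals *p = lwp->pending_signals;

  if (p == NULL)
    return 0;
  while (p->prev != NULL)
    p = p->prev;		/* Walk back to the oldest entry.  */
  return p->signal;
}
#endif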
4143
4144 /* Install breakpoints for software single stepping. */
4145
4146 static void
4147 install_software_single_step_breakpoints (struct lwp_info *lwp)
4148 {
4149 int i;
4150 CORE_ADDR pc;
4151 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4152 VEC (CORE_ADDR) *next_pcs = NULL;
4153 struct cleanup *old_chain = make_cleanup (VEC_cleanup (CORE_ADDR), &next_pcs);
4154
4155 next_pcs = (*the_low_target.get_next_pcs) (regcache);
4156
4157 for (i = 0; VEC_iterate (CORE_ADDR, next_pcs, i, pc); ++i)
4158 set_reinsert_breakpoint (pc);
4159
4160 do_cleanups (old_chain);
4161 }
4162
4163 /* Single step via hardware or software single step.
4164 Return 1 if hardware single-stepping, 0 if software
4165 single-stepping or unable to single-step. */
4166
4167 static int
4168 single_step (struct lwp_info* lwp)
4169 {
4170 int step = 0;
4171
4172 if (can_hardware_single_step ())
4173 {
4174 step = 1;
4175 }
4176 else if (can_software_single_step ())
4177 {
4178 install_software_single_step_breakpoints (lwp);
4179 step = 0;
4180 }
4181 else
4182 {
4183 if (debug_threads)
4184 debug_printf ("stepping is not implemented on this target\n");
4185 }
4186
4187 return step;
4188 }
4189
4190 /* The signal can be delivered to the inferior if we are not trying to
4191 finish a fast tracepoint collect. Since a signal can be delivered
4192 during a step-over, the program may enter the signal handler and trap
4193 again after returning from it. We can live with the spurious
4194 double traps. */
4195
4196 static int
4197 lwp_signal_can_be_delivered (struct lwp_info *lwp)
4198 {
4199 return !lwp->collecting_fast_tracepoint;
4200 }
4201
4202 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
4203 SIGNAL is nonzero, give it that signal. */
4204
4205 static void
4206 linux_resume_one_lwp_throw (struct lwp_info *lwp,
4207 int step, int signal, siginfo_t *info)
4208 {
4209 struct thread_info *thread = get_lwp_thread (lwp);
4210 struct thread_info *saved_thread;
4211 int fast_tp_collecting;
4212 int ptrace_request;
4213 struct process_info *proc = get_thread_process (thread);
4214
4215 /* Note that the target description may not be initialised
4216 (proc->tdesc == NULL) at this point, because the program hasn't
4217 stopped at the first instruction yet. In that case GDBserver is
4218 skipping the extra traps from the wrapper program (see option
4219 --wrapper). Code in this function that requires register access
4220 should be guarded by a check that proc->tdesc is not NULL. */
4221
4222 if (lwp->stopped == 0)
4223 return;
4224
4225 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
4226
4227 fast_tp_collecting = lwp->collecting_fast_tracepoint;
4228
4229 gdb_assert (!stabilizing_threads || fast_tp_collecting);
4230
4231 /* Cancel actions that rely on GDB not changing the PC (e.g., the
4232 user used the "jump" command, or "set $pc = foo"). */
4233 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
4234 {
4235 /* Collecting 'while-stepping' actions doesn't make sense
4236 anymore. */
4237 release_while_stepping_state_list (thread);
4238 }
4239
4240 /* If we have pending signals or status, and a new signal, enqueue the
4241 signal. Also enqueue the signal if it can't be delivered to the
4242 inferior right now. */
4243 if (signal != 0
4244 && (lwp->status_pending_p
4245 || lwp->pending_signals != NULL
4246 || !lwp_signal_can_be_delivered (lwp)))
4247 {
4248 enqueue_pending_signal (lwp, signal, info);
4249
4250 /* Postpone any pending signal. It was enqueued above. */
4251 signal = 0;
4252 }
4253
4254 if (lwp->status_pending_p)
4255 {
4256 if (debug_threads)
4257 debug_printf ("Not resuming lwp %ld (%s, stop %s);"
4258 " has pending status\n",
4259 lwpid_of (thread), step ? "step" : "continue",
4260 lwp->stop_expected ? "expected" : "not expected");
4261 return;
4262 }
4263
4264 saved_thread = current_thread;
4265 current_thread = thread;
4266
4267 /* This bit needs some thinking about. If we get a signal that
4268 we must report while a single-step reinsert is still pending,
4269 we often end up resuming the thread. It might be better to
4270 (ew) allow a stack of pending events; then we could be sure that
4271 the reinsert happened right away and not lose any signals.
4272
4273 Making this stack would also shrink the window in which breakpoints are
4274 uninserted (see comment in linux_wait_for_lwp) but not enough for
4275 complete correctness, so it won't solve that problem. It may be
4276 worthwhile just to solve this one, however. */
4277 if (lwp->bp_reinsert != 0)
4278 {
4279 if (debug_threads)
4280 debug_printf (" pending reinsert at 0x%s\n",
4281 paddress (lwp->bp_reinsert));
4282
4283 if (can_hardware_single_step ())
4284 {
4285 if (fast_tp_collecting == 0)
4286 {
4287 if (step == 0)
4288 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4289 if (lwp->suspended)
4290 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4291 lwp->suspended);
4292 }
4293 }
4294
4295 step = maybe_hw_step (thread);
4296 }
4297 else
4298 {
4299 /* If the thread isn't doing step-over, there shouldn't be any
4300 reinsert breakpoints. */
4301 gdb_assert (!has_reinsert_breakpoints (proc));
4302 }
4303
4304 if (fast_tp_collecting == 1)
4305 {
4306 if (debug_threads)
4307 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4308 " (exit-jump-pad-bkpt)\n",
4309 lwpid_of (thread));
4310 }
4311 else if (fast_tp_collecting == 2)
4312 {
4313 if (debug_threads)
4314 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4315 " single-stepping\n",
4316 lwpid_of (thread));
4317
4318 if (can_hardware_single_step ())
4319 step = 1;
4320 else
4321 {
4322 internal_error (__FILE__, __LINE__,
4323 "moving out of jump pad single-stepping"
4324 " not implemented on this target");
4325 }
4326 }
4327
4328 /* If we have while-stepping actions in this thread, set it stepping.
4329 If we have a signal to deliver, it may or may not be set to
4330 SIG_IGN, we don't know. Assume so, and allow collecting
4331 while-stepping into a signal handler. A possible smart thing to
4332 do would be to set an internal breakpoint at the signal return
4333 address, continue, and carry on catching this while-stepping
4334 action only when that breakpoint is hit. A future
4335 enhancement. */
4336 if (thread->while_stepping != NULL)
4337 {
4338 if (debug_threads)
4339 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4340 lwpid_of (thread));
4341
4342 step = single_step (lwp);
4343 }
4344
4345 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4346 {
4347 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4348
4349 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4350
4351 if (debug_threads)
4352 {
4353 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4354 (long) lwp->stop_pc);
4355 }
4356 }
4357
4358 /* If we have pending signals, consume one if it can be delivered to
4359 the inferior. */
4360 if (lwp->pending_signals != NULL && lwp_signal_can_be_delivered (lwp))
4361 {
4362 struct pending_signals **p_sig;
4363
4364 p_sig = &lwp->pending_signals;
4365 while ((*p_sig)->prev != NULL)
4366 p_sig = &(*p_sig)->prev;
4367
4368 signal = (*p_sig)->signal;
4369 if ((*p_sig)->info.si_signo != 0)
4370 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4371 &(*p_sig)->info);
4372
4373 free (*p_sig);
4374 *p_sig = NULL;
4375 }
4376
4377 if (debug_threads)
4378 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4379 lwpid_of (thread), step ? "step" : "continue", signal,
4380 lwp->stop_expected ? "expected" : "not expected");
4381
4382 if (the_low_target.prepare_to_resume != NULL)
4383 the_low_target.prepare_to_resume (lwp);
4384
4385 regcache_invalidate_thread (thread);
4386 errno = 0;
4387 lwp->stepping = step;
4388 if (step)
4389 ptrace_request = PTRACE_SINGLESTEP;
4390 else if (gdb_catching_syscalls_p (lwp))
4391 ptrace_request = PTRACE_SYSCALL;
4392 else
4393 ptrace_request = PTRACE_CONT;
4394 ptrace (ptrace_request,
4395 lwpid_of (thread),
4396 (PTRACE_TYPE_ARG3) 0,
4397 /* Coerce to a uintptr_t first to avoid potential gcc warning
4398 of coercing an 8 byte integer to a 4 byte pointer. */
4399 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4400
4401 current_thread = saved_thread;
4402 if (errno)
4403 perror_with_name ("resuming thread");
4404
4405 /* Successfully resumed. Clear state that no longer makes sense,
4406 and mark the LWP as running. Must not do this before resuming
4407 otherwise if that fails other code will be confused. E.g., we'd
4408 later try to stop the LWP and hang forever waiting for a stop
4409 status. Note that we must not throw after this is cleared,
4410 otherwise handle_zombie_lwp_error would get confused. */
4411 lwp->stopped = 0;
4412 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4413 }
4414
4415 /* Called when we try to resume a stopped LWP and that errors out. If
4416 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4417 or about to become), discard the error, clear any pending status
4418 the LWP may have, and return true (we'll collect the exit status
4419 soon enough). Otherwise, return false. */
4420
4421 static int
4422 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4423 {
4424 struct thread_info *thread = get_lwp_thread (lp);
4425
4426 /* If we get an error after resuming the LWP successfully, we'd
4427 confuse !T state for the LWP being gone. */
4428 gdb_assert (lp->stopped);
4429
4430 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4431 because even if ptrace failed with ESRCH, the tracee may be "not
4432 yet fully dead", but already refusing ptrace requests. In that
4433 case the tracee has 'R (Running)' state for a little bit
4434 (observed in Linux 3.18). See also the note on ESRCH in the
4435 ptrace(2) man page. Instead, check whether the LWP has any state
4436 other than ptrace-stopped. */
4437
4438 /* Don't assume anything if /proc/PID/status can't be read. */
4439 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4440 {
4441 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4442 lp->status_pending_p = 0;
4443 return 1;
4444 }
4445 return 0;
4446 }
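
/* Illustrative sketch only (hypothetical helper, not gdbserver code):
   the gist of the /proc/PID/status check done by
   linux_proc_pid_is_trace_stopped_nowarn.  A ptrace-stopped tracee
   reports "State:\tt (tracing stop)" on recent kernels; older kernels
   report 'T' for both job-control and tracing stops, which this sketch
   does not distinguish.  Returns 1 if trace-stopped, 0 if not, -1 if
   the file can't be read.  */
#if 0
static int
example_pid_is_trace_stopped (int pid)
{
  char path[64], line[128], state;
  FILE *f;
  int result = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "State: %c", &state) == 1)
      {
	result = (state == 't');
	break;
      }
  fclose (f);
  return result;
}
#endif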
4447
4448 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4449 disappears while we try to resume it. */
4450
4451 static void
4452 linux_resume_one_lwp (struct lwp_info *lwp,
4453 int step, int signal, siginfo_t *info)
4454 {
4455 TRY
4456 {
4457 linux_resume_one_lwp_throw (lwp, step, signal, info);
4458 }
4459 CATCH (ex, RETURN_MASK_ERROR)
4460 {
4461 if (!check_ptrace_stopped_lwp_gone (lwp))
4462 throw_exception (ex);
4463 }
4464 END_CATCH
4465 }
4466
4467 struct thread_resume_array
4468 {
4469 struct thread_resume *resume;
4470 size_t n;
4471 };
4472
4473 /* This function is called once per thread via find_inferior.
4474 ARG is a pointer to a thread_resume_array struct.
4475 We look up the thread specified by ENTRY in ARG, and mark the thread
4476 with a pointer to the appropriate resume request.
4477
4478 This algorithm is O(threads * resume elements), but the number of
4479 resume elements is small (and will remain small at least until
4480 GDB supports thread suspension). */
4481
4482 static int
4483 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4484 {
4485 struct thread_info *thread = (struct thread_info *) entry;
4486 struct lwp_info *lwp = get_thread_lwp (thread);
4487 int ndx;
4488 struct thread_resume_array *r;
4489
4490 r = (struct thread_resume_array *) arg;
4491
4492 for (ndx = 0; ndx < r->n; ndx++)
4493 {
4494 ptid_t ptid = r->resume[ndx].thread;
4495 if (ptid_equal (ptid, minus_one_ptid)
4496 || ptid_equal (ptid, entry->id)
4497 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4498 of PID'. */
4499 || (ptid_get_pid (ptid) == pid_of (thread)
4500 && (ptid_is_pid (ptid)
4501 || ptid_get_lwp (ptid) == -1)))
4502 {
4503 if (r->resume[ndx].kind == resume_stop
4504 && thread->last_resume_kind == resume_stop)
4505 {
4506 if (debug_threads)
4507 debug_printf ("already %s LWP %ld at GDB's request\n",
4508 (thread->last_status.kind
4509 == TARGET_WAITKIND_STOPPED)
4510 ? "stopped"
4511 : "stopping",
4512 lwpid_of (thread));
4513
4514 continue;
4515 }
4516
4517 lwp->resume = &r->resume[ndx];
4518 thread->last_resume_kind = lwp->resume->kind;
4519
4520 lwp->step_range_start = lwp->resume->step_range_start;
4521 lwp->step_range_end = lwp->resume->step_range_end;
4522
4523 /* If we had a deferred signal to report, dequeue one now.
4524 This can happen if LWP gets more than one signal while
4525 trying to get out of a jump pad. */
4526 if (lwp->stopped
4527 && !lwp->status_pending_p
4528 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4529 {
4530 lwp->status_pending_p = 1;
4531
4532 if (debug_threads)
4533 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4534 "leaving status pending.\n",
4535 WSTOPSIG (lwp->status_pending),
4536 lwpid_of (thread));
4537 }
4538
4539 return 0;
4540 }
4541 }
4542
4543 /* No resume action for this thread. */
4544 lwp->resume = NULL;
4545
4546 return 0;
4547 }
4548
4549 /* find_inferior callback for linux_resume.
4550 Set *FLAG_P if this lwp has an interesting status pending. */
4551
4552 static int
4553 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4554 {
4555 struct thread_info *thread = (struct thread_info *) entry;
4556 struct lwp_info *lwp = get_thread_lwp (thread);
4557
4558 /* LWPs which will not be resumed are not interesting, because
4559 we might not wait for them next time through linux_wait. */
4560 if (lwp->resume == NULL)
4561 return 0;
4562
4563 if (thread_still_has_status_pending_p (thread))
4564 * (int *) flag_p = 1;
4565
4566 return 0;
4567 }
4568
4569 /* Return 1 if this lwp that GDB wants running is stopped at an
4570 internal breakpoint that we need to step over. It assumes that any
4571 required STOP_PC adjustment has already been propagated to the
4572 inferior's regcache. */
4573
4574 static int
4575 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4576 {
4577 struct thread_info *thread = (struct thread_info *) entry;
4578 struct lwp_info *lwp = get_thread_lwp (thread);
4579 struct thread_info *saved_thread;
4580 CORE_ADDR pc;
4581 struct process_info *proc = get_thread_process (thread);
4582
4583 /* GDBserver is skipping the extra traps from the wrapper program;
4584 no step-over is needed. */
4585 if (proc->tdesc == NULL)
4586 return 0;
4587
4588 /* LWPs which will not be resumed are not interesting, because we
4589 might not wait for them next time through linux_wait. */
4590
4591 if (!lwp->stopped)
4592 {
4593 if (debug_threads)
4594 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4595 lwpid_of (thread));
4596 return 0;
4597 }
4598
4599 if (thread->last_resume_kind == resume_stop)
4600 {
4601 if (debug_threads)
4602 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4603 " stopped\n",
4604 lwpid_of (thread));
4605 return 0;
4606 }
4607
4608 gdb_assert (lwp->suspended >= 0);
4609
4610 if (lwp->suspended)
4611 {
4612 if (debug_threads)
4613 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4614 lwpid_of (thread));
4615 return 0;
4616 }
4617
4618 if (lwp->status_pending_p)
4619 {
4620 if (debug_threads)
4621 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4622 " status.\n",
4623 lwpid_of (thread));
4624 return 0;
4625 }
4626
4627 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4628 or we have. */
4629 pc = get_pc (lwp);
4630
4631 /* If the PC has changed since we stopped, then don't do anything,
4632 and let the breakpoint/tracepoint be hit. This happens if, for
4633 instance, GDB handled the decr_pc_after_break subtraction itself,
4634 GDB is OOL stepping this thread, or the user has issued a "jump"
4635 command, or poked the thread's registers herself. */
4636 if (pc != lwp->stop_pc)
4637 {
4638 if (debug_threads)
4639 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4640 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4641 lwpid_of (thread),
4642 paddress (lwp->stop_pc), paddress (pc));
4643 return 0;
4644 }
4645
4646 /* On a software single-step target, resume the inferior with the
4647 signal rather than stepping over. */
4648 if (can_software_single_step ()
4649 && lwp->pending_signals != NULL
4650 && lwp_signal_can_be_delivered (lwp))
4651 {
4652 if (debug_threads)
4653 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4654 " signals.\n",
4655 lwpid_of (thread));
4656
4657 return 0;
4658 }
4659
4660 saved_thread = current_thread;
4661 current_thread = thread;
4662
4663 /* We can only step over breakpoints we know about. */
4664 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4665 {
4666 /* Don't step over a breakpoint that GDB expects to hit,
4667 though. If the condition is evaluated on the target's side
4668 and it evaluates to false, step over this breakpoint as well. */
4669 if (gdb_breakpoint_here (pc)
4670 && gdb_condition_true_at_breakpoint (pc)
4671 && gdb_no_commands_at_breakpoint (pc))
4672 {
4673 if (debug_threads)
4674 debug_printf ("Need step over [LWP %ld]? yes, but found"
4675 " GDB breakpoint at 0x%s; skipping step over\n",
4676 lwpid_of (thread), paddress (pc));
4677
4678 current_thread = saved_thread;
4679 return 0;
4680 }
4681 else
4682 {
4683 if (debug_threads)
4684 debug_printf ("Need step over [LWP %ld]? yes, "
4685 "found breakpoint at 0x%s\n",
4686 lwpid_of (thread), paddress (pc));
4687
4688 /* We've found an lwp that needs stepping over --- return 1 so
4689 that find_inferior stops looking. */
4690 current_thread = saved_thread;
4691
4692 return 1;
4693 }
4694 }
4695
4696 current_thread = saved_thread;
4697
4698 if (debug_threads)
4699 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4700 " at 0x%s\n",
4701 lwpid_of (thread), paddress (pc));
4702
4703 return 0;
4704 }
4705
4706 /* Start a step-over operation on LWP. When LWP is stopped at a
4707 breakpoint, we need to get the breakpoint out of the way to make
4708 progress. If we let other threads run while we do that, they may
4709 pass by the breakpoint location and miss hitting it. To avoid
4710 that, a step-over momentarily stops all threads while LWP is
4711 single-stepped by either hardware or software while the breakpoint
4712 is temporarily uninserted from the inferior. When the single-step
4713 finishes, we reinsert the breakpoint, and let all threads that are
4714 supposed to be running, run again. */
4715
4716 static int
4717 start_step_over (struct lwp_info *lwp)
4718 {
4719 struct thread_info *thread = get_lwp_thread (lwp);
4720 struct thread_info *saved_thread;
4721 CORE_ADDR pc;
4722 int step;
4723
4724 if (debug_threads)
4725 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4726 lwpid_of (thread));
4727
4728 stop_all_lwps (1, lwp);
4729
4730 if (lwp->suspended != 0)
4731 {
4732 internal_error (__FILE__, __LINE__,
4733 "LWP %ld suspended=%d\n", lwpid_of (thread),
4734 lwp->suspended);
4735 }
4736
4737 if (debug_threads)
4738 debug_printf ("Done stopping all threads for step-over.\n");
4739
4740 /* Note, we should always reach here with an already adjusted PC,
4741 either by GDB (if we're resuming due to GDB's request), or by our
4742 caller, if we just finished handling an internal breakpoint GDB
4743 shouldn't care about. */
4744 pc = get_pc (lwp);
4745
4746 saved_thread = current_thread;
4747 current_thread = thread;
4748
4749 lwp->bp_reinsert = pc;
4750 uninsert_breakpoints_at (pc);
4751 uninsert_fast_tracepoint_jumps_at (pc);
4752
4753 step = single_step (lwp);
4754
4755 current_thread = saved_thread;
4756
4757 linux_resume_one_lwp (lwp, step, 0, NULL);
4758
4759 /* Require next event from this LWP. */
4760 step_over_bkpt = thread->entry.id;
4761 return 1;
4762 }
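
/* Illustrative only: the overall step-over lifecycle, pieced together
   from this file's own functions.

     start_step_over (lwp);      // stop + suspend others, uninsert
                                 // breakpoint, single-step LWP
     ... wait for the step to finish (step_over_bkpt filters events) ...
     finish_step_over (lwp);     // reinsert breakpoint, clear bp_reinsert
     unsuspend_all_lwps (lwp);   // balance the suspend counts
     proceed_all_lwps ();        // set everyone running again

   complete_ongoing_step_over below performs the same wind-down when a
   new request arrives while a step-over is still in flight.  */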
4763
4764 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4765 start_step_over, if still there, and delete any reinsert
4766 breakpoints we've set, on non-hardware single-step targets. */
4767
4768 static int
4769 finish_step_over (struct lwp_info *lwp)
4770 {
4771 if (lwp->bp_reinsert != 0)
4772 {
4773 struct thread_info *saved_thread = current_thread;
4774
4775 if (debug_threads)
4776 debug_printf ("Finished step over.\n");
4777
4778 current_thread = get_lwp_thread (lwp);
4779
4780 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4781 may be no breakpoint to reinsert there by now. */
4782 reinsert_breakpoints_at (lwp->bp_reinsert);
4783 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4784
4785 lwp->bp_reinsert = 0;
4786
4787 /* Delete any software-single-step reinsert breakpoints. No
4788 longer needed. We don't have to worry about other threads
4789 hitting this trap, and later not being able to explain it,
4790 because we were stepping over a breakpoint, and we hold all
4791 threads but LWP stopped while doing that. */
4792 if (!can_hardware_single_step ())
4793 {
4794 gdb_assert (has_reinsert_breakpoints (current_process ()));
4795 delete_reinsert_breakpoints ();
4796 }
4797
4798 step_over_bkpt = null_ptid;
4799 current_thread = saved_thread;
4800 return 1;
4801 }
4802 else
4803 return 0;
4804 }
4805
4806 /* If there's a step over in progress, wait until all threads stop
4807 (that is, until the stepping thread finishes its step), and
4808 unsuspend all lwps. The stepping thread ends with its status
4809 pending, which is processed later when we get back to processing
4810 events. */
4811
4812 static void
4813 complete_ongoing_step_over (void)
4814 {
4815 if (!ptid_equal (step_over_bkpt, null_ptid))
4816 {
4817 struct lwp_info *lwp;
4818 int wstat;
4819 int ret;
4820
4821 if (debug_threads)
4822 debug_printf ("detach: step over in progress, finish it first\n");
4823
4824 /* Passing NULL_PTID as filter indicates we want all events to
4825 be left pending. Eventually this returns when there are no
4826 unwaited-for children left. */
4827 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4828 &wstat, __WALL);
4829 gdb_assert (ret == -1);
4830
4831 lwp = find_lwp_pid (step_over_bkpt);
4832 if (lwp != NULL)
4833 finish_step_over (lwp);
4834 step_over_bkpt = null_ptid;
4835 unsuspend_all_lwps (lwp);
4836 }
4837 }
4838
4839 /* This function is called once per thread. We check the thread's resume
4840 request, which will tell us whether to resume, step, or leave the thread
4841 stopped; and what signal, if any, it should be sent.
4842
4843 For threads which we aren't explicitly told otherwise, we preserve
4844 the stepping flag; this is used for stepping over gdbserver-placed
4845 breakpoints.
4846
4847 If pending_flags was set in any thread, we queue any needed
4848 signals, since we won't actually resume. We already have a pending
4849 event to report, so we don't need to preserve any step requests;
4850 they should be re-issued if necessary. */
4851
4852 static int
4853 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4854 {
4855 struct thread_info *thread = (struct thread_info *) entry;
4856 struct lwp_info *lwp = get_thread_lwp (thread);
4857 int step;
4858 int leave_all_stopped = * (int *) arg;
4859 int leave_pending;
4860
4861 if (lwp->resume == NULL)
4862 return 0;
4863
4864 if (lwp->resume->kind == resume_stop)
4865 {
4866 if (debug_threads)
4867 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4868
4869 if (!lwp->stopped)
4870 {
4871 if (debug_threads)
4872 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4873
4874 /* Stop the thread, and wait for the event asynchronously,
4875 through the event loop. */
4876 send_sigstop (lwp);
4877 }
4878 else
4879 {
4880 if (debug_threads)
4881 debug_printf ("already stopped LWP %ld\n",
4882 lwpid_of (thread));
4883
4884 /* The LWP may have been stopped in an internal event that
4885 was not meant to be notified back to GDB (e.g., gdbserver
4886 breakpoint), so we should be reporting a stop event in
4887 this case too. */
4888
4889 /* If the thread already has a pending SIGSTOP, this is a
4890 no-op. Otherwise, something later will presumably resume
4891 the thread and this will cause it to cancel any pending
4892 operation, due to last_resume_kind == resume_stop. If
4893 the thread already has a pending status to report, we
4894 will still report it the next time we wait - see
4895 status_pending_p_callback. */
4896
4897 /* If we already have a pending signal to report, then
4898 there's no need to queue a SIGSTOP, as this means we're
4899 midway through moving the LWP out of the jumppad, and we
4900 will report the pending signal as soon as that is
4901 finished. */
4902 if (lwp->pending_signals_to_report == NULL)
4903 send_sigstop (lwp);
4904 }
4905
4906 /* For stop requests, we're done. */
4907 lwp->resume = NULL;
4908 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4909 return 0;
4910 }
4911
4912 /* If this thread which is about to be resumed has a pending status,
4913 then don't resume it - we can just report the pending status.
4914 Likewise if it is suspended, because e.g., another thread is
4915 stepping past a breakpoint. Make sure to queue any signals that
4916 would otherwise be sent. In all-stop mode, we make this decision
4917 based on whether *any* thread has a pending status. If there's a
4918 thread that needs the step-over-breakpoint dance, then don't
4919 resume any other thread but that particular one. */
4920 leave_pending = (lwp->suspended
4921 || lwp->status_pending_p
4922 || leave_all_stopped);
4923
4924 if (!leave_pending)
4925 {
4926 if (debug_threads)
4927 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4928
4929 step = (lwp->resume->kind == resume_step);
4930 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4931 }
4932 else
4933 {
4934 if (debug_threads)
4935 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4936
4937 /* If we have a new signal, enqueue the signal. */
4938 if (lwp->resume->sig != 0)
4939 {
4940 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4941
4942 p_sig->prev = lwp->pending_signals;
4943 p_sig->signal = lwp->resume->sig;
4944
4945 /* If this is the same signal we were previously stopped by,
4946 make sure to queue its siginfo. We can ignore the return
4947 value of ptrace; if it fails, we'll skip
4948 PTRACE_SETSIGINFO. */
4949 if (WIFSTOPPED (lwp->last_status)
4950 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4951 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4952 &p_sig->info);
4953
4954 lwp->pending_signals = p_sig;
4955 }
4956 }
4957
4958 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4959 lwp->resume = NULL;
4960 return 0;
4961 }
4962
4963 static void
4964 linux_resume (struct thread_resume *resume_info, size_t n)
4965 {
4966 struct thread_resume_array array = { resume_info, n };
4967 struct thread_info *need_step_over = NULL;
4968 int any_pending;
4969 int leave_all_stopped;
4970
4971 if (debug_threads)
4972 {
4973 debug_enter ();
4974 debug_printf ("linux_resume:\n");
4975 }
4976
4977 find_inferior (&all_threads, linux_set_resume_request, &array);
4978
4979 /* If there is a thread which would otherwise be resumed, which has
4980 a pending status, then don't resume any threads - we can just
4981 report the pending status. Make sure to queue any signals that
4982 would otherwise be sent. In non-stop mode, we'll apply this
4983 logic to each thread individually. We consume all pending events
4984 before considering whether to start a step-over (in all-stop). */
4985 any_pending = 0;
4986 if (!non_stop)
4987 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4988
4989 /* If there is a thread which would otherwise be resumed, which is
4990 stopped at a breakpoint that needs stepping over, then don't
4991 resume any threads - have it step over the breakpoint with all
4992 other threads stopped, then resume all threads again. Make sure
4993 to queue any signals that would otherwise be delivered or
4994 queued. */
4995 if (!any_pending && supports_breakpoints ())
4996 need_step_over
4997 = (struct thread_info *) find_inferior (&all_threads,
4998 need_step_over_p, NULL);
4999
5000 leave_all_stopped = (need_step_over != NULL || any_pending);
5001
5002 if (debug_threads)
5003 {
5004 if (need_step_over != NULL)
5005 debug_printf ("Not resuming all, need step over\n");
5006 else if (any_pending)
5007 debug_printf ("Not resuming, all-stop and found "
5008 "an LWP with pending status\n");
5009 else
5010 debug_printf ("Resuming, no pending status or step over needed\n");
5011 }
5012
5013 /* Even if we're leaving threads stopped, queue all signals we'd
5014 otherwise deliver. */
5015 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
5016
5017 if (need_step_over)
5018 start_step_over (get_thread_lwp (need_step_over));
5019
5020 if (debug_threads)
5021 {
5022 debug_printf ("linux_resume done\n");
5023 debug_exit ();
5024 }
5025
5026 /* We may have events that were pending that can/should be sent to
5027 the client now. Trigger a linux_wait call. */
5028 if (target_is_async_p ())
5029 async_file_mark ();
5030 }
5031
5032 /* This function is called once per thread. We check the thread's
5033 last resume request, which will tell us whether to resume, step, or
5034 leave the thread stopped. Any signal the client requested to be
5035 delivered has already been enqueued at this point.
5036
5037 If any thread that GDB wants running is stopped at an internal
5038 breakpoint that needs stepping over, we start a step-over operation
5039 on that particular thread, and leave all others stopped. */
5040
5041 static int
5042 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5043 {
5044 struct thread_info *thread = (struct thread_info *) entry;
5045 struct lwp_info *lwp = get_thread_lwp (thread);
5046 int step;
5047
5048 if (lwp == except)
5049 return 0;
5050
5051 if (debug_threads)
5052 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
5053
5054 if (!lwp->stopped)
5055 {
5056 if (debug_threads)
5057 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
5058 return 0;
5059 }
5060
5061 if (thread->last_resume_kind == resume_stop
5062 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
5063 {
5064 if (debug_threads)
5065 debug_printf ("  client wants LWP %ld to remain stopped\n",
5066 lwpid_of (thread));
5067 return 0;
5068 }
5069
5070 if (lwp->status_pending_p)
5071 {
5072 if (debug_threads)
5073 debug_printf (" LWP %ld has pending status, leaving stopped\n",
5074 lwpid_of (thread));
5075 return 0;
5076 }
5077
5078 gdb_assert (lwp->suspended >= 0);
5079
5080 if (lwp->suspended)
5081 {
5082 if (debug_threads)
5083 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
5084 return 0;
5085 }
5086
5087 if (thread->last_resume_kind == resume_stop
5088 && lwp->pending_signals_to_report == NULL
5089 && lwp->collecting_fast_tracepoint == 0)
5090 {
5091 /* We haven't reported this LWP as stopped yet (otherwise, the
5092 last_status.kind check above would catch it, and we wouldn't
5093 reach here). This LWP may have been momentarily paused by a
5094 stop_all_lwps call while handling, for example, another LWP's
5095 step-over. In that case, the pending expected SIGSTOP signal
5096 that was queued at vCont;t handling time will have already
5097 been consumed by wait_for_sigstop, and so we need to requeue
5098 another one here. Note that if the LWP already has a SIGSTOP
5099 pending, this is a no-op. */
5100
5101 if (debug_threads)
5102 debug_printf ("Client wants LWP %ld to stop. "
5103 "Making sure it has a SIGSTOP pending\n",
5104 lwpid_of (thread));
5105
5106 send_sigstop (lwp);
5107 }
5108
5109 if (thread->last_resume_kind == resume_step)
5110 {
5111 if (debug_threads)
5112 debug_printf (" stepping LWP %ld, client wants it stepping\n",
5113 lwpid_of (thread));
5114 step = 1;
5115 }
5116 else if (lwp->bp_reinsert != 0)
5117 {
5118 if (debug_threads)
5119 debug_printf (" stepping LWP %ld, reinsert set\n",
5120 lwpid_of (thread));
5121
5122 step = maybe_hw_step (thread);
5123 }
5124 else
5125 step = 0;
5126
5127 linux_resume_one_lwp (lwp, step, 0, NULL);
5128 return 0;
5129 }
5130
5131 static int
5132 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
5133 {
5134 struct thread_info *thread = (struct thread_info *) entry;
5135 struct lwp_info *lwp = get_thread_lwp (thread);
5136
5137 if (lwp == except)
5138 return 0;
5139
5140 lwp_suspended_decr (lwp);
5141
5142 return proceed_one_lwp (entry, except);
5143 }
5144
5145 /* When we finish a step-over, set threads running again. If there's
5146 another thread that may need a step-over, now's the time to start
5147 it. Eventually, we'll move all threads past their breakpoints. */
5148
5149 static void
5150 proceed_all_lwps (void)
5151 {
5152 struct thread_info *need_step_over;
5153
5154 /* If there is a thread which would otherwise be resumed, which is
5155 stopped at a breakpoint that needs stepping over, then don't
5156 resume any threads - have it step over the breakpoint with all
5157 other threads stopped, then resume all threads again. */
5158
5159 if (supports_breakpoints ())
5160 {
5161 need_step_over
5162 = (struct thread_info *) find_inferior (&all_threads,
5163 need_step_over_p, NULL);
5164
5165 if (need_step_over != NULL)
5166 {
5167 if (debug_threads)
5168 debug_printf ("proceed_all_lwps: found "
5169 "thread %ld needing a step-over\n",
5170 lwpid_of (need_step_over));
5171
5172 start_step_over (get_thread_lwp (need_step_over));
5173 return;
5174 }
5175 }
5176
5177 if (debug_threads)
5178 debug_printf ("Proceeding, no step-over needed\n");
5179
5180 find_inferior (&all_threads, proceed_one_lwp, NULL);
5181 }
5182
5183 /* Stopped LWPs that the client wanted to be running and that don't
5184 have pending statuses are set running again, except for EXCEPT, if not
5185 NULL. This undoes a stop_all_lwps call. */
5186
5187 static void
5188 unstop_all_lwps (int unsuspend, struct lwp_info *except)
5189 {
5190 if (debug_threads)
5191 {
5192 debug_enter ();
5193 if (except)
5194 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
5195 lwpid_of (get_lwp_thread (except)));
5196 else
5197 debug_printf ("unstopping all lwps\n");
5198 }
5199
5200 if (unsuspend)
5201 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
5202 else
5203 find_inferior (&all_threads, proceed_one_lwp, except);
5204
5205 if (debug_threads)
5206 {
5207 debug_printf ("unstop_all_lwps done\n");
5208 debug_exit ();
5209 }
5210 }
5211
5212
5213 #ifdef HAVE_LINUX_REGSETS
5214
5215 #define use_linux_regsets 1
5216
5217 /* Returns true if REGSET has been disabled. */
5218
5219 static int
5220 regset_disabled (struct regsets_info *info, struct regset_info *regset)
5221 {
5222 return (info->disabled_regsets != NULL
5223 && info->disabled_regsets[regset - info->regsets]);
5224 }
5225
5226 /* Disable REGSET. */
5227
5228 static void
5229 disable_regset (struct regsets_info *info, struct regset_info *regset)
5230 {
5231 int dr_offset;
5232
5233 dr_offset = regset - info->regsets;
5234 if (info->disabled_regsets == NULL)
5235 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
5236 info->disabled_regsets[dr_offset] = 1;
5237 }
5238
5239 static int
5240 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
5241 struct regcache *regcache)
5242 {
5243 struct regset_info *regset;
5244 int saw_general_regs = 0;
5245 int pid;
5246 struct iovec iov;
5247
5248 pid = lwpid_of (current_thread);
5249 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5250 {
5251 void *buf, *data;
5252 int nt_type, res;
5253
5254 if (regset->size == 0 || regset_disabled (regsets_info, regset))
5255 continue;
5256
5257 buf = xmalloc (regset->size);
5258
5259 nt_type = regset->nt_type;
5260 if (nt_type)
5261 {
5262 iov.iov_base = buf;
5263 iov.iov_len = regset->size;
5264 data = (void *) &iov;
5265 }
5266 else
5267 data = buf;
5268
5269 #ifndef __sparc__
5270 res = ptrace (regset->get_request, pid,
5271 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5272 #else
5273 res = ptrace (regset->get_request, pid, data, nt_type);
5274 #endif
5275 if (res < 0)
5276 {
5277 if (errno == EIO)
5278 {
5279 /* If we get EIO on a regset, do not try it again for
5280 this process mode. */
5281 disable_regset (regsets_info, regset);
5282 }
5283 else if (errno == ENODATA)
5284 {
5285 /* ENODATA may be returned if the regset is currently
5286 not "active". This can happen in normal operation,
5287 so suppress the warning in this case. */
5288 }
5289 else
5290 {
5291 char s[256];
5292 snprintf (s, sizeof (s),
5293 "ptrace(regsets_fetch_inferior_registers) PID=%d", pid);
5294 perror (s);
5295 }
5296 }
5297 else
5298 {
5299 if (regset->type == GENERAL_REGS)
5300 saw_general_regs = 1;
5301 regset->store_function (regcache, buf);
5302 }
5303 free (buf);
5304 }
5305 if (saw_general_regs)
5306 return 0;
5307 else
5308 return 1;
5309 }
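
/* Illustrative sketch only: the iovec-based transfer used above for
   regsets with an NT_* note type.  The kernel's PTRACE_GETREGSET
   request takes a struct iovec describing the buffer, and updates
   iov_len to the amount actually filled in.  NT_PRSTATUS (the
   general-purpose registers) is used here as the example note type;
   the helper name is hypothetical.  */
#if 0
static long
example_getregset (int tid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  return ptrace (PTRACE_GETREGSET, tid, (void *) NT_PRSTATUS, &iov);
}
#endif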
5310
5311 static int
5312 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5313 struct regcache *regcache)
5314 {
5315 struct regset_info *regset;
5316 int saw_general_regs = 0;
5317 int pid;
5318 struct iovec iov;
5319
5320 pid = lwpid_of (current_thread);
5321 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5322 {
5323 void *buf, *data;
5324 int nt_type, res;
5325
5326 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5327 || regset->fill_function == NULL)
5328 continue;
5329
5330 buf = xmalloc (regset->size);
5331
5332 /* First fill the buffer with the current register set contents,
5333 in case there are any items in the kernel's regset that are
5334 not in gdbserver's regcache. */
5335
5336 nt_type = regset->nt_type;
5337 if (nt_type)
5338 {
5339 iov.iov_base = buf;
5340 iov.iov_len = regset->size;
5341 data = (void *) &iov;
5342 }
5343 else
5344 data = buf;
5345
5346 #ifndef __sparc__
5347 res = ptrace (regset->get_request, pid,
5348 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5349 #else
5350 res = ptrace (regset->get_request, pid, data, nt_type);
5351 #endif
5352
5353 if (res == 0)
5354 {
5355 /* Then overlay our cached registers on that. */
5356 regset->fill_function (regcache, buf);
5357
5358 /* Only now do we write the register set. */
5359 #ifndef __sparc__
5360 res = ptrace (regset->set_request, pid,
5361 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5362 #else
5363 res = ptrace (regset->set_request, pid, data, nt_type);
5364 #endif
5365 }
5366
5367 if (res < 0)
5368 {
5369 if (errno == EIO)
5370 {
5371 /* If we get EIO on a regset, do not try it again for
5372 this process mode. */
5373 disable_regset (regsets_info, regset);
5374 }
5375 else if (errno == ESRCH)
5376 {
5377 /* At this point, ESRCH should mean the process is
5378 already gone, in which case we simply ignore attempts
5379 to change its registers. See also the related
5380 comment in linux_resume_one_lwp. */
5381 free (buf);
5382 return 0;
5383 }
5384 else
5385 {
5386 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5387 }
5388 }
5389 else if (regset->type == GENERAL_REGS)
5390 saw_general_regs = 1;
5391 free (buf);
5392 }
5393 if (saw_general_regs)
5394 return 0;
5395 else
5396 return 1;
5397 }
5398
5399 #else /* !HAVE_LINUX_REGSETS */
5400
5401 #define use_linux_regsets 0
5402 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5403 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5404
5405 #endif
5406
5407 /* Return 1 if register REGNO is supported by one of the regset ptrace
5408 calls or 0 if it has to be transferred individually. */
5409
5410 static int
5411 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5412 {
5413 unsigned char mask = 1 << (regno % 8);
5414 size_t index = regno / 8;
5415
5416 return (use_linux_regsets
5417 && (regs_info->regset_bitmap == NULL
5418 || (regs_info->regset_bitmap[index] & mask) != 0));
5419 }
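
/* Worked example for the bitmap test above: for regno == 10, index ==
   10 / 8 == 1 and mask == 1 << (10 % 8) == 0x04, so the register is
   handled by a regset iff bit 2 of regset_bitmap[1] is set (or the
   bitmap is NULL, which means "all registers").  */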
5420
5421 #ifdef HAVE_LINUX_USRREGS
5422
5423 static int
5424 register_addr (const struct usrregs_info *usrregs, int regnum)
5425 {
5426 int addr;
5427
5428 if (regnum < 0 || regnum >= usrregs->num_regs)
5429 error ("Invalid register number %d.", regnum);
5430
5431 addr = usrregs->regmap[regnum];
5432
5433 return addr;
5434 }
5435
5436 /* Fetch one register. */
5437 static void
5438 fetch_register (const struct usrregs_info *usrregs,
5439 struct regcache *regcache, int regno)
5440 {
5441 CORE_ADDR regaddr;
5442 int i, size;
5443 char *buf;
5444 int pid;
5445
5446 if (regno >= usrregs->num_regs)
5447 return;
5448 if ((*the_low_target.cannot_fetch_register) (regno))
5449 return;
5450
5451 regaddr = register_addr (usrregs, regno);
5452 if (regaddr == -1)
5453 return;
5454
5455 size = ((register_size (regcache->tdesc, regno)
5456 + sizeof (PTRACE_XFER_TYPE) - 1)
5457 & -sizeof (PTRACE_XFER_TYPE));
5458 buf = (char *) alloca (size);
5459
5460 pid = lwpid_of (current_thread);
5461 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5462 {
5463 errno = 0;
5464 *(PTRACE_XFER_TYPE *) (buf + i) =
5465 ptrace (PTRACE_PEEKUSER, pid,
5466 /* Coerce to a uintptr_t first to avoid potential gcc warning
5467 of coercing an 8 byte integer to a 4 byte pointer. */
5468 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5469 regaddr += sizeof (PTRACE_XFER_TYPE);
5470 if (errno != 0)
5471 error ("reading register %d: %s", regno, strerror (errno));
5472 }
5473
5474 if (the_low_target.supply_ptrace_register)
5475 the_low_target.supply_ptrace_register (regcache, regno, buf);
5476 else
5477 supply_register (regcache, regno, buf);
5478 }
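
/* Worked example for the size round-up used in fetch_register and
   store_register: with register_size == 10 and
   sizeof (PTRACE_XFER_TYPE) == 8, (10 + 8 - 1) & -8 == 17 & ~7 == 16,
   i.e. the next multiple of the ptrace transfer word, so the
   peek/poke loops always move whole words.  */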
5479
5480 /* Store one register. */
5481 static void
5482 store_register (const struct usrregs_info *usrregs,
5483 struct regcache *regcache, int regno)
5484 {
5485 CORE_ADDR regaddr;
5486 int i, size;
5487 char *buf;
5488 int pid;
5489
5490 if (regno >= usrregs->num_regs)
5491 return;
5492 if ((*the_low_target.cannot_store_register) (regno))
5493 return;
5494
5495 regaddr = register_addr (usrregs, regno);
5496 if (regaddr == -1)
5497 return;
5498
5499 size = ((register_size (regcache->tdesc, regno)
5500 + sizeof (PTRACE_XFER_TYPE) - 1)
5501 & -sizeof (PTRACE_XFER_TYPE));
5502 buf = (char *) alloca (size);
5503 memset (buf, 0, size);
5504
5505 if (the_low_target.collect_ptrace_register)
5506 the_low_target.collect_ptrace_register (regcache, regno, buf);
5507 else
5508 collect_register (regcache, regno, buf);
5509
5510 pid = lwpid_of (current_thread);
5511 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5512 {
5513 errno = 0;
5514 ptrace (PTRACE_POKEUSER, pid,
5515 /* Coerce to a uintptr_t first to avoid potential gcc warning
5516 about coercing an 8 byte integer to a 4 byte pointer. */
5517 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5518 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5519 if (errno != 0)
5520 {
5521 /* At this point, ESRCH should mean the process is
5522 already gone, in which case we simply ignore attempts
5523 to change its registers. See also the related
5524 comment in linux_resume_one_lwp. */
5525 if (errno == ESRCH)
5526 return;
5527
5528 if ((*the_low_target.cannot_store_register) (regno) == 0)
5529 error ("writing register %d: %s", regno, strerror (errno));
5530 }
5531 regaddr += sizeof (PTRACE_XFER_TYPE);
5532 }
5533 }
5534
5535 /* Fetch all registers, or just one, from the child process.
5536 If REGNO is -1, do this for all registers, skipping any that are
5537 assumed to have been retrieved by regsets_fetch_inferior_registers,
5538 unless ALL is non-zero.
5539 Otherwise, REGNO specifies which register (so we can save time). */
5540 static void
5541 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5542 struct regcache *regcache, int regno, int all)
5543 {
5544 struct usrregs_info *usr = regs_info->usrregs;
5545
5546 if (regno == -1)
5547 {
5548 for (regno = 0; regno < usr->num_regs; regno++)
5549 if (all || !linux_register_in_regsets (regs_info, regno))
5550 fetch_register (usr, regcache, regno);
5551 }
5552 else
5553 fetch_register (usr, regcache, regno);
5554 }
5555
5556 /* Store our register values back into the inferior.
5557 If REGNO is -1, do this for all registers, skipping any that are
5558 assumed to have been saved by regsets_store_inferior_registers,
5559 unless ALL is non-zero.
5560 Otherwise, REGNO specifies which register (so we can save time). */
5561 static void
5562 usr_store_inferior_registers (const struct regs_info *regs_info,
5563 struct regcache *regcache, int regno, int all)
5564 {
5565 struct usrregs_info *usr = regs_info->usrregs;
5566
5567 if (regno == -1)
5568 {
5569 for (regno = 0; regno < usr->num_regs; regno++)
5570 if (all || !linux_register_in_regsets (regs_info, regno))
5571 store_register (usr, regcache, regno);
5572 }
5573 else
5574 store_register (usr, regcache, regno);
5575 }
5576
5577 #else /* !HAVE_LINUX_USRREGS */
5578
5579 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5580 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5581
5582 #endif
5583
5584
5585 static void
5586 linux_fetch_registers (struct regcache *regcache, int regno)
5587 {
5588 int use_regsets;
5589 int all = 0;
5590 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5591
5592 if (regno == -1)
5593 {
5594 if (the_low_target.fetch_register != NULL
5595 && regs_info->usrregs != NULL)
5596 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5597 (*the_low_target.fetch_register) (regcache, regno);
5598
5599 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5600 if (regs_info->usrregs != NULL)
5601 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5602 }
5603 else
5604 {
5605 if (the_low_target.fetch_register != NULL
5606 && (*the_low_target.fetch_register) (regcache, regno))
5607 return;
5608
5609 use_regsets = linux_register_in_regsets (regs_info, regno);
5610 if (use_regsets)
5611 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5612 regcache);
5613 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5614 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5615 }
5616 }
5617
5618 static void
5619 linux_store_registers (struct regcache *regcache, int regno)
5620 {
5621 int use_regsets;
5622 int all = 0;
5623 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5624
5625 if (regno == -1)
5626 {
5627 all = regsets_store_inferior_registers (regs_info->regsets_info,
5628 regcache);
5629 if (regs_info->usrregs != NULL)
5630 usr_store_inferior_registers (regs_info, regcache, regno, all);
5631 }
5632 else
5633 {
5634 use_regsets = linux_register_in_regsets (regs_info, regno);
5635 if (use_regsets)
5636 all = regsets_store_inferior_registers (regs_info->regsets_info,
5637 regcache);
5638 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5639 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5640 }
5641 }
5642
5643
5644 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5645 to debugger memory starting at MYADDR. */
5646
5647 static int
5648 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5649 {
5650 int pid = lwpid_of (current_thread);
5651   PTRACE_XFER_TYPE *buffer;
5652   CORE_ADDR addr;
5653   int count;
5654   char filename[64];
5655   int i;
5656 int ret;
5657 int fd;
5658
5659 /* Try using /proc. Don't bother for one word. */
5660 if (len >= 3 * sizeof (long))
5661 {
5662 int bytes;
5663
5664 /* We could keep this file open and cache it - possibly one per
5665 thread. That requires some juggling, but is even faster. */
5666 sprintf (filename, "/proc/%d/mem", pid);
5667 fd = open (filename, O_RDONLY | O_LARGEFILE);
5668 if (fd == -1)
5669 goto no_proc;
5670
5671 /* If pread64 is available, use it. It's faster if the kernel
5672 supports it (only one syscall), and it's 64-bit safe even on
5673 32-bit platforms (for instance, SPARC debugging a SPARC64
5674 application). */
5675 #ifdef HAVE_PREAD64
5676 bytes = pread64 (fd, myaddr, len, memaddr);
5677 #else
5678 bytes = -1;
5679 if (lseek (fd, memaddr, SEEK_SET) != -1)
5680 bytes = read (fd, myaddr, len);
5681 #endif
5682
5683 close (fd);
5684 if (bytes == len)
5685 return 0;
5686
5687       /* Some data was read; we'll try to get the rest with ptrace.  */
5688 if (bytes > 0)
5689 {
5690 memaddr += bytes;
5691 myaddr += bytes;
5692 len -= bytes;
5693 }
5694 }
5695
5696 no_proc:
5697 /* Round starting address down to longword boundary. */
5698 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5699 /* Round ending address up; get number of longwords that makes. */
5700 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5701 / sizeof (PTRACE_XFER_TYPE));
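  /* E.g. with 4-byte words, a 5-byte read at 0x1003 rounds ADDR down
     to 0x1000 and makes COUNT 2, covering the words at 0x1000 and
     0x1004.  */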
5702 /* Allocate buffer of that many longwords. */
5703 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5704
5705   /* Read all the longwords.  */
5706 errno = 0;
5707 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5708 {
5709 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5710 about coercing an 8 byte integer to a 4 byte pointer. */
5711 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5712 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5713 (PTRACE_TYPE_ARG4) 0);
5714 if (errno)
5715 break;
5716 }
5717 ret = errno;
5718
5719 /* Copy appropriate bytes out of the buffer. */
5720 if (i > 0)
5721 {
5722 i *= sizeof (PTRACE_XFER_TYPE);
5723 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5724 memcpy (myaddr,
5725 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5726 i < len ? i : len);
5727 }
5728
5729 return ret;
5730 }
5731
5732 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5733 memory at MEMADDR. On failure (cannot write to the inferior)
5734 returns the value of errno. Always succeeds if LEN is zero. */
5735
5736 static int
5737 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5738 {
5739   int i;
5740   /* Round starting address down to longword boundary.  */
5741   CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5742   /* Round ending address up; get number of longwords that makes.  */
5743   int count
5744     = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5745       / sizeof (PTRACE_XFER_TYPE);
5746 
5747   /* Allocate buffer of that many longwords.  */
5748   PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5749
5750 int pid = lwpid_of (current_thread);
5751
5752 if (len == 0)
5753 {
5754 /* Zero length write always succeeds. */
5755 return 0;
5756 }
5757
5758 if (debug_threads)
5759 {
5760 /* Dump up to four bytes. */
5761 char str[4 * 2 + 1];
5762 char *p = str;
5763 int dump = len < 4 ? len : 4;
5764
5765 for (i = 0; i < dump; i++)
5766 {
5767 sprintf (p, "%02x", myaddr[i]);
5768 p += 2;
5769 }
5770 *p = '\0';
5771
5772 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5773 str, (long) memaddr, pid);
5774 }
5775
5776 /* Fill start and end extra bytes of buffer with existing memory data. */
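  /* E.g. with 4-byte words, a 2-byte write at 0x1003 spans the words
     at 0x1000 and 0x1004; the surrounding bytes of both words are read
     here first so they can be written back unchanged.  */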
5777
5778 errno = 0;
5779 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5780 about coercing an 8 byte integer to a 4 byte pointer. */
5781 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5782 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5783 (PTRACE_TYPE_ARG4) 0);
5784 if (errno)
5785 return errno;
5786
5787 if (count > 1)
5788 {
5789 errno = 0;
5790 buffer[count - 1]
5791 = ptrace (PTRACE_PEEKTEXT, pid,
5792 /* Coerce to a uintptr_t first to avoid potential gcc warning
5793 about coercing an 8 byte integer to a 4 byte pointer. */
5794 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5795 * sizeof (PTRACE_XFER_TYPE)),
5796 (PTRACE_TYPE_ARG4) 0);
5797 if (errno)
5798 return errno;
5799 }
5800
5801 /* Copy data to be written over corresponding part of buffer. */
5802
5803 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5804 myaddr, len);
5805
5806 /* Write the entire buffer. */
5807
5808 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5809 {
5810 errno = 0;
5811 ptrace (PTRACE_POKETEXT, pid,
5812 /* Coerce to a uintptr_t first to avoid potential gcc warning
5813 about coercing an 8 byte integer to a 4 byte pointer. */
5814 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5815 (PTRACE_TYPE_ARG4) buffer[i]);
5816 if (errno)
5817 return errno;
5818 }
5819
5820 return 0;
5821 }
5822
5823 static void
5824 linux_look_up_symbols (void)
5825 {
5826 #ifdef USE_THREAD_DB
5827 struct process_info *proc = current_process ();
5828
5829 if (proc->priv->thread_db != NULL)
5830 return;
5831
5832 thread_db_init ();
5833 #endif
5834 }
5835
5836 static void
5837 linux_request_interrupt (void)
5838 {
5839 extern unsigned long signal_pid;
5840
5841   /* Send a SIGINT to the process group.  This acts just as if the
5842      user had typed a ^C on the controlling terminal.  */
5843 kill (-signal_pid, SIGINT);
5844 }
5845
5846 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5847 to debugger memory starting at MYADDR. */
5848
5849 static int
5850 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5851 {
5852 char filename[PATH_MAX];
5853 int fd, n;
5854 int pid = lwpid_of (current_thread);
5855
5856 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5857
5858 fd = open (filename, O_RDONLY);
5859 if (fd < 0)
5860 return -1;
5861
5862 if (offset != (CORE_ADDR) 0
5863 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5864 n = -1;
5865 else
5866 n = read (fd, myaddr, len);
5867
5868 close (fd);
5869
5870 return n;
5871 }
5872
5873 /* These breakpoint- and watchpoint-related wrapper functions simply
5874    pass the call on to the target if it has registered a
5875    corresponding function.  */
5876
5877 static int
5878 linux_supports_z_point_type (char z_type)
5879 {
5880 return (the_low_target.supports_z_point_type != NULL
5881 && the_low_target.supports_z_point_type (z_type));
5882 }
5883
5884 static int
5885 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5886 int size, struct raw_breakpoint *bp)
5887 {
5888 if (type == raw_bkpt_type_sw)
5889 return insert_memory_breakpoint (bp);
5890 else if (the_low_target.insert_point != NULL)
5891 return the_low_target.insert_point (type, addr, size, bp);
5892 else
5893 /* Unsupported (see target.h). */
5894 return 1;
5895 }
5896
5897 static int
5898 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5899 int size, struct raw_breakpoint *bp)
5900 {
5901 if (type == raw_bkpt_type_sw)
5902 return remove_memory_breakpoint (bp);
5903 else if (the_low_target.remove_point != NULL)
5904 return the_low_target.remove_point (type, addr, size, bp);
5905 else
5906 /* Unsupported (see target.h). */
5907 return 1;
5908 }
5909
5910 /* Implement the to_stopped_by_sw_breakpoint target_ops
5911 method. */
5912
5913 static int
5914 linux_stopped_by_sw_breakpoint (void)
5915 {
5916 struct lwp_info *lwp = get_thread_lwp (current_thread);
5917
5918 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5919 }
5920
5921 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5922 method. */
5923
5924 static int
5925 linux_supports_stopped_by_sw_breakpoint (void)
5926 {
5927 return USE_SIGTRAP_SIGINFO;
5928 }
5929
5930 /* Implement the to_stopped_by_hw_breakpoint target_ops
5931 method. */
5932
5933 static int
5934 linux_stopped_by_hw_breakpoint (void)
5935 {
5936 struct lwp_info *lwp = get_thread_lwp (current_thread);
5937
5938 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5939 }
5940
5941 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5942 method. */
5943
5944 static int
5945 linux_supports_stopped_by_hw_breakpoint (void)
5946 {
5947 return USE_SIGTRAP_SIGINFO;
5948 }
5949
5950 /* Implement the supports_hardware_single_step target_ops method. */
5951
5952 static int
5953 linux_supports_hardware_single_step (void)
5954 {
5955 return can_hardware_single_step ();
5956 }
5957
5958 static int
5959 linux_supports_software_single_step (void)
5960 {
5961 return can_software_single_step ();
5962 }
5963
5964 static int
5965 linux_stopped_by_watchpoint (void)
5966 {
5967 struct lwp_info *lwp = get_thread_lwp (current_thread);
5968
5969 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5970 }
5971
5972 static CORE_ADDR
5973 linux_stopped_data_address (void)
5974 {
5975 struct lwp_info *lwp = get_thread_lwp (current_thread);
5976
5977 return lwp->stopped_data_address;
5978 }
5979
5980 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5981 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5982 && defined(PT_TEXT_END_ADDR)
5983
5984 /* This is only used for targets that define PT_TEXT_ADDR,
5985    PT_DATA_ADDR and PT_TEXT_END_ADDR.  If those are not defined, the
5986    target presumably acquires this information in some other way, such
5987    as from loadmaps.  */
5988
5989 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5990 to tell gdb about. */
5991
5992 static int
5993 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5994 {
5995 unsigned long text, text_end, data;
5996 int pid = lwpid_of (current_thread);
5997
5998 errno = 0;
5999
6000 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
6001 (PTRACE_TYPE_ARG4) 0);
6002 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
6003 (PTRACE_TYPE_ARG4) 0);
6004 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
6005 (PTRACE_TYPE_ARG4) 0);
6006
6007 if (errno == 0)
6008 {
6009 /* Both text and data offsets produced at compile-time (and so
6010 used by gdb) are relative to the beginning of the program,
6011 with the data segment immediately following the text segment.
6012 However, the actual runtime layout in memory may put the data
6013 somewhere else, so when we send gdb a data base-address, we
6014 use the real data base address and subtract the compile-time
6015 data base-address from it (which is just the length of the
6016 text segment). BSS immediately follows data in both
6017 cases. */
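      /* E.g. with TEXT 0x10000000, TEXT_END 0x10008000 and DATA
	 0x20000000, we report a data base of 0x1fff8000, so that adding
	 the 0x8000 compile-time data offset yields the real
	 0x20000000.  */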
6018 *text_p = text;
6019 *data_p = data - (text_end - text);
6020
6021 return 1;
6022 }
6023 return 0;
6024 }
6025 #endif
6026
6027 static int
6028 linux_qxfer_osdata (const char *annex,
6029 unsigned char *readbuf, unsigned const char *writebuf,
6030 CORE_ADDR offset, int len)
6031 {
6032 return linux_common_xfer_osdata (annex, readbuf, offset, len);
6033 }
6034
6035 /* Convert a native/host siginfo object into/from the siginfo layout
6036    of the inferior's architecture.  */
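/* DIRECTION 0 converts the native object into INF_SIGINFO's layout;
   DIRECTION 1 converts INF_SIGINFO back into the native object,
   matching the memcpy fallback below.  */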
6037
6038 static void
6039 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
6040 {
6041 int done = 0;
6042
6043 if (the_low_target.siginfo_fixup != NULL)
6044 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
6045
6046 /* If there was no callback, or the callback didn't do anything,
6047 then just do a straight memcpy. */
6048 if (!done)
6049 {
6050 if (direction == 1)
6051 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
6052 else
6053 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
6054 }
6055 }
6056
6057 static int
6058 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
6059 unsigned const char *writebuf, CORE_ADDR offset, int len)
6060 {
6061 int pid;
6062 siginfo_t siginfo;
6063 gdb_byte inf_siginfo[sizeof (siginfo_t)];
6064
6065 if (current_thread == NULL)
6066 return -1;
6067
6068 pid = lwpid_of (current_thread);
6069
6070 if (debug_threads)
6071 debug_printf ("%s siginfo for lwp %d.\n",
6072 readbuf != NULL ? "Reading" : "Writing",
6073 pid);
6074
6075 if (offset >= sizeof (siginfo))
6076 return -1;
6077
6078 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6079 return -1;
6080
6081 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
6082 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
6083 inferior with a 64-bit GDBSERVER should look the same as debugging it
6084 with a 32-bit GDBSERVER, we need to convert it. */
6085 siginfo_fixup (&siginfo, inf_siginfo, 0);
6086
6087 if (offset + len > sizeof (siginfo))
6088 len = sizeof (siginfo) - offset;
6089
6090 if (readbuf != NULL)
6091 memcpy (readbuf, inf_siginfo + offset, len);
6092 else
6093 {
6094 memcpy (inf_siginfo + offset, writebuf, len);
6095
6096 /* Convert back to ptrace layout before flushing it out. */
6097 siginfo_fixup (&siginfo, inf_siginfo, 1);
6098
6099 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
6100 return -1;
6101 }
6102
6103 return len;
6104 }
6105
6106 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
6107    it lets us notice when children change state, and it acts as the
6108    handler for the sigsuspend in my_waitpid.  */
6109
6110 static void
6111 sigchld_handler (int signo)
6112 {
6113 int old_errno = errno;
6114
6115 if (debug_threads)
6116 {
6117 do
6118 {
6119 /* fprintf is not async-signal-safe, so call write
6120 directly. */
6121 if (write (2, "sigchld_handler\n",
6122 sizeof ("sigchld_handler\n") - 1) < 0)
6123 break; /* just ignore */
6124 } while (0);
6125 }
6126
6127 if (target_is_async_p ())
6128     async_file_mark (); /* Trigger a linux_wait.  */
6129
6130 errno = old_errno;
6131 }
6132
6133 static int
6134 linux_supports_non_stop (void)
6135 {
6136 return 1;
6137 }
6138
6139 static int
6140 linux_async (int enable)
6141 {
6142 int previous = target_is_async_p ();
6143
6144 if (debug_threads)
6145 debug_printf ("linux_async (%d), previous=%d\n",
6146 enable, previous);
6147
6148 if (previous != enable)
6149 {
6150 sigset_t mask;
6151 sigemptyset (&mask);
6152 sigaddset (&mask, SIGCHLD);
6153
6154 sigprocmask (SIG_BLOCK, &mask, NULL);
6155
6156 if (enable)
6157 {
6158 if (pipe (linux_event_pipe) == -1)
6159 {
6160 linux_event_pipe[0] = -1;
6161 linux_event_pipe[1] = -1;
6162 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6163
6164 warning ("creating event pipe failed.");
6165 return previous;
6166 }
6167
6168 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
6169 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
6170
6171 /* Register the event loop handler. */
6172 add_file_handler (linux_event_pipe[0],
6173 handle_target_event, NULL);
6174
6175 /* Always trigger a linux_wait. */
6176 async_file_mark ();
6177 }
6178 else
6179 {
6180 delete_file_handler (linux_event_pipe[0]);
6181
6182 close (linux_event_pipe[0]);
6183 close (linux_event_pipe[1]);
6184 linux_event_pipe[0] = -1;
6185 linux_event_pipe[1] = -1;
6186 }
6187
6188 sigprocmask (SIG_UNBLOCK, &mask, NULL);
6189 }
6190
6191 return previous;
6192 }
6193
6194 static int
6195 linux_start_non_stop (int nonstop)
6196 {
6197 /* Register or unregister from event-loop accordingly. */
6198 linux_async (nonstop);
6199
6200 if (target_is_async_p () != (nonstop != 0))
6201 return -1;
6202
6203 return 0;
6204 }
6205
6206 static int
6207 linux_supports_multi_process (void)
6208 {
6209 return 1;
6210 }
6211
6212 /* Check if fork events are supported. */
6213
6214 static int
6215 linux_supports_fork_events (void)
6216 {
6217 return linux_supports_tracefork ();
6218 }
6219
6220 /* Check if vfork events are supported. */
6221
6222 static int
6223 linux_supports_vfork_events (void)
6224 {
6225 return linux_supports_tracefork ();
6226 }
6227
6228 /* Check if exec events are supported. */
6229
6230 static int
6231 linux_supports_exec_events (void)
6232 {
6233 return linux_supports_traceexec ();
6234 }
6235
6236 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
6237 options for the specified lwp. */
6238
6239 static int
6240 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
6241 void *args)
6242 {
6243 struct thread_info *thread = (struct thread_info *) entry;
6244 struct lwp_info *lwp = get_thread_lwp (thread);
6245
6246 if (!lwp->stopped)
6247 {
6248 /* Stop the lwp so we can modify its ptrace options. */
6249 lwp->must_set_ptrace_flags = 1;
6250 linux_stop_lwp (lwp);
6251 }
6252 else
6253 {
6254 /* Already stopped; go ahead and set the ptrace options. */
6255 struct process_info *proc = find_process_pid (pid_of (thread));
6256 int options = linux_low_ptrace_options (proc->attached);
6257
6258 linux_enable_event_reporting (lwpid_of (thread), options);
6259 lwp->must_set_ptrace_flags = 0;
6260 }
6261
6262 return 0;
6263 }
6264
6265 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
6266 ptrace flags for all inferiors. This is in case the new GDB connection
6267 doesn't support the same set of events that the previous one did. */
6268
6269 static void
6270 linux_handle_new_gdb_connection (void)
6271 {
6272 pid_t pid;
6273
6274 /* Request that all the lwps reset their ptrace options. */
6275   find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
6276 }
6277
6278 static int
6279 linux_supports_disable_randomization (void)
6280 {
6281 #ifdef HAVE_PERSONALITY
6282 return 1;
6283 #else
6284 return 0;
6285 #endif
6286 }
6287
6288 static int
6289 linux_supports_agent (void)
6290 {
6291 return 1;
6292 }
6293
6294 static int
6295 linux_supports_range_stepping (void)
6296 {
6297 if (*the_low_target.supports_range_stepping == NULL)
6298 return 0;
6299
6300 return (*the_low_target.supports_range_stepping) ();
6301 }
6302
6303 /* Enumerate spufs IDs for process PID. */
6304 static int
6305 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6306 {
6307 int pos = 0;
6308 int written = 0;
6309 char path[128];
6310 DIR *dir;
6311 struct dirent *entry;
6312
6313 sprintf (path, "/proc/%ld/fd", pid);
6314 dir = opendir (path);
6315 if (!dir)
6316 return -1;
6317
6318 rewinddir (dir);
6319 while ((entry = readdir (dir)) != NULL)
6320 {
6321 struct stat st;
6322 struct statfs stfs;
6323 int fd;
6324
6325 fd = atoi (entry->d_name);
6326 if (!fd)
6327 continue;
6328
6329 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6330 if (stat (path, &st) != 0)
6331 continue;
6332 if (!S_ISDIR (st.st_mode))
6333 continue;
6334
6335 if (statfs (path, &stfs) != 0)
6336 continue;
6337 if (stfs.f_type != SPUFS_MAGIC)
6338 continue;
6339
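      /* The IDs form a virtual array of 4-byte slots; copy out only
	 slots lying entirely inside [offset, offset + len).  E.g.
	 offset 4 with len 8 returns the second and third IDs.  */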
6340 if (pos >= offset && pos + 4 <= offset + len)
6341 {
6342 *(unsigned int *)(buf + pos - offset) = fd;
6343 written += 4;
6344 }
6345 pos += 4;
6346 }
6347
6348 closedir (dir);
6349 return written;
6350 }
6351
6352 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6353 object type, using the /proc file system. */
6354 static int
6355 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6356 unsigned const char *writebuf,
6357 CORE_ADDR offset, int len)
6358 {
6359 long pid = lwpid_of (current_thread);
6360 char buf[128];
6361 int fd = 0;
6362 int ret = 0;
6363
6364 if (!writebuf && !readbuf)
6365 return -1;
6366
6367 if (!*annex)
6368 {
6369 if (!readbuf)
6370 return -1;
6371 else
6372 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6373 }
6374
6375 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6376   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
6377 if (fd <= 0)
6378 return -1;
6379
6380 if (offset != 0
6381 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6382 {
6383 close (fd);
6384 return 0;
6385 }
6386
6387 if (writebuf)
6388 ret = write (fd, writebuf, (size_t) len);
6389 else
6390 ret = read (fd, readbuf, (size_t) len);
6391
6392 close (fd);
6393 return ret;
6394 }
6395
6396 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6397 struct target_loadseg
6398 {
6399 /* Core address to which the segment is mapped. */
6400 Elf32_Addr addr;
6401 /* VMA recorded in the program header. */
6402 Elf32_Addr p_vaddr;
6403 /* Size of this segment in memory. */
6404 Elf32_Word p_memsz;
6405 };
6406
6407 # if defined PT_GETDSBT
6408 struct target_loadmap
6409 {
6410 /* Protocol version number, must be zero. */
6411 Elf32_Word version;
6412 /* Pointer to the DSBT table, its size, and the DSBT index. */
6413 unsigned *dsbt_table;
6414 unsigned dsbt_size, dsbt_index;
6415 /* Number of segments in this map. */
6416 Elf32_Word nsegs;
6417 /* The actual memory map. */
6418 struct target_loadseg segs[/*nsegs*/];
6419 };
6420 # define LINUX_LOADMAP PT_GETDSBT
6421 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6422 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6423 # else
6424 struct target_loadmap
6425 {
6426 /* Protocol version number, must be zero. */
6427 Elf32_Half version;
6428 /* Number of segments in this map. */
6429 Elf32_Half nsegs;
6430 /* The actual memory map. */
6431 struct target_loadseg segs[/*nsegs*/];
6432 };
6433 # define LINUX_LOADMAP PTRACE_GETFDPIC
6434 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6435 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6436 # endif
6437
6438 static int
6439 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6440 unsigned char *myaddr, unsigned int len)
6441 {
6442 int pid = lwpid_of (current_thread);
6443 int addr = -1;
6444 struct target_loadmap *data = NULL;
6445 unsigned int actual_length, copy_length;
6446
6447 if (strcmp (annex, "exec") == 0)
6448 addr = (int) LINUX_LOADMAP_EXEC;
6449 else if (strcmp (annex, "interp") == 0)
6450 addr = (int) LINUX_LOADMAP_INTERP;
6451 else
6452 return -1;
6453
6454 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6455 return -1;
6456
6457 if (data == NULL)
6458 return -1;
6459
6460 actual_length = sizeof (struct target_loadmap)
6461 + sizeof (struct target_loadseg) * data->nsegs;
6462
6463 if (offset < 0 || offset > actual_length)
6464 return -1;
6465
6466 copy_length = actual_length - offset < len ? actual_length - offset : len;
6467 memcpy (myaddr, (char *) data + offset, copy_length);
6468 return copy_length;
6469 }
6470 #else
6471 # define linux_read_loadmap NULL
6472 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6473
6474 static void
6475 linux_process_qsupported (char **features, int count)
6476 {
6477 if (the_low_target.process_qsupported != NULL)
6478 the_low_target.process_qsupported (features, count);
6479 }
6480
6481 static int
6482 linux_supports_catch_syscall (void)
6483 {
6484 return (the_low_target.get_syscall_trapinfo != NULL
6485 && linux_supports_tracesysgood ());
6486 }
6487
6488 static int
6489 linux_get_ipa_tdesc_idx (void)
6490 {
6491 if (the_low_target.get_ipa_tdesc_idx == NULL)
6492 return 0;
6493
6494 return (*the_low_target.get_ipa_tdesc_idx) ();
6495 }
6496
6497 static int
6498 linux_supports_tracepoints (void)
6499 {
6500 if (*the_low_target.supports_tracepoints == NULL)
6501 return 0;
6502
6503 return (*the_low_target.supports_tracepoints) ();
6504 }
6505
6506 static CORE_ADDR
6507 linux_read_pc (struct regcache *regcache)
6508 {
6509 if (the_low_target.get_pc == NULL)
6510 return 0;
6511
6512 return (*the_low_target.get_pc) (regcache);
6513 }
6514
6515 static void
6516 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6517 {
6518 gdb_assert (the_low_target.set_pc != NULL);
6519
6520 (*the_low_target.set_pc) (regcache, pc);
6521 }
6522
6523 static int
6524 linux_thread_stopped (struct thread_info *thread)
6525 {
6526 return get_thread_lwp (thread)->stopped;
6527 }
6528
6529 /* This exposes stop-all-threads functionality to other modules. */
6530
6531 static void
6532 linux_pause_all (int freeze)
6533 {
6534 stop_all_lwps (freeze, NULL);
6535 }
6536
6537 /* This exposes unstop-all-threads functionality to other gdbserver
6538 modules. */
6539
6540 static void
6541 linux_unpause_all (int unfreeze)
6542 {
6543 unstop_all_lwps (unfreeze, NULL);
6544 }
6545
6546 static int
6547 linux_prepare_to_access_memory (void)
6548 {
6549 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6550 running LWP. */
6551 if (non_stop)
6552 linux_pause_all (1);
6553 return 0;
6554 }
6555
6556 static void
6557 linux_done_accessing_memory (void)
6558 {
6559 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6560 running LWP. */
6561 if (non_stop)
6562 linux_unpause_all (1);
6563 }
6564
6565 static int
6566 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6567 CORE_ADDR collector,
6568 CORE_ADDR lockaddr,
6569 ULONGEST orig_size,
6570 CORE_ADDR *jump_entry,
6571 CORE_ADDR *trampoline,
6572 ULONGEST *trampoline_size,
6573 unsigned char *jjump_pad_insn,
6574 ULONGEST *jjump_pad_insn_size,
6575 CORE_ADDR *adjusted_insn_addr,
6576 CORE_ADDR *adjusted_insn_addr_end,
6577 char *err)
6578 {
6579 return (*the_low_target.install_fast_tracepoint_jump_pad)
6580 (tpoint, tpaddr, collector, lockaddr, orig_size,
6581 jump_entry, trampoline, trampoline_size,
6582 jjump_pad_insn, jjump_pad_insn_size,
6583 adjusted_insn_addr, adjusted_insn_addr_end,
6584 err);
6585 }
6586
6587 static struct emit_ops *
6588 linux_emit_ops (void)
6589 {
6590 if (the_low_target.emit_ops != NULL)
6591 return (*the_low_target.emit_ops) ();
6592 else
6593 return NULL;
6594 }
6595
6596 static int
6597 linux_get_min_fast_tracepoint_insn_len (void)
6598 {
6599 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6600 }
6601
6602 /* Extract &phdr and num_phdr from the inferior's auxv.  Return 0 on success.  */
6603
6604 static int
6605 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6606 CORE_ADDR *phdr_memaddr, int *num_phdr)
6607 {
6608 char filename[PATH_MAX];
6609 int fd;
6610 const int auxv_size = is_elf64
6611 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6612 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6613
6614 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6615
6616 fd = open (filename, O_RDONLY);
6617 if (fd < 0)
6618 return 1;
6619
6620 *phdr_memaddr = 0;
6621 *num_phdr = 0;
6622 while (read (fd, buf, auxv_size) == auxv_size
6623 && (*phdr_memaddr == 0 || *num_phdr == 0))
6624 {
6625 if (is_elf64)
6626 {
6627 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6628
6629 switch (aux->a_type)
6630 {
6631 case AT_PHDR:
6632 *phdr_memaddr = aux->a_un.a_val;
6633 break;
6634 case AT_PHNUM:
6635 *num_phdr = aux->a_un.a_val;
6636 break;
6637 }
6638 }
6639 else
6640 {
6641 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6642
6643 switch (aux->a_type)
6644 {
6645 case AT_PHDR:
6646 *phdr_memaddr = aux->a_un.a_val;
6647 break;
6648 case AT_PHNUM:
6649 *num_phdr = aux->a_un.a_val;
6650 break;
6651 }
6652 }
6653 }
6654
6655 close (fd);
6656
6657 if (*phdr_memaddr == 0 || *num_phdr == 0)
6658 {
6659 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6660 "phdr_memaddr = %ld, phdr_num = %d",
6661 (long) *phdr_memaddr, *num_phdr);
6662 return 2;
6663 }
6664
6665 return 0;
6666 }
6667
6668 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6669
6670 static CORE_ADDR
6671 get_dynamic (const int pid, const int is_elf64)
6672 {
6673 CORE_ADDR phdr_memaddr, relocation;
6674 int num_phdr, i;
6675 unsigned char *phdr_buf;
6676 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6677
6678 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6679 return 0;
6680
6681 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6682 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6683
6684 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6685 return 0;
6686
6687 /* Compute relocation: it is expected to be 0 for "regular" executables,
6688 non-zero for PIE ones. */
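  /* E.g. a PIE whose PT_PHDR records p_vaddr 0x40 while its program
     headers actually sit at 0x555555554040 yields a relocation of
     0x555555554000, which is applied below to PT_DYNAMIC's p_vaddr.  */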
6689 relocation = -1;
6690 for (i = 0; relocation == -1 && i < num_phdr; i++)
6691 if (is_elf64)
6692 {
6693 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6694
6695 if (p->p_type == PT_PHDR)
6696 relocation = phdr_memaddr - p->p_vaddr;
6697 }
6698 else
6699 {
6700 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6701
6702 if (p->p_type == PT_PHDR)
6703 relocation = phdr_memaddr - p->p_vaddr;
6704 }
6705
6706 if (relocation == -1)
6707 {
6708       /* PT_PHDR is optional, but necessary for PIE in general.
6709 	 Fortunately, real-world executables, including PIE ones, always
6710 	 have PT_PHDR.  It is absent from some shared libraries and from
6711 	 fpc (Free Pascal 2.4) binaries, but neither of those needs or
6712 	 provides DT_DEBUG anyway (fpc binaries are statically linked).
6713 
6714 	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
6715 
6716 	 GDB could also find RELOCATION from AT_ENTRY - e_entry.  */
6717
6718 return 0;
6719 }
6720
6721 for (i = 0; i < num_phdr; i++)
6722 {
6723 if (is_elf64)
6724 {
6725 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6726
6727 if (p->p_type == PT_DYNAMIC)
6728 return p->p_vaddr + relocation;
6729 }
6730 else
6731 {
6732 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6733
6734 if (p->p_type == PT_DYNAMIC)
6735 return p->p_vaddr + relocation;
6736 }
6737 }
6738
6739 return 0;
6740 }
6741
6742 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6743 can be 0 if the inferior does not yet have the library list initialized.
6744 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6745 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6746
6747 static CORE_ADDR
6748 get_r_debug (const int pid, const int is_elf64)
6749 {
6750 CORE_ADDR dynamic_memaddr;
6751 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6752 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6753 CORE_ADDR map = -1;
6754
6755 dynamic_memaddr = get_dynamic (pid, is_elf64);
6756 if (dynamic_memaddr == 0)
6757 return map;
6758
6759 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6760 {
6761 if (is_elf64)
6762 {
6763 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6764 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6765 union
6766 {
6767 Elf64_Xword map;
6768 unsigned char buf[sizeof (Elf64_Xword)];
6769 }
6770 rld_map;
6771 #endif
6772 #ifdef DT_MIPS_RLD_MAP
6773 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6774 {
6775 if (linux_read_memory (dyn->d_un.d_val,
6776 rld_map.buf, sizeof (rld_map.buf)) == 0)
6777 return rld_map.map;
6778 else
6779 break;
6780 }
6781 #endif /* DT_MIPS_RLD_MAP */
6782 #ifdef DT_MIPS_RLD_MAP_REL
6783 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6784 {
6785 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6786 rld_map.buf, sizeof (rld_map.buf)) == 0)
6787 return rld_map.map;
6788 else
6789 break;
6790 }
6791 #endif /* DT_MIPS_RLD_MAP_REL */
6792
6793 if (dyn->d_tag == DT_DEBUG && map == -1)
6794 map = dyn->d_un.d_val;
6795
6796 if (dyn->d_tag == DT_NULL)
6797 break;
6798 }
6799 else
6800 {
6801 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6802 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6803 union
6804 {
6805 Elf32_Word map;
6806 unsigned char buf[sizeof (Elf32_Word)];
6807 }
6808 rld_map;
6809 #endif
6810 #ifdef DT_MIPS_RLD_MAP
6811 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6812 {
6813 if (linux_read_memory (dyn->d_un.d_val,
6814 rld_map.buf, sizeof (rld_map.buf)) == 0)
6815 return rld_map.map;
6816 else
6817 break;
6818 }
6819 #endif /* DT_MIPS_RLD_MAP */
6820 #ifdef DT_MIPS_RLD_MAP_REL
6821 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6822 {
6823 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6824 rld_map.buf, sizeof (rld_map.buf)) == 0)
6825 return rld_map.map;
6826 else
6827 break;
6828 }
6829 #endif /* DT_MIPS_RLD_MAP_REL */
6830
6831 if (dyn->d_tag == DT_DEBUG && map == -1)
6832 map = dyn->d_un.d_val;
6833
6834 if (dyn->d_tag == DT_NULL)
6835 break;
6836 }
6837
6838 dynamic_memaddr += dyn_size;
6839 }
6840
6841 return map;
6842 }
6843
6844 /* Read one pointer from MEMADDR in the inferior. */
6845
6846 static int
6847 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6848 {
6849 int ret;
6850
6851 /* Go through a union so this works on either big or little endian
6852 hosts, when the inferior's pointer size is smaller than the size
6853 of CORE_ADDR. It is assumed the inferior's endianness is the
6854      same as the superior's.  */
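  /* E.g. on a big-endian host with an 8-byte CORE_ADDR debugging a
     32-bit inferior, the 4 bytes read land at the start of the union
     and are picked up through the 4-byte UI member, which shares that
     address; reading them through CORE_ADDR would instead treat them
     as the high half of an 8-byte value.  */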
6855 union
6856 {
6857 CORE_ADDR core_addr;
6858 unsigned int ui;
6859 unsigned char uc;
6860 } addr;
6861
6862 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6863 if (ret == 0)
6864 {
6865 if (ptr_size == sizeof (CORE_ADDR))
6866 *ptr = addr.core_addr;
6867 else if (ptr_size == sizeof (unsigned int))
6868 *ptr = addr.ui;
6869 else
6870 gdb_assert_not_reached ("unhandled pointer size");
6871 }
6872 return ret;
6873 }
6874
6875 struct link_map_offsets
6876 {
6877 /* Offset and size of r_debug.r_version. */
6878 int r_version_offset;
6879
6880 /* Offset and size of r_debug.r_map. */
6881 int r_map_offset;
6882
6883 /* Offset to l_addr field in struct link_map. */
6884 int l_addr_offset;
6885
6886 /* Offset to l_name field in struct link_map. */
6887 int l_name_offset;
6888
6889 /* Offset to l_ld field in struct link_map. */
6890 int l_ld_offset;
6891
6892 /* Offset to l_next field in struct link_map. */
6893 int l_next_offset;
6894
6895 /* Offset to l_prev field in struct link_map. */
6896 int l_prev_offset;
6897 };
6898
6899 /* Construct qXfer:libraries-svr4:read reply. */
6900
6901 static int
6902 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6903 unsigned const char *writebuf,
6904 CORE_ADDR offset, int len)
6905 {
6906 char *document;
6907 unsigned document_len;
6908 struct process_info_private *const priv = current_process ()->priv;
6909 char filename[PATH_MAX];
6910 int pid, is_elf64;
6911
6912 static const struct link_map_offsets lmo_32bit_offsets =
6913 {
6914 0, /* r_version offset. */
6915 4, /* r_debug.r_map offset. */
6916 0, /* l_addr offset in link_map. */
6917 4, /* l_name offset in link_map. */
6918 8, /* l_ld offset in link_map. */
6919 12, /* l_next offset in link_map. */
6920 16 /* l_prev offset in link_map. */
6921 };
6922
6923 static const struct link_map_offsets lmo_64bit_offsets =
6924 {
6925 0, /* r_version offset. */
6926 8, /* r_debug.r_map offset. */
6927 0, /* l_addr offset in link_map. */
6928 8, /* l_name offset in link_map. */
6929 16, /* l_ld offset in link_map. */
6930 24, /* l_next offset in link_map. */
6931 32 /* l_prev offset in link_map. */
6932 };
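  /* Both tables mirror glibc's struct r_debug and struct link_map for
     ILP32 and LP64 targets; e.g. in the 64-bit layout r_map sits at
     offset 8 because the 4-byte r_version is padded up to pointer
     alignment.  */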
6933 const struct link_map_offsets *lmo;
6934 unsigned int machine;
6935 int ptr_size;
6936 CORE_ADDR lm_addr = 0, lm_prev = 0;
6937 int allocated = 1024;
6938 char *p;
6939 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6940 int header_done = 0;
6941
6942 if (writebuf != NULL)
6943 return -2;
6944 if (readbuf == NULL)
6945 return -1;
6946
6947 pid = lwpid_of (current_thread);
6948 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6949 is_elf64 = elf_64_file_p (filename, &machine);
6950 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6951 ptr_size = is_elf64 ? 8 : 4;
6952
6953 while (annex[0] != '\0')
6954 {
6955 const char *sep;
6956 CORE_ADDR *addrp;
6957 int len;
6958
6959 sep = strchr (annex, '=');
6960 if (sep == NULL)
6961 break;
6962
6963 len = sep - annex;
6964 if (len == 5 && startswith (annex, "start"))
6965 addrp = &lm_addr;
6966 else if (len == 4 && startswith (annex, "prev"))
6967 addrp = &lm_prev;
6968 else
6969 {
6970 annex = strchr (sep, ';');
6971 if (annex == NULL)
6972 break;
6973 annex++;
6974 continue;
6975 }
6976
6977 annex = decode_address_to_semicolon (addrp, sep + 1);
6978 }
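  /* The annex is a sequence of name=value pairs, e.g.
     "start=555555554000;prev=0", naming the link-map entry to resume
     the walk from and the entry expected to precede it.  */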
6979
6980 if (lm_addr == 0)
6981 {
6982 int r_version = 0;
6983
6984 if (priv->r_debug == 0)
6985 priv->r_debug = get_r_debug (pid, is_elf64);
6986
6987       /* We failed to find DT_DEBUG.  That situation will not change
6988 	 for this inferior, so do not retry.  Report it to GDB as E01;
6989 	 see GDB's solib-svr4.c for the reasons.  */
6990 if (priv->r_debug == (CORE_ADDR) -1)
6991 return -1;
6992
6993 if (priv->r_debug != 0)
6994 {
6995 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6996 (unsigned char *) &r_version,
6997 sizeof (r_version)) != 0
6998 || r_version != 1)
6999 {
7000 warning ("unexpected r_debug version %d", r_version);
7001 }
7002 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
7003 &lm_addr, ptr_size) != 0)
7004 {
7005 warning ("unable to read r_map from 0x%lx",
7006 (long) priv->r_debug + lmo->r_map_offset);
7007 }
7008 }
7009 }
7010
7011 document = (char *) xmalloc (allocated);
7012 strcpy (document, "<library-list-svr4 version=\"1.0\"");
7013 p = document + strlen (document);
7014
7015 while (lm_addr
7016 && read_one_ptr (lm_addr + lmo->l_name_offset,
7017 &l_name, ptr_size) == 0
7018 && read_one_ptr (lm_addr + lmo->l_addr_offset,
7019 &l_addr, ptr_size) == 0
7020 && read_one_ptr (lm_addr + lmo->l_ld_offset,
7021 &l_ld, ptr_size) == 0
7022 && read_one_ptr (lm_addr + lmo->l_prev_offset,
7023 &l_prev, ptr_size) == 0
7024 && read_one_ptr (lm_addr + lmo->l_next_offset,
7025 &l_next, ptr_size) == 0)
7026 {
7027 unsigned char libname[PATH_MAX];
7028
7029 if (lm_prev != l_prev)
7030 {
7031 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
7032 (long) lm_prev, (long) l_prev);
7033 break;
7034 }
7035
7036       /* Ignore the first entry even if it has a valid name, since it
7037 	 corresponds to the main executable.  The first entry should not
7038 	 be skipped if the dynamic loader was loaded late by a static
7039 	 executable (see the solib-svr4.c parameter ignore_first), but in
7040 	 that case the main executable has no PT_DYNAMIC and this function
7041 	 has already exited above because get_r_debug failed.  */
7042 if (lm_prev == 0)
7043 {
7044 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
7045 p = p + strlen (p);
7046 }
7047 else
7048 {
7049 /* Not checking for error because reading may stop before
7050 we've got PATH_MAX worth of characters. */
7051 libname[0] = '\0';
7052 linux_read_memory (l_name, libname, sizeof (libname) - 1);
7053 libname[sizeof (libname) - 1] = '\0';
7054 if (libname[0] != '\0')
7055 {
7056 /* 6x the size for xml_escape_text below. */
7057 size_t len = 6 * strlen ((char *) libname);
7058 char *name;
7059
7060 if (!header_done)
7061 {
7062 /* Terminate `<library-list-svr4'. */
7063 *p++ = '>';
7064 header_done = 1;
7065 }
7066
7067 while (allocated < p - document + len + 200)
7068 {
7069 /* Expand to guarantee sufficient storage. */
7070 uintptr_t document_len = p - document;
7071
7072 document = (char *) xrealloc (document, 2 * allocated);
7073 allocated *= 2;
7074 p = document + document_len;
7075 }
7076
7077 name = xml_escape_text ((char *) libname);
7078 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
7079 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
7080 name, (unsigned long) lm_addr,
7081 (unsigned long) l_addr, (unsigned long) l_ld);
7082 free (name);
7083 }
7084 }
7085
7086 lm_prev = lm_addr;
7087 lm_addr = l_next;
7088 }
7089
7090 if (!header_done)
7091 {
7092 /* Empty list; terminate `<library-list-svr4'. */
7093 strcpy (p, "/>");
7094 }
7095 else
7096 strcpy (p, "</library-list-svr4>");
7097
7098 document_len = strlen (document);
7099 if (offset < document_len)
7100 document_len -= offset;
7101 else
7102 document_len = 0;
7103 if (len > document_len)
7104 len = document_len;
7105
7106 memcpy (readbuf, document + offset, len);
7107 xfree (document);
7108
7109 return len;
7110 }
7111
7112 #ifdef HAVE_LINUX_BTRACE
7113
7114 /* See to_disable_btrace target method. */
7115
7116 static int
7117 linux_low_disable_btrace (struct btrace_target_info *tinfo)
7118 {
7119 enum btrace_error err;
7120
7121 err = linux_disable_btrace (tinfo);
7122 return (err == BTRACE_ERR_NONE ? 0 : -1);
7123 }
7124
7125 /* Encode an Intel Processor Trace configuration. */
7126
7127 static void
7128 linux_low_encode_pt_config (struct buffer *buffer,
7129 const struct btrace_data_pt_config *config)
7130 {
7131 buffer_grow_str (buffer, "<pt-config>\n");
7132
7133 switch (config->cpu.vendor)
7134 {
7135 case CV_INTEL:
7136 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
7137 "model=\"%u\" stepping=\"%u\"/>\n",
7138 config->cpu.family, config->cpu.model,
7139 config->cpu.stepping);
7140 break;
7141
7142 default:
7143 break;
7144 }
7145
7146 buffer_grow_str (buffer, "</pt-config>\n");
7147 }
7148
7149 /* Encode a raw buffer. */
7150
7151 static void
7152 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
7153 unsigned int size)
7154 {
7155 if (size == 0)
7156 return;
7157
7158 /* We use hex encoding - see common/rsp-low.h. */
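  /* E.g. the byte 0xa5 is emitted as the two ASCII characters "a5",
     high nibble first.  */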
7159 buffer_grow_str (buffer, "<raw>\n");
7160
7161 while (size-- > 0)
7162 {
7163 char elem[2];
7164
7165 elem[0] = tohex ((*data >> 4) & 0xf);
7166 elem[1] = tohex (*data++ & 0xf);
7167
7168 buffer_grow (buffer, elem, 2);
7169 }
7170
7171 buffer_grow_str (buffer, "</raw>\n");
7172 }
7173
7174 /* See to_read_btrace target method. */
7175
7176 static int
7177 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
7178 enum btrace_read_type type)
7179 {
7180 struct btrace_data btrace;
7181 struct btrace_block *block;
7182 enum btrace_error err;
7183 int i;
7184
7185 btrace_data_init (&btrace);
7186
7187 err = linux_read_btrace (&btrace, tinfo, type);
7188 if (err != BTRACE_ERR_NONE)
7189 {
7190 if (err == BTRACE_ERR_OVERFLOW)
7191 buffer_grow_str0 (buffer, "E.Overflow.");
7192 else
7193 buffer_grow_str0 (buffer, "E.Generic Error.");
7194
7195 goto err;
7196 }
7197
7198 switch (btrace.format)
7199 {
7200 case BTRACE_FORMAT_NONE:
7201 buffer_grow_str0 (buffer, "E.No Trace.");
7202 goto err;
7203
7204 case BTRACE_FORMAT_BTS:
7205 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7206 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7207
7208 for (i = 0;
7209 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
7210 i++)
7211 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
7212 paddress (block->begin), paddress (block->end));
7213
7214 buffer_grow_str0 (buffer, "</btrace>\n");
7215 break;
7216
7217 case BTRACE_FORMAT_PT:
7218 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
7219 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
7220 buffer_grow_str (buffer, "<pt>\n");
7221
7222 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
7223
7224 linux_low_encode_raw (buffer, btrace.variant.pt.data,
7225 btrace.variant.pt.size);
7226
7227 buffer_grow_str (buffer, "</pt>\n");
7228 buffer_grow_str0 (buffer, "</btrace>\n");
7229 break;
7230
7231 default:
7232 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
7233 goto err;
7234 }
7235
7236 btrace_data_fini (&btrace);
7237 return 0;
7238
7239 err:
7240 btrace_data_fini (&btrace);
7241 return -1;
7242 }
7243
7244 /* See to_btrace_conf target method. */
7245
7246 static int
7247 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
7248 struct buffer *buffer)
7249 {
7250 const struct btrace_config *conf;
7251
7252 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
7253 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
7254
7255 conf = linux_btrace_conf (tinfo);
7256 if (conf != NULL)
7257 {
7258 switch (conf->format)
7259 {
7260 case BTRACE_FORMAT_NONE:
7261 break;
7262
7263 case BTRACE_FORMAT_BTS:
7264 buffer_xml_printf (buffer, "<bts");
7265 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
7266 buffer_xml_printf (buffer, " />\n");
7267 break;
7268
7269 case BTRACE_FORMAT_PT:
7270 buffer_xml_printf (buffer, "<pt");
7271 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
7272 buffer_xml_printf (buffer, "/>\n");
7273 break;
7274 }
7275 }
7276
7277 buffer_grow_str0 (buffer, "</btrace-conf>\n");
7278 return 0;
7279 }
7280 #endif /* HAVE_LINUX_BTRACE */
7281
7282 /* See nat/linux-nat.h. */
7283
7284 ptid_t
7285 current_lwp_ptid (void)
7286 {
7287 return ptid_of (current_thread);
7288 }
7289
7290 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7291
7292 static int
7293 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7294 {
7295 if (the_low_target.breakpoint_kind_from_pc != NULL)
7296 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7297 else
7298 return default_breakpoint_kind_from_pc (pcptr);
7299 }
7300
7301 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7302
7303 static const gdb_byte *
7304 linux_sw_breakpoint_from_kind (int kind, int *size)
7305 {
7306 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7307
7308 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7309 }
7310
7311 /* Implementation of the target_ops method
7312 "breakpoint_kind_from_current_state". */
7313
7314 static int
7315 linux_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
7316 {
7317 if (the_low_target.breakpoint_kind_from_current_state != NULL)
7318 return (*the_low_target.breakpoint_kind_from_current_state) (pcptr);
7319 else
7320 return linux_breakpoint_kind_from_pc (pcptr);
7321 }
7322
7323 /* Default implementation of the linux_target_ops method "set_pc" for
7324    a 32-bit pc register that is literally named "pc".  */
7325
7326 void
7327 linux_set_pc_32bit (struct regcache *regcache, CORE_ADDR pc)
7328 {
7329 uint32_t newpc = pc;
7330
7331 supply_register_by_name (regcache, "pc", &newpc);
7332 }
7333
7334 /* Default implementation of the linux_target_ops method "get_pc" for
7335    a 32-bit pc register that is literally named "pc".  */
7336
7337 CORE_ADDR
7338 linux_get_pc_32bit (struct regcache *regcache)
7339 {
7340 uint32_t pc;
7341
7342 collect_register_by_name (regcache, "pc", &pc);
7343 if (debug_threads)
7344 debug_printf ("stop pc is 0x%" PRIx32 "\n", pc);
7345 return pc;
7346 }
7347
7348 /* Default implementation of the linux_target_ops method "set_pc" for
7349    a 64-bit pc register that is literally named "pc".  */
7350
7351 void
7352 linux_set_pc_64bit (struct regcache *regcache, CORE_ADDR pc)
7353 {
7354 uint64_t newpc = pc;
7355
7356 supply_register_by_name (regcache, "pc", &newpc);
7357 }
7358
7359 /* Default implementation of the linux_target_ops method "get_pc" for
7360    a 64-bit pc register that is literally named "pc".  */
7361
7362 CORE_ADDR
7363 linux_get_pc_64bit (struct regcache *regcache)
7364 {
7365 uint64_t pc;
7366
7367 collect_register_by_name (regcache, "pc", &pc);
7368 if (debug_threads)
7369 debug_printf ("stop pc is 0x%" PRIx64 "\n", pc);
7370 return pc;
7371 }
7372
7373
7374 static struct target_ops linux_target_ops = {
7375 linux_create_inferior,
7376 linux_post_create_inferior,
7377 linux_attach,
7378 linux_kill,
7379 linux_detach,
7380 linux_mourn,
7381 linux_join,
7382 linux_thread_alive,
7383 linux_resume,
7384 linux_wait,
7385 linux_fetch_registers,
7386 linux_store_registers,
7387 linux_prepare_to_access_memory,
7388 linux_done_accessing_memory,
7389 linux_read_memory,
7390 linux_write_memory,
7391 linux_look_up_symbols,
7392 linux_request_interrupt,
7393 linux_read_auxv,
7394 linux_supports_z_point_type,
7395 linux_insert_point,
7396 linux_remove_point,
7397 linux_stopped_by_sw_breakpoint,
7398 linux_supports_stopped_by_sw_breakpoint,
7399 linux_stopped_by_hw_breakpoint,
7400 linux_supports_stopped_by_hw_breakpoint,
7401 linux_supports_hardware_single_step,
7402 linux_stopped_by_watchpoint,
7403 linux_stopped_data_address,
7404 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7405 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7406 && defined(PT_TEXT_END_ADDR)
7407 linux_read_offsets,
7408 #else
7409 NULL,
7410 #endif
7411 #ifdef USE_THREAD_DB
7412 thread_db_get_tls_address,
7413 #else
7414 NULL,
7415 #endif
7416 linux_qxfer_spu,
7417 hostio_last_error_from_errno,
7418 linux_qxfer_osdata,
7419 linux_xfer_siginfo,
7420 linux_supports_non_stop,
7421 linux_async,
7422 linux_start_non_stop,
7423 linux_supports_multi_process,
7424 linux_supports_fork_events,
7425 linux_supports_vfork_events,
7426 linux_supports_exec_events,
7427 linux_handle_new_gdb_connection,
7428 #ifdef USE_THREAD_DB
7429 thread_db_handle_monitor_command,
7430 #else
7431 NULL,
7432 #endif
7433 linux_common_core_of_thread,
7434 linux_read_loadmap,
7435 linux_process_qsupported,
7436 linux_supports_tracepoints,
7437 linux_read_pc,
7438 linux_write_pc,
7439 linux_thread_stopped,
7440 NULL,
7441 linux_pause_all,
7442 linux_unpause_all,
7443 linux_stabilize_threads,
7444 linux_install_fast_tracepoint_jump_pad,
7445 linux_emit_ops,
7446 linux_supports_disable_randomization,
7447 linux_get_min_fast_tracepoint_insn_len,
7448 linux_qxfer_libraries_svr4,
7449 linux_supports_agent,
7450 #ifdef HAVE_LINUX_BTRACE
7451 linux_supports_btrace,
7452 linux_enable_btrace,
7453 linux_low_disable_btrace,
7454 linux_low_read_btrace,
7455 linux_low_btrace_conf,
7456 #else
7457 NULL,
7458 NULL,
7459 NULL,
7460 NULL,
7461 NULL,
7462 #endif
7463 linux_supports_range_stepping,
7464 linux_proc_pid_to_exec_file,
7465 linux_mntns_open_cloexec,
7466 linux_mntns_unlink,
7467 linux_mntns_readlink,
7468 linux_breakpoint_kind_from_pc,
7469 linux_sw_breakpoint_from_kind,
7470 linux_proc_tid_get_name,
7471 linux_breakpoint_kind_from_current_state,
7472 linux_supports_software_single_step,
7473 linux_supports_catch_syscall,
7474 linux_get_ipa_tdesc_idx,
7475 };
7476
7477 #ifdef HAVE_LINUX_REGSETS
7478 void
7479 initialize_regsets_info (struct regsets_info *info)
7480 {
7481 for (info->num_regsets = 0;
7482 info->regsets[info->num_regsets].size >= 0;
7483 info->num_regsets++)
7484 ;
7485 }
7486 #endif
7487
7488 void
7489 initialize_low (void)
7490 {
7491 struct sigaction sigchld_action;
7492
7493 memset (&sigchld_action, 0, sizeof (sigchld_action));
7494 set_target_ops (&linux_target_ops);
7495
7496 linux_ptrace_init_warnings ();
7497
7498 sigchld_action.sa_handler = sigchld_handler;
7499 sigemptyset (&sigchld_action.sa_mask);
7500 sigchld_action.sa_flags = SA_RESTART;
7501 sigaction (SIGCHLD, &sigchld_action, NULL);
7502
7503 initialize_low_arch ();
7504
7505 linux_check_ptrace_features ();
7506 }