/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

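/* Fall back to the kernel's generic values for these ptrace requests
   if the system headers are too old to define them.  */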
#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

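/* Build a wait status for which WIFSTOPPED is true and WSTOPSIG
   yields SIG, for systems whose headers do not provide W_STOPCODE.  */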
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

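/* Types of the third and fourth arguments to ptrace, and of the word
   transferred by PTRACE_PEEKTEXT and friends.  */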
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

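/* Remove LWP from the lwp and thread lists, and free the memory
   associated with it.  */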
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

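          /* Alternate between checking for clone and for non-clone
             children; together the two flavors cover everything that
             __WALL would.  */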
          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
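  /* The extended event code is carried in bits 16 and up of the
     wait status.  */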
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

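  /* Only back the PC up over a breakpoint for a plain SIGTRAP: not
     when single-stepping, not for a watchpoint trap, and not for an
     extended ptrace event (high bits of the status set).  */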
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

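  /* fork is not available on no-MMU targets, so use vfork there
     instead.  */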
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}

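/* Helper data and callbacks used to decide whether a thread is the
   last (only remaining) thread of its process.  */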
struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

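/* Wait until process PID has exited, so that it is reaped and does
   not linger as a zombie.  */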
static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

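/* find_inferior callback: return true if ENTRY's LWP ID matches the
   ptid in DATA, falling back to the ptid's pid field when its lwp
   field is zero.  */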
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

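/* Return the LWP in ALL_LWPS matching PTID, or NULL if there is
   none.  */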
struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

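  /* Signals 32 and 33 are presumably the threading library's internal
     signals (see the LinuxThreads note in my_waitpid); don't clutter
     the debug output with them.  */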
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* We resume in the caller rather than here because we want to be
   able to pass LWP->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));
              current_inferior = saved_inferior;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                fprintf (stderr,
                         "Not requeuing already queued non-RT signal %d"
                         " for LWP %ld\n",
                         sig->signal,
                         lwpid_of (lwp));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}

/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
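      /* Scale rand's [0, RAND_MAX] result down to a uniform index in
         [0, num_events).  */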
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through memcpy'ing
   to the buffer, which would mean that when resumed, it would clobber
   the trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
                 lwpid_of (lwp_stuck));
      return;
    }

  save_inferior = current_inferior;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

1938 /* Note that we go through the full wait event loop. While
1939 moving threads out of the jump pad, we need to be able to step
1940 over internal breakpoints and such. */
1941 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
1942
1943 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
1944 {
1945 lwp = get_thread_lwp (current_inferior);
1946
1947 /* Lock it. */
1948 lwp->suspended++;
1949
1950 if (ourstatus.value.sig != TARGET_SIGNAL_0
1951 || current_inferior->last_resume_kind == resume_stop)
1952 {
1953 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
1954 enqueue_one_deferred_signal (lwp, &wstat);
1955 }
1956 }
1957 }
1958
1959 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
1960
1961 stabilizing_threads = 0;
1962
1963 current_inferior = save_inferior;
1964
1965 if (debug_threads)
1966 {
1967 lwp_stuck
1968 = (struct lwp_info *) find_inferior (&all_lwps,
1969 stuck_in_jump_pad_callback, NULL);
1970 if (lwp_stuck != NULL)
1971 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
1972 lwpid_of (lwp_stuck));
1973 }
1974 }
1975
1976 /* Wait for process, returns status. */
1977
1978 static ptid_t
1979 linux_wait_1 (ptid_t ptid,
1980 struct target_waitstatus *ourstatus, int target_options)
1981 {
1982 int w;
1983 struct lwp_info *event_child;
1984 int options;
1985 int pid;
1986 int step_over_finished;
1987 int bp_explains_trap;
1988 int maybe_internal_trap;
1989 int report_to_gdb;
1990 int trace_event;
1991
1992 /* Translate generic target options into linux options. */
1993 options = __WALL;
1994 if (target_options & TARGET_WNOHANG)
1995 options |= WNOHANG;
1996
1997 retry:
1998 bp_explains_trap = 0;
1999 trace_event = 0;
2000 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2001
2002 /* If we were only supposed to resume one thread, only wait for
2003 that thread - if it's still alive. If it died, however - which
2004 can happen if we're coming from the thread death case below -
2005 then we need to make sure we restart the other threads. We could
2006 pick a thread at random or restart all; restarting all is less
2007 arbitrary. */
2008 if (!non_stop
2009 && !ptid_equal (cont_thread, null_ptid)
2010 && !ptid_equal (cont_thread, minus_one_ptid))
2011 {
2012 struct thread_info *thread;
2013
2014 thread = (struct thread_info *) find_inferior_id (&all_threads,
2015 cont_thread);
2016
2017 /* No stepping, no signal - unless one is pending already, of course. */
2018 if (thread == NULL)
2019 {
2020 struct thread_resume resume_info;
2021 resume_info.thread = minus_one_ptid;
2022 resume_info.kind = resume_continue;
2023 resume_info.sig = 0;
2024 linux_resume (&resume_info, 1);
2025 }
2026 else
2027 ptid = cont_thread;
2028 }
2029
2030 if (ptid_equal (step_over_bkpt, null_ptid))
2031 pid = linux_wait_for_event (ptid, &w, options);
2032 else
2033 {
2034 if (debug_threads)
2035 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2036 target_pid_to_str (step_over_bkpt));
2037 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2038 }
2039
2040 if (pid == 0) /* only if TARGET_WNOHANG */
2041 return null_ptid;
2042
2043 event_child = get_thread_lwp (current_inferior);
2044
2045 /* If we are waiting for a particular child, and it exited,
2046 linux_wait_for_event will return its exit status. Similarly if
2047 the last child exited. If this is not the last child, however,
2048 do not report it as exited until there is a 'thread exited' response
2049 available in the remote protocol. Instead, just wait for another event.
2050 This should be safe, because if the thread crashed we will already
2051 have reported the termination signal to GDB; that should stop any
2052 in-progress stepping operations, etc.
2053
2054 Report the exit status of the last thread to exit. This matches
2055 LinuxThreads' behavior. */
2056
2057 if (last_thread_of_process_p (current_inferior))
2058 {
2059 if (WIFEXITED (w) || WIFSIGNALED (w))
2060 {
2061 if (WIFEXITED (w))
2062 {
2063 ourstatus->kind = TARGET_WAITKIND_EXITED;
2064 ourstatus->value.integer = WEXITSTATUS (w);
2065
2066 if (debug_threads)
2067 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
2068 }
2069 else
2070 {
2071 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2072 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2073
2074 if (debug_threads)
2075 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
2076
2077 }
2078
2079 return ptid_of (event_child);
2080 }
2081 }
2082 else
2083 {
2084 if (!WIFSTOPPED (w))
2085 goto retry;
2086 }
2087
2088 /* If this event was not handled before, and is not a SIGTRAP, we
2089 report it. SIGILL and SIGSEGV are also treated as traps in case
2090 a breakpoint is inserted at the current PC. If this target does
2091 not support internal breakpoints at all, we also report the
2092 SIGTRAP without further processing; it's of no concern to us. */
2093 maybe_internal_trap
2094 = (supports_breakpoints ()
2095 && (WSTOPSIG (w) == SIGTRAP
2096 || ((WSTOPSIG (w) == SIGILL
2097 || WSTOPSIG (w) == SIGSEGV)
2098 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2099
2100 if (maybe_internal_trap)
2101 {
2102 /* Handle anything that requires bookkeeping before deciding to
2103 report the event or continue waiting. */
2104
2105 /* First check if we can explain the SIGTRAP with an internal
2106 breakpoint, or if we should possibly report the event to GDB.
2107 Do this before anything that may remove or insert a
2108 breakpoint. */
2109 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2110
2111 /* We have a SIGTRAP, possibly a step-over dance has just
2112 finished. If so, tweak the state machine accordingly,
2113 reinsert breakpoints and delete any reinsert (software
2114 single-step) breakpoints. */
2115 step_over_finished = finish_step_over (event_child);
2116
2117 /* Now invoke the callbacks of any internal breakpoints there. */
2118 check_breakpoints (event_child->stop_pc);
2119
2120 /* Handle tracepoint data collecting. This may overflow the
2121 trace buffer, and cause a tracing stop, removing
2122 breakpoints. */
2123 trace_event = handle_tracepoints (event_child);
2124
2125 if (bp_explains_trap)
2126 {
2127 /* If we stepped or ran into an internal breakpoint, we've
2128 already handled it. So next time we resume (from this
2129 PC), we should step over it. */
2130 if (debug_threads)
2131 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2132
2133 if (breakpoint_here (event_child->stop_pc))
2134 event_child->need_step_over = 1;
2135 }
2136 }
2137 else
2138 {
2139 /* We have some other signal, possibly a step-over dance was in
2140 progress, and it should be cancelled too. */
2141 step_over_finished = finish_step_over (event_child);
2142 }
2143
2144 /* We have all the data we need. Either report the event to GDB, or
2145 resume threads and keep waiting for more. */
2146
2147 /* If we're collecting a fast tracepoint, finish the collection and
2148 move out of the jump pad before delivering a signal. See
2149 linux_stabilize_threads. */
2150
2151 if (WIFSTOPPED (w)
2152 && WSTOPSIG (w) != SIGTRAP
2153 && supports_fast_tracepoints ()
2154 && in_process_agent_loaded ())
2155 {
2156 if (debug_threads)
2157 fprintf (stderr,
2158 "Got signal %d for LWP %ld. Check if we need "
2159 "to defer or adjust it.\n",
2160 WSTOPSIG (w), lwpid_of (event_child));
2161
2162 /* Allow debugging the jump pad itself. */
2163 if (current_inferior->last_resume_kind != resume_step
2164 && maybe_move_out_of_jump_pad (event_child, &w))
2165 {
2166 enqueue_one_deferred_signal (event_child, &w);
2167
2168 if (debug_threads)
2169 fprintf (stderr,
2170 "Signal %d for LWP %ld deferred (in jump pad)\n",
2171 WSTOPSIG (w), lwpid_of (event_child));
2172
2173 linux_resume_one_lwp (event_child, 0, 0, NULL);
2174 goto retry;
2175 }
2176 }
2177
2178 if (event_child->collecting_fast_tracepoint)
2179 {
2180 if (debug_threads)
2181 fprintf (stderr, "\
2182 LWP %ld was trying to move out of the jump pad (%d). \
2183 Check if we're already there.\n",
2184 lwpid_of (event_child),
2185 event_child->collecting_fast_tracepoint);
2186
2187 trace_event = 1;
2188
2189 event_child->collecting_fast_tracepoint
2190 = linux_fast_tracepoint_collecting (event_child, NULL);
2191
2192 if (event_child->collecting_fast_tracepoint != 1)
2193 {
2194 /* No longer need this breakpoint. */
2195 if (event_child->exit_jump_pad_bkpt != NULL)
2196 {
2197 if (debug_threads)
2198 fprintf (stderr,
2199 "No longer need exit-jump-pad bkpt; removing it."
2200 "stopping all threads momentarily.\n");
2201
2202 /* Other running threads could hit this breakpoint.
2203 We don't handle moribund locations like GDB does,
2204 instead we always pause all threads when removing
2205 breakpoints, so that any step-over or
2206 decr_pc_after_break adjustment is always taken
2207 care of while the breakpoint is still
2208 inserted. */
2209 stop_all_lwps (1, event_child);
2210 cancel_breakpoints ();
2211
2212 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2213 event_child->exit_jump_pad_bkpt = NULL;
2214
2215 unstop_all_lwps (1, event_child);
2216
2217 gdb_assert (event_child->suspended >= 0);
2218 }
2219 }
2220
2221 if (event_child->collecting_fast_tracepoint == 0)
2222 {
2223 if (debug_threads)
2224 fprintf (stderr,
2225 "fast tracepoint finished "
2226 "collecting successfully.\n");
2227
2228 /* We may have a deferred signal to report. */
2229 if (dequeue_one_deferred_signal (event_child, &w))
2230 {
2231 if (debug_threads)
2232 fprintf (stderr, "dequeued one signal.\n");
2233 }
2234 else
2235 {
2236 if (debug_threads)
2237 fprintf (stderr, "no deferred signals.\n");
2238
2239 if (stabilizing_threads)
2240 {
2241 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2242 ourstatus->value.sig = TARGET_SIGNAL_0;
2243 return ptid_of (event_child);
2244 }
2245 }
2246 }
2247 }
2248
2249 /* Check whether GDB would be interested in this event. */
2250
2251 /* If GDB is not interested in this signal, don't stop other
2252 threads, and don't report it to GDB. Just resume the inferior
2253 right away. We do this for threading-related signals as well as
2254 any that GDB specifically requested we ignore. But never ignore
2255 SIGSTOP if we sent it ourselves, and do not ignore signals when
2256 stepping - they may require special handling to skip the signal
2257 handler. */
2258 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2259 thread library? */
2260 if (WIFSTOPPED (w)
2261 && current_inferior->last_resume_kind != resume_step
2262 && (
2263 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2264 (current_process ()->private->thread_db != NULL
2265 && (WSTOPSIG (w) == __SIGRTMIN
2266 || WSTOPSIG (w) == __SIGRTMIN + 1))
2267 ||
2268 #endif
2269 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2270 && !(WSTOPSIG (w) == SIGSTOP
2271 && current_inferior->last_resume_kind == resume_stop))))
2272 {
2273 siginfo_t info, *info_p;
2274
2275 if (debug_threads)
2276 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2277 WSTOPSIG (w), lwpid_of (event_child));
2278
2279 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2280 info_p = &info;
2281 else
2282 info_p = NULL;
2283 linux_resume_one_lwp (event_child, event_child->stepping,
2284 WSTOPSIG (w), info_p);
2285 goto retry;
2286 }
2287
2288 /* If GDB wanted this thread to single step, we always want to
2289 report the SIGTRAP, and let GDB handle it. Watchpoints should
2290 always be reported. So should signals we can't explain. A
2291 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2292 may not support Z0 breakpoints. If we do, we'll be able to handle
2293 GDB breakpoints on top of internal breakpoints, by handling the
2294 internal breakpoint and still reporting the event to GDB. If we
2295 don't, we're out of luck; GDB won't see the breakpoint hit. */
2296 report_to_gdb = (!maybe_internal_trap
2297 || current_inferior->last_resume_kind == resume_step
2298 || event_child->stopped_by_watchpoint
2299 || (!step_over_finished && !bp_explains_trap && !trace_event)
2300 || gdb_breakpoint_here (event_child->stop_pc));
2301
2302 /* We found no reason GDB would want us to stop. We either hit one
2303 of our own breakpoints, or finished an internal step GDB
2304 shouldn't know about. */
2305 if (!report_to_gdb)
2306 {
2307 if (debug_threads)
2308 {
2309 if (bp_explains_trap)
2310 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2311 if (step_over_finished)
2312 fprintf (stderr, "Step-over finished.\n");
2313 if (trace_event)
2314 fprintf (stderr, "Tracepoint event.\n");
2315 }
2316
2317 /* We're not reporting this breakpoint to GDB, so apply the
2318 decr_pc_after_break adjustment to the inferior's regcache
2319 ourselves. */
2320
2321 if (the_low_target.set_pc != NULL)
2322 {
2323 struct regcache *regcache
2324 = get_thread_regcache (get_lwp_thread (event_child), 1);
2325 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2326 }
2327
2328 /* We may have finished stepping over a breakpoint. If so,
2329 we've stopped and suspended all LWPs momentarily except the
2330 stepping one. This is where we resume them all again. We're
2331 going to keep waiting, so use proceed, which handles stepping
2332 over the next breakpoint. */
2333 if (debug_threads)
2334 fprintf (stderr, "proceeding all threads.\n");
2335
2336 if (step_over_finished)
2337 unsuspend_all_lwps (event_child);
2338
2339 proceed_all_lwps ();
2340 goto retry;
2341 }
2342
2343 if (debug_threads)
2344 {
2345 if (current_inferior->last_resume_kind == resume_step)
2346 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2347 if (event_child->stopped_by_watchpoint)
2348 fprintf (stderr, "Stopped by watchpoint.\n");
2349 if (gdb_breakpoint_here (event_child->stop_pc))
2350 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2351 /* Reached only under debug_threads; also log the generic case. */
2352 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2353 }
2354
2355 /* Alright, we're going to report a stop. */
2356
2357 if (!non_stop && !stabilizing_threads)
2358 {
2359 /* In all-stop, stop all threads. */
2360 stop_all_lwps (0, NULL);
2361
2362 /* If we're not waiting for a specific LWP, choose an event LWP
2363 from among those that have had events. Giving equal priority
2364 to all LWPs that have had events helps prevent
2365 starvation. */
2366 if (ptid_equal (ptid, minus_one_ptid))
2367 {
2368 event_child->status_pending_p = 1;
2369 event_child->status_pending = w;
2370
2371 select_event_lwp (&event_child);
2372
2373 event_child->status_pending_p = 0;
2374 w = event_child->status_pending;
2375 }
2376
2377 /* Now that we've selected our final event LWP, cancel any
2378 breakpoints in other LWPs that have hit a GDB breakpoint.
2379 See the comment in cancel_breakpoints_callback to find out
2380 why. */
2381 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2382
2383 /* Stabilize threads (move out of jump pads). */
2384 stabilize_threads ();
2385 }
2386 else
2387 {
2388 /* If we just finished a step-over, then all threads had been
2389 momentarily paused. In all-stop, that's fine, we want
2390 threads stopped by now anyway. In non-stop, we need to
2391 re-resume threads that GDB wanted to be running. */
2392 if (step_over_finished)
2393 unstop_all_lwps (1, event_child);
2394 }
2395
2396 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2397
2398 if (current_inferior->last_resume_kind == resume_stop
2399 && WSTOPSIG (w) == SIGSTOP)
2400 {
2401 /* The thread was requested to stop by GDB with vCont;t, and it
2402 stopped cleanly, so report it as SIG0. The use of
2403 SIGSTOP is an implementation detail. */
2404 ourstatus->value.sig = TARGET_SIGNAL_0;
2405 }
2406 else if (current_inferior->last_resume_kind == resume_stop
2407 && WSTOPSIG (w) != SIGSTOP)
2408 {
2409 /* The thread was requested to stop by GDB with vCont;t, but it
2410 stopped for some other reason; report that signal. */
2411 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2412 }
2413 else
2414 {
2415 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2416 }
2417
2418 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2419
2420 if (debug_threads)
2421 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2422 target_pid_to_str (ptid_of (event_child)),
2423 ourstatus->kind,
2424 ourstatus->value.sig);
2425
2426 return ptid_of (event_child);
2427 }
2428
2429 /* Get rid of any pending event in the pipe. */
2430 static void
2431 async_file_flush (void)
2432 {
2433 int ret;
2434 char buf;
2435
2436 do
2437 ret = read (linux_event_pipe[0], &buf, 1);
2438 while (ret >= 0 || (ret == -1 && errno == EINTR));
2439 }
2440
2441 /* Put something in the pipe, so the event loop wakes up. */
2442 static void
2443 async_file_mark (void)
2444 {
2445 int ret;
2446
2447 async_file_flush ();
2448
2449 do
2450 ret = write (linux_event_pipe[1], "+", 1);
2451 while (ret == 0 || (ret == -1 && errno == EINTR));
2452
2453 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2454 be awakened anyway. */
2455 }
2456
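/* The flush loop above terminates only because the event pipe is
   non-blocking: read eventually fails with EAGAIN once the pipe is
   drained, and a full pipe makes the write above fail with EAGAIN
   instead of blocking. A minimal sketch of how such a pipe would be
   set up (the real setup lives in the async support elsewhere in
   this file):  */
#if 0
static int example_event_pipe[2];

static int
example_make_event_pipe (void)
{
  int i;

  if (pipe (example_event_pipe) != 0)
    return -1;

  for (i = 0; i < 2; i++)
    if (fcntl (example_event_pipe[i], F_SETFL, O_NONBLOCK) != 0)
      return -1;

  return 0;
}
#endif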
2457 static ptid_t
2458 linux_wait (ptid_t ptid,
2459 struct target_waitstatus *ourstatus, int target_options)
2460 {
2461 ptid_t event_ptid;
2462
2463 if (debug_threads)
2464 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2465
2466 /* Flush the async file first. */
2467 if (target_is_async_p ())
2468 async_file_flush ();
2469
2470 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2471
2472 /* If at least one stop was reported, there may be more. A single
2473 SIGCHLD can signal more than one child stop. */
2474 if (target_is_async_p ()
2475 && (target_options & TARGET_WNOHANG) != 0
2476 && !ptid_equal (event_ptid, null_ptid))
2477 async_file_mark ();
2478
2479 return event_ptid;
2480 }
2481
2482 /* Send a signal to an LWP. */
2483
2484 static int
2485 kill_lwp (unsigned long lwpid, int signo)
2486 {
2487 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2488 fails, then we are not using nptl threads and we should be using kill. */
2489
2490 #ifdef __NR_tkill
2491 {
2492 static int tkill_failed;
2493
2494 if (!tkill_failed)
2495 {
2496 int ret;
2497
2498 errno = 0;
2499 ret = syscall (__NR_tkill, lwpid, signo);
2500 if (errno != ENOSYS)
2501 return ret;
2502 tkill_failed = 1;
2503 }
2504 }
2505 #endif
2506
2507 return kill (lwpid, signo);
2508 }
2509
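/* Illustrative only: newer kernels also provide tgkill, which takes
   the thread group id too, closing the tid-reuse race that plain
   tkill is subject to. A sketch, assuming __NR_tgkill exists on the
   build machine:  */
#if 0
static int
example_tgkill (int tgid, int tid, int signo)
{
  /* Fails with ESRCH if TID no longer belongs to thread group TGID,
     instead of possibly signalling a recycled thread id.  */
  return syscall (__NR_tgkill, tgid, tid, signo);
}
#endif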
2510 void
2511 linux_stop_lwp (struct lwp_info *lwp)
2512 {
2513 send_sigstop (lwp);
2514 }
2515
2516 static void
2517 send_sigstop (struct lwp_info *lwp)
2518 {
2519 int pid;
2520
2521 pid = lwpid_of (lwp);
2522
2523 /* If we already have a pending stop signal for this process, don't
2524 send another. */
2525 if (lwp->stop_expected)
2526 {
2527 if (debug_threads)
2528 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2529
2530 return;
2531 }
2532
2533 if (debug_threads)
2534 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2535
2536 lwp->stop_expected = 1;
2537 kill_lwp (pid, SIGSTOP);
2538 }
2539
2540 static int
2541 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2542 {
2543 struct lwp_info *lwp = (struct lwp_info *) entry;
2544
2545 /* Ignore EXCEPT. */
2546 if (lwp == except)
2547 return 0;
2548
2549 if (lwp->stopped)
2550 return 0;
2551
2552 send_sigstop (lwp);
2553 return 0;
2554 }
2555
2556 /* Increment the suspend count of an LWP, and stop it, if not stopped
2557 yet. */
2558 static int
2559 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2560 void *except)
2561 {
2562 struct lwp_info *lwp = (struct lwp_info *) entry;
2563
2564 /* Ignore EXCEPT. */
2565 if (lwp == except)
2566 return 0;
2567
2568 lwp->suspended++;
2569
2570 return send_sigstop_callback (entry, except);
2571 }
2572
2573 static void
2574 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2575 {
2576 /* It's dead, really. */
2577 lwp->dead = 1;
2578
2579 /* Store the exit status for later. */
2580 lwp->status_pending_p = 1;
2581 lwp->status_pending = wstat;
2582
2583 /* Prevent trying to stop it. */
2584 lwp->stopped = 1;
2585
2586 /* No further stops are expected from a dead lwp. */
2587 lwp->stop_expected = 0;
2588 }
2589
2590 static void
2591 wait_for_sigstop (struct inferior_list_entry *entry)
2592 {
2593 struct lwp_info *lwp = (struct lwp_info *) entry;
2594 struct thread_info *saved_inferior;
2595 int wstat;
2596 ptid_t saved_tid;
2597 ptid_t ptid;
2598 int pid;
2599
2600 if (lwp->stopped)
2601 {
2602 if (debug_threads)
2603 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2604 lwpid_of (lwp));
2605 return;
2606 }
2607
2608 saved_inferior = current_inferior;
2609 if (saved_inferior != NULL)
2610 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2611 else
2612 saved_tid = null_ptid; /* avoid bogus unused warning */
2613
2614 ptid = lwp->head.id;
2615
2616 if (debug_threads)
2617 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2618
2619 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2620
2621 /* If we stopped with a non-SIGSTOP signal, save it for later
2622 and record the pending SIGSTOP. If the process exited, just
2623 return. */
2624 if (WIFSTOPPED (wstat))
2625 {
2626 if (debug_threads)
2627 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2628 lwpid_of (lwp), WSTOPSIG (wstat));
2629
2630 if (WSTOPSIG (wstat) != SIGSTOP)
2631 {
2632 if (debug_threads)
2633 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2634 lwpid_of (lwp), wstat);
2635
2636 lwp->status_pending_p = 1;
2637 lwp->status_pending = wstat;
2638 }
2639 }
2640 else
2641 {
2642 if (debug_threads)
2643 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2644
2645 lwp = find_lwp_pid (pid_to_ptid (pid));
2646 if (lwp)
2647 {
2648 /* Leave this status pending for the next time we're able to
2649 report it. In the mean time, we'll report this lwp as
2650 dead to GDB, so GDB doesn't try to read registers and
2651 memory from it. This can only happen if this was the
2652 last thread of the process; otherwise, PID is removed
2653 from the thread tables before linux_wait_for_event
2654 returns. */
2655 mark_lwp_dead (lwp, wstat);
2656 }
2657 }
2658
2659 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2660 current_inferior = saved_inferior;
2661 else
2662 {
2663 if (debug_threads)
2664 fprintf (stderr, "Previously current thread died.\n");
2665
2666 if (non_stop)
2667 {
2668 /* We can't change the current inferior behind GDB's back,
2669 otherwise, a subsequent command may apply to the wrong
2670 process. */
2671 current_inferior = NULL;
2672 }
2673 else
2674 {
2675 /* Set a valid thread as current. */
2676 set_desired_inferior (0);
2677 }
2678 }
2679 }
2680
2681 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2682 move it out, because we need to report the stop event to GDB. For
2683 example, if the user puts a breakpoint in the jump pad, it's
2684 because she wants to debug it. */
2685
2686 static int
2687 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2688 {
2689 struct lwp_info *lwp = (struct lwp_info *) entry;
2690 struct thread_info *thread = get_lwp_thread (lwp);
2691
2692 gdb_assert (lwp->suspended == 0);
2693 gdb_assert (lwp->stopped);
2694
2695 /* Allow debugging the jump pad, gdb_collect, etc.. */
2696 return (supports_fast_tracepoints ()
2697 && in_process_agent_loaded ()
2698 && (gdb_breakpoint_here (lwp->stop_pc)
2699 || lwp->stopped_by_watchpoint
2700 || thread->last_resume_kind == resume_step)
2701 && linux_fast_tracepoint_collecting (lwp, NULL));
2702 }
2703
2704 static void
2705 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2706 {
2707 struct lwp_info *lwp = (struct lwp_info *) entry;
2708 struct thread_info *thread = get_lwp_thread (lwp);
2709 int *wstat;
2710
2711 gdb_assert (lwp->suspended == 0);
2712 gdb_assert (lwp->stopped);
2713
2714 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2715
2716 /* Allow debugging the jump pad, gdb_collect, etc. */
2717 if (!gdb_breakpoint_here (lwp->stop_pc)
2718 && !lwp->stopped_by_watchpoint
2719 && thread->last_resume_kind != resume_step
2720 && maybe_move_out_of_jump_pad (lwp, wstat))
2721 {
2722 if (debug_threads)
2723 fprintf (stderr,
2724 "LWP %ld needs stabilizing (in jump pad)\n",
2725 lwpid_of (lwp));
2726
2727 if (wstat)
2728 {
2729 lwp->status_pending_p = 0;
2730 enqueue_one_deferred_signal (lwp, wstat);
2731
2732 if (debug_threads)
2733 fprintf (stderr,
2734 "Signal %d for LWP %ld deferred "
2735 "(in jump pad)\n",
2736 WSTOPSIG (*wstat), lwpid_of (lwp));
2737 }
2738
2739 linux_resume_one_lwp (lwp, 0, 0, NULL);
2740 }
2741 else
2742 lwp->suspended++;
2743 }
2744
2745 static int
2746 lwp_running (struct inferior_list_entry *entry, void *data)
2747 {
2748 struct lwp_info *lwp = (struct lwp_info *) entry;
2749
2750 if (lwp->dead)
2751 return 0;
2752 if (lwp->stopped)
2753 return 0;
2754 return 1;
2755 }
2756
2757 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2758 If SUSPEND, then also increase the suspend count of every LWP,
2759 except EXCEPT. */
2760
2761 static void
2762 stop_all_lwps (int suspend, struct lwp_info *except)
2763 {
2764 stopping_threads = 1;
2765
2766 if (suspend)
2767 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2768 else
2769 find_inferior (&all_lwps, send_sigstop_callback, except);
2770 for_each_inferior (&all_lwps, wait_for_sigstop);
2771 stopping_threads = 0;
2772 }
2773
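/* Typical pairing with unstop_all_lwps, as in the
   exit-jump-pad-breakpoint path of linux_wait_1 above (sketch):
   suspend and stop everyone but EVENT_CHILD, mutate breakpoints
   while the world is quiescent, then undo both.  */
#if 0
static void
example_quiesce_and_delete (struct lwp_info *event_child)
{
  stop_all_lwps (1, event_child);	/* stop + suspend the others */
  cancel_breakpoints ();
  delete_breakpoint (event_child->exit_jump_pad_bkpt);
  unstop_all_lwps (1, event_child);	/* resume + unsuspend them */
}
#endif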
2774 /* Resume execution of the inferior process.
2775 If STEP is nonzero, single-step it.
2776 If SIGNAL is nonzero, give it that signal. */
2777
2778 static void
2779 linux_resume_one_lwp (struct lwp_info *lwp,
2780 int step, int signal, siginfo_t *info)
2781 {
2782 struct thread_info *saved_inferior;
2783 int fast_tp_collecting;
2784
2785 if (lwp->stopped == 0)
2786 return;
2787
2788 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2789
2790 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2791
2792 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2793 user used the "jump" command, or "set $pc = foo"). */
2794 if (lwp->stop_pc != get_pc (lwp))
2795 {
2796 /* Collecting 'while-stepping' actions doesn't make sense
2797 anymore. */
2798 release_while_stepping_state_list (get_lwp_thread (lwp));
2799 }
2800
2801 /* If we have pending signals or status, and a new signal, enqueue the
2802 signal. Also enqueue the signal if we are waiting to reinsert a
2803 breakpoint; it will be picked up again below. */
2804 if (signal != 0
2805 && (lwp->status_pending_p
2806 || lwp->pending_signals != NULL
2807 || lwp->bp_reinsert != 0
2808 || fast_tp_collecting))
2809 {
2810 struct pending_signals *p_sig;
2811 p_sig = xmalloc (sizeof (*p_sig));
2812 p_sig->prev = lwp->pending_signals;
2813 p_sig->signal = signal;
2814 if (info == NULL)
2815 memset (&p_sig->info, 0, sizeof (siginfo_t));
2816 else
2817 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2818 lwp->pending_signals = p_sig;
2819 }
2820
2821 if (lwp->status_pending_p)
2822 {
2823 if (debug_threads)
2824 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2825 " has pending status\n",
2826 lwpid_of (lwp), step ? "step" : "continue", signal,
2827 lwp->stop_expected ? "expected" : "not expected");
2828 return;
2829 }
2830
2831 saved_inferior = current_inferior;
2832 current_inferior = get_lwp_thread (lwp);
2833
2834 if (debug_threads)
2835 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2836 lwpid_of (lwp), step ? "step" : "continue", signal,
2837 lwp->stop_expected ? "expected" : "not expected");
2838
2839 /* This bit needs some thinking about. If we get a signal that
2840 we must report while a single-step reinsert is still pending,
2841 we often end up resuming the thread. It might be better to
2842 (ew) allow a stack of pending events; then we could be sure that
2843 the reinsert happened right away and not lose any signals.
2844
2845 Making this stack would also shrink the window in which breakpoints are
2846 uninserted (see comment in linux_wait_for_lwp) but not enough for
2847 complete correctness, so it won't solve that problem. It may be
2848 worthwhile just to solve this one, however. */
2849 if (lwp->bp_reinsert != 0)
2850 {
2851 if (debug_threads)
2852 fprintf (stderr, " pending reinsert at 0x%s\n",
2853 paddress (lwp->bp_reinsert));
2854
2855 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2856 {
2857 if (fast_tp_collecting == 0)
2858 {
2859 if (step == 0)
2860 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2861 if (lwp->suspended)
2862 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2863 lwp->suspended);
2864 }
2865
2866 step = 1;
2867 }
2868
2869 /* Postpone any pending signal. It was enqueued above. */
2870 signal = 0;
2871 }
2872
2873 if (fast_tp_collecting == 1)
2874 {
2875 if (debug_threads)
2876 fprintf (stderr, "\
2877 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2878 lwpid_of (lwp));
2879
2880 /* Postpone any pending signal. It was enqueued above. */
2881 signal = 0;
2882 }
2883 else if (fast_tp_collecting == 2)
2884 {
2885 if (debug_threads)
2886 fprintf (stderr, "\
2887 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2888 lwpid_of (lwp));
2889
2890 if (can_hardware_single_step ())
2891 step = 1;
2892 else
2893 fatal ("moving out of jump pad single-stepping"
2894 " not implemented on this target");
2895
2896 /* Postpone any pending signal. It was enqueued above. */
2897 signal = 0;
2898 }
2899
2900 /* If we have while-stepping actions in this thread set it stepping.
2901 If we have a signal to deliver, it may or may not be set to
2902 SIG_IGN, we don't know. Assume so, and allow collecting
2903 while-stepping into a signal handler. A possible smart thing to
2904 do would be to set an internal breakpoint at the signal return
2905 address, continue, and carry on catching this while-stepping
2906 action only when that breakpoint is hit. A future
2907 enhancement. */
2908 if (get_lwp_thread (lwp)->while_stepping != NULL
2909 && can_hardware_single_step ())
2910 {
2911 if (debug_threads)
2912 fprintf (stderr,
2913 "lwp %ld has a while-stepping action -> forcing step.\n",
2914 lwpid_of (lwp));
2915 step = 1;
2916 }
2917
2918 if (debug_threads && the_low_target.get_pc != NULL)
2919 {
2920 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2921 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2922 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2923 }
2924
2925 /* If we have pending signals, consume one unless we are trying to
2926 reinsert a breakpoint or we're trying to finish a fast tracepoint
2927 collect. */
2928 if (lwp->pending_signals != NULL
2929 && lwp->bp_reinsert == 0
2930 && fast_tp_collecting == 0)
2931 {
2932 struct pending_signals **p_sig;
2933
2934 p_sig = &lwp->pending_signals;
2935 while ((*p_sig)->prev != NULL)
2936 p_sig = &(*p_sig)->prev;
2937
2938 signal = (*p_sig)->signal;
2939 if ((*p_sig)->info.si_signo != 0)
2940 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2941
2942 free (*p_sig);
2943 *p_sig = NULL;
2944 }
2945
2946 if (the_low_target.prepare_to_resume != NULL)
2947 the_low_target.prepare_to_resume (lwp);
2948
2949 regcache_invalidate_one ((struct inferior_list_entry *)
2950 get_lwp_thread (lwp));
2951 errno = 0;
2952 lwp->stopped = 0;
2953 lwp->stopped_by_watchpoint = 0;
2954 lwp->stepping = step;
2955 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2956 /* Coerce to a uintptr_t first to avoid potential gcc warning
2957 of coercing an 8 byte integer to a 4 byte pointer. */
2958 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2959
2960 current_inferior = saved_inferior;
2961 if (errno)
2962 {
2963 /* ESRCH from ptrace either means that the thread was already
2964 running (an error) or that it is gone (a race condition). If
2965 it's gone, we will get a notification the next time we wait,
2966 so we can ignore the error. We could differentiate these
2967 two, but it's tricky without waiting; the thread still exists
2968 as a zombie, so sending it signal 0 would succeed. So just
2969 ignore ESRCH. */
2970 if (errno == ESRCH)
2971 return;
2972
2973 perror_with_name ("ptrace");
2974 }
2975 }
2976
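/* PENDING_SIGNALS is pushed at the head (newest first) but consumed
   from the tail, so deferred signals are re-delivered in the order
   they arrived. The two halves of that pattern, reduced to a
   sketch:  */
#if 0
static void
example_push_pending (struct lwp_info *lwp, int signal)
{
  struct pending_signals *p_sig = xmalloc (sizeof (*p_sig));

  p_sig->prev = lwp->pending_signals;	/* newest at the head */
  p_sig->signal = signal;
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  lwp->pending_signals = p_sig;
}

static int
example_pop_oldest (struct lwp_info *lwp)
{
  struct pending_signals **p_sig = &lwp->pending_signals;
  int signal;

  while ((*p_sig)->prev != NULL)	/* walk down to the oldest */
    p_sig = &(*p_sig)->prev;

  signal = (*p_sig)->signal;
  free (*p_sig);
  *p_sig = NULL;
  return signal;
}
#endif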
2977 struct thread_resume_array
2978 {
2979 struct thread_resume *resume;
2980 size_t n;
2981 };
2982
2983 /* This function is called once per thread. We look up the thread
2984 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2985 resume request.
2986
2987 This algorithm is O(threads * resume elements), but resume elements
2988 is small (and will remain small at least until GDB supports thread
2989 suspension). */
2990 static int
2991 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2992 {
2993 struct lwp_info *lwp;
2994 struct thread_info *thread;
2995 int ndx;
2996 struct thread_resume_array *r;
2997
2998 thread = (struct thread_info *) entry;
2999 lwp = get_thread_lwp (thread);
3000 r = arg;
3001
3002 for (ndx = 0; ndx < r->n; ndx++)
3003 {
3004 ptid_t ptid = r->resume[ndx].thread;
3005 if (ptid_equal (ptid, minus_one_ptid)
3006 || ptid_equal (ptid, entry->id)
3007 || (ptid_is_pid (ptid)
3008 && (ptid_get_pid (ptid) == pid_of (lwp)))
3009 || (ptid_get_lwp (ptid) == -1
3010 && (ptid_get_pid (ptid) == pid_of (lwp))))
3011 {
3012 if (r->resume[ndx].kind == resume_stop
3013 && thread->last_resume_kind == resume_stop)
3014 {
3015 if (debug_threads)
3016 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3017 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3018 ? "stopped"
3019 : "stopping",
3020 lwpid_of (lwp));
3021
3022 continue;
3023 }
3024
3025 lwp->resume = &r->resume[ndx];
3026 thread->last_resume_kind = lwp->resume->kind;
3027
3028 /* If we had a deferred signal to report, dequeue one now.
3029 This can happen if LWP gets more than one signal while
3030 trying to get out of a jump pad. */
3031 if (lwp->stopped
3032 && !lwp->status_pending_p
3033 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3034 {
3035 lwp->status_pending_p = 1;
3036
3037 if (debug_threads)
3038 fprintf (stderr,
3039 "Dequeueing deferred signal %d for LWP %ld, "
3040 "leaving status pending.\n",
3041 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3042 }
3043
3044 return 0;
3045 }
3046 }
3047
3048 /* No resume action for this thread. */
3049 lwp->resume = NULL;
3050
3051 return 0;
3052 }
3053
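/* For reference, a hypothetical client request "vCont;s:p1.5;c"
   (step thread 5 of process 1, continue everything else) would reach
   linux_resume below roughly as this two-element array; ptid_build
   and the exact packet syntax are assumed here, not shown by this
   file:  */
#if 0
static void
example_vcont_mapping (void)
{
  struct thread_resume r[2];

  r[0].thread = ptid_build (1, 5, 0);	/* the specific thread */
  r[0].kind = resume_step;
  r[0].sig = 0;

  r[1].thread = minus_one_ptid;		/* wildcard: everyone else */
  r[1].kind = resume_continue;
  r[1].sig = 0;

  linux_resume (r, 2);
}
#endif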
3054
3055 /* Set *FLAG_P if this lwp has an interesting status pending. */
3056 static int
3057 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3058 {
3059 struct lwp_info *lwp = (struct lwp_info *) entry;
3060
3061 /* LWPs which will not be resumed are not interesting, because
3062 we might not wait for them next time through linux_wait. */
3063 if (lwp->resume == NULL)
3064 return 0;
3065
3066 if (lwp->status_pending_p)
3067 * (int *) flag_p = 1;
3068
3069 return 0;
3070 }
3071
3072 /* Return 1 if this lwp that GDB wants running is stopped at an
3073 internal breakpoint that we need to step over. It assumes that any
3074 required STOP_PC adjustment has already been propagated to the
3075 inferior's regcache. */
3076
3077 static int
3078 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3079 {
3080 struct lwp_info *lwp = (struct lwp_info *) entry;
3081 struct thread_info *thread;
3082 struct thread_info *saved_inferior;
3083 CORE_ADDR pc;
3084
3085 /* LWPs which will not be resumed are not interesting, because we
3086 might not wait for them next time through linux_wait. */
3087
3088 if (!lwp->stopped)
3089 {
3090 if (debug_threads)
3091 fprintf (stderr,
3092 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3093 lwpid_of (lwp));
3094 return 0;
3095 }
3096
3097 thread = get_lwp_thread (lwp);
3098
3099 if (thread->last_resume_kind == resume_stop)
3100 {
3101 if (debug_threads)
3102 fprintf (stderr,
3103 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3104 lwpid_of (lwp));
3105 return 0;
3106 }
3107
3108 gdb_assert (lwp->suspended >= 0);
3109
3110 if (lwp->suspended)
3111 {
3112 if (debug_threads)
3113 fprintf (stderr,
3114 "Need step over [LWP %ld]? Ignoring, suspended\n",
3115 lwpid_of (lwp));
3116 return 0;
3117 }
3118
3119 if (!lwp->need_step_over)
3120 {
3121 if (debug_threads)
3122 fprintf (stderr,
3123 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3124 }
3125
3126 if (lwp->status_pending_p)
3127 {
3128 if (debug_threads)
3129 fprintf (stderr,
3130 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3131 lwpid_of (lwp));
3132 return 0;
3133 }
3134
3135 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3136 or we have. */
3137 pc = get_pc (lwp);
3138
3139 /* If the PC has changed since we stopped, then don't do anything,
3140 and let the breakpoint/tracepoint be hit. This happens if, for
3141 instance, GDB handled the decr_pc_after_break subtraction itself,
3142 GDB is OOL stepping this thread, or the user has issued a "jump"
3143 command, or poked thread's registers herself. */
3144 if (pc != lwp->stop_pc)
3145 {
3146 if (debug_threads)
3147 fprintf (stderr,
3148 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3149 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3150 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3151
3152 lwp->need_step_over = 0;
3153 return 0;
3154 }
3155
3156 saved_inferior = current_inferior;
3157 current_inferior = thread;
3158
3159 /* We can only step over breakpoints we know about. */
3160 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3161 {
3162 /* Don't step over a breakpoint that GDB expects to hit
3163 though. */
3164 if (gdb_breakpoint_here (pc))
3165 {
3166 if (debug_threads)
3167 fprintf (stderr,
3168 "Need step over [LWP %ld]? yes, but found"
3169 " GDB breakpoint at 0x%s; skipping step over\n",
3170 lwpid_of (lwp), paddress (pc));
3171
3172 current_inferior = saved_inferior;
3173 return 0;
3174 }
3175 else
3176 {
3177 if (debug_threads)
3178 fprintf (stderr,
3179 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
3180 lwpid_of (lwp), paddress (pc));
3181
3182 /* We've found an lwp that needs stepping over --- return 1 so
3183 that find_inferior stops looking. */
3184 current_inferior = saved_inferior;
3185
3186 /* If the step over is cancelled, this is set again. */
3187 lwp->need_step_over = 0;
3188 return 1;
3189 }
3190 }
3191
3192 current_inferior = saved_inferior;
3193
3194 if (debug_threads)
3195 fprintf (stderr,
3196 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3197 lwpid_of (lwp), paddress (pc));
3198
3199 return 0;
3200 }
3201
3202 /* Start a step-over operation on LWP. When LWP is stopped at a
3203 breakpoint, to make progress we need to get the breakpoint out
3204 of the way. If we let other threads run while we do that, they may
3205 pass by the breakpoint location and miss hitting it. To avoid
3206 that, a step-over momentarily stops all threads while LWP is
3207 single-stepped while the breakpoint is temporarily uninserted from
3208 the inferior. When the single-step finishes, we reinsert the
3209 breakpoint, and let all threads that are supposed to be running,
3210 run again.
3211
3212 On targets that don't support hardware single-step, we don't
3213 currently support full software single-stepping. Instead, we only
3214 support stepping over the thread event breakpoint, by asking the
3215 low target where to place a reinsert breakpoint. Since this
3216 routine assumes the breakpoint being stepped over is a thread event
3217 breakpoint, it usually assumes the return address of the current
3218 function is a good enough place to set the reinsert breakpoint. */
3219
3220 static int
3221 start_step_over (struct lwp_info *lwp)
3222 {
3223 struct thread_info *saved_inferior;
3224 CORE_ADDR pc;
3225 int step;
3226
3227 if (debug_threads)
3228 fprintf (stderr,
3229 "Starting step-over on LWP %ld. Stopping all threads\n",
3230 lwpid_of (lwp));
3231
3232 stop_all_lwps (1, lwp);
3233 gdb_assert (lwp->suspended == 0);
3234
3235 if (debug_threads)
3236 fprintf (stderr, "Done stopping all threads for step-over.\n");
3237
3238 /* Note, we should always reach here with an already adjusted PC,
3239 either by GDB (if we're resuming due to GDB's request), or by our
3240 caller, if we just finished handling an internal breakpoint GDB
3241 shouldn't care about. */
3242 pc = get_pc (lwp);
3243
3244 saved_inferior = current_inferior;
3245 current_inferior = get_lwp_thread (lwp);
3246
3247 lwp->bp_reinsert = pc;
3248 uninsert_breakpoints_at (pc);
3249 uninsert_fast_tracepoint_jumps_at (pc);
3250
3251 if (can_hardware_single_step ())
3252 {
3253 step = 1;
3254 }
3255 else
3256 {
3257 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3258 set_reinsert_breakpoint (raddr);
3259 step = 0;
3260 }
3261
3262 current_inferior = saved_inferior;
3263
3264 linux_resume_one_lwp (lwp, step, 0, NULL);
3265
3266 /* Require next event from this LWP. */
3267 step_over_bkpt = lwp->head.id;
3268 return 1;
3269 }
3270
3271 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3272 start_step_over, if still there, and delete any reinsert
3273 breakpoints we've set, on non hardware single-step targets. */
3274
3275 static int
3276 finish_step_over (struct lwp_info *lwp)
3277 {
3278 if (lwp->bp_reinsert != 0)
3279 {
3280 if (debug_threads)
3281 fprintf (stderr, "Finished step over.\n");
3282
3283 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3284 may be no breakpoint to reinsert there by now. */
3285 reinsert_breakpoints_at (lwp->bp_reinsert);
3286 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3287
3288 lwp->bp_reinsert = 0;
3289
3290 /* Delete any software-single-step reinsert breakpoints. No
3291 longer needed. We don't have to worry about other threads
3292 hitting this trap, and later not being able to explain it,
3293 because we were stepping over a breakpoint, and we hold all
3294 threads but LWP stopped while doing that. */
3295 if (!can_hardware_single_step ())
3296 delete_reinsert_breakpoints ();
3297
3298 step_over_bkpt = null_ptid;
3299 return 1;
3300 }
3301 else
3302 return 0;
3303 }
3304
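/* The whole step-over dance end to end, reduced to a sketch; the
   pieces live in start_step_over, linux_wait_1 and finish_step_over
   above:  */
#if 0
static void
example_step_over_dance (struct lwp_info *lwp, CORE_ADDR pc)
{
  stop_all_lwps (1, lwp);		/* freeze and suspend the others */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);		/* take the trap out of the way */
  linux_resume_one_lwp (lwp, 1, 0, NULL);	/* hardware single-step */

  /* ... the next SIGTRAP from LWP reaches linux_wait_1, which calls
     finish_step_over to reinsert the breakpoint, and then ... */

  unsuspend_all_lwps (lwp);
  proceed_all_lwps ();
}
#endif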
3305 /* This function is called once per thread. We check the thread's resume
3306 request, which will tell us whether to resume, step, or leave the thread
3307 stopped; and what signal, if any, it should be sent.
3308
3309 For threads which we aren't explicitly told otherwise, we preserve
3310 the stepping flag; this is used for stepping over gdbserver-placed
3311 breakpoints.
3312
3313 If pending_flags was set in any thread, we queue any needed
3314 signals, since we won't actually resume. We already have a pending
3315 event to report, so we don't need to preserve any step requests;
3316 they should be re-issued if necessary. */
3317
3318 static int
3319 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3320 {
3321 struct lwp_info *lwp;
3322 struct thread_info *thread;
3323 int step;
3324 int leave_all_stopped = * (int *) arg;
3325 int leave_pending;
3326
3327 thread = (struct thread_info *) entry;
3328 lwp = get_thread_lwp (thread);
3329
3330 if (lwp->resume == NULL)
3331 return 0;
3332
3333 if (lwp->resume->kind == resume_stop)
3334 {
3335 if (debug_threads)
3336 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3337
3338 if (!lwp->stopped)
3339 {
3340 if (debug_threads)
3341 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3342
3343 /* Stop the thread, and wait for the event asynchronously,
3344 through the event loop. */
3345 send_sigstop (lwp);
3346 }
3347 else
3348 {
3349 if (debug_threads)
3350 fprintf (stderr, "already stopped LWP %ld\n",
3351 lwpid_of (lwp));
3352
3353 /* The LWP may have been stopped in an internal event that
3354 was not meant to be notified back to GDB (e.g., gdbserver
3355 breakpoint), so we should be reporting a stop event in
3356 this case too. */
3357
3358 /* If the thread already has a pending SIGSTOP, this is a
3359 no-op. Otherwise, something later will presumably resume
3360 the thread and this will cause it to cancel any pending
3361 operation, due to last_resume_kind == resume_stop. If
3362 the thread already has a pending status to report, we
3363 will still report it the next time we wait - see
3364 status_pending_p_callback. */
3365
3366 /* If we already have a pending signal to report, then
3367 there's no need to queue a SIGSTOP, as this means we're
3368 midway through moving the LWP out of the jumppad, and we
3369 will report the pending signal as soon as that is
3370 finished. */
3371 if (lwp->pending_signals_to_report == NULL)
3372 send_sigstop (lwp);
3373 }
3374
3375 /* For stop requests, we're done. */
3376 lwp->resume = NULL;
3377 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3378 return 0;
3379 }
3380
3381 /* If this thread which is about to be resumed has a pending status,
3382 then don't resume any threads - we can just report the pending
3383 status. Make sure to queue any signals that would otherwise be
3384 sent. In all-stop mode, we do this decision based on if *any*
3385 thread has a pending status. If there's a thread that needs the
3386 step-over-breakpoint dance, then don't resume any other thread
3387 but that particular one. */
3388 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3389
3390 if (!leave_pending)
3391 {
3392 if (debug_threads)
3393 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3394
3395 step = (lwp->resume->kind == resume_step);
3396 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3397 }
3398 else
3399 {
3400 if (debug_threads)
3401 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3402
3403 /* If we have a new signal, enqueue the signal. */
3404 if (lwp->resume->sig != 0)
3405 {
3406 struct pending_signals *p_sig;
3407 p_sig = xmalloc (sizeof (*p_sig));
3408 p_sig->prev = lwp->pending_signals;
3409 p_sig->signal = lwp->resume->sig;
3410 memset (&p_sig->info, 0, sizeof (siginfo_t));
3411
3412 /* If this is the same signal we were previously stopped by,
3413 make sure to queue its siginfo. We can ignore the return
3414 value of ptrace; if it fails, we'll skip
3415 PTRACE_SETSIGINFO. */
3416 if (WIFSTOPPED (lwp->last_status)
3417 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3418 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3419
3420 lwp->pending_signals = p_sig;
3421 }
3422 }
3423
3424 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3425 lwp->resume = NULL;
3426 return 0;
3427 }
3428
3429 static void
3430 linux_resume (struct thread_resume *resume_info, size_t n)
3431 {
3432 struct thread_resume_array array = { resume_info, n };
3433 struct lwp_info *need_step_over = NULL;
3434 int any_pending;
3435 int leave_all_stopped;
3436
3437 find_inferior (&all_threads, linux_set_resume_request, &array);
3438
3439 /* If there is a thread which would otherwise be resumed, which has
3440 a pending status, then don't resume any threads - we can just
3441 report the pending status. Make sure to queue any signals that
3442 would otherwise be sent. In non-stop mode, we'll apply this
3443 logic to each thread individually. We consume all pending events
3444 before considering to start a step-over (in all-stop). */
3445 any_pending = 0;
3446 if (!non_stop)
3447 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3448
3449 /* If there is a thread which would otherwise be resumed, which is
3450 stopped at a breakpoint that needs stepping over, then don't
3451 resume any threads - have it step over the breakpoint with all
3452 other threads stopped, then resume all threads again. Make sure
3453 to queue any signals that would otherwise be delivered or
3454 queued. */
3455 if (!any_pending && supports_breakpoints ())
3456 need_step_over
3457 = (struct lwp_info *) find_inferior (&all_lwps,
3458 need_step_over_p, NULL);
3459
3460 leave_all_stopped = (need_step_over != NULL || any_pending);
3461
3462 if (debug_threads)
3463 {
3464 if (need_step_over != NULL)
3465 fprintf (stderr, "Not resuming all, need step over\n");
3466 else if (any_pending)
3467 fprintf (stderr,
3468 "Not resuming, all-stop and found "
3469 "an LWP with pending status\n");
3470 else
3471 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3472 }
3473
3474 /* Even if we're leaving threads stopped, queue all signals we'd
3475 otherwise deliver. */
3476 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3477
3478 if (need_step_over)
3479 start_step_over (need_step_over);
3480 }
3481
3482 /* This function is called once per thread. We check the thread's
3483 last resume request, which will tell us whether to resume, step, or
3484 leave the thread stopped. Any signal the client requested to be
3485 delivered has already been enqueued at this point.
3486
3487 If any thread that GDB wants running is stopped at an internal
3488 breakpoint that needs stepping over, we start a step-over operation
3489 on that particular thread, and leave all others stopped. */
3490
3491 static int
3492 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3493 {
3494 struct lwp_info *lwp = (struct lwp_info *) entry;
3495 struct thread_info *thread;
3496 int step;
3497
3498 if (lwp == except)
3499 return 0;
3500
3501 if (debug_threads)
3502 fprintf (stderr,
3503 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3504
3505 if (!lwp->stopped)
3506 {
3507 if (debug_threads)
3508 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3509 return 0;
3510 }
3511
3512 thread = get_lwp_thread (lwp);
3513
3514 if (thread->last_resume_kind == resume_stop
3515 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3516 {
3517 if (debug_threads)
3518 fprintf (stderr, " client wants LWP %ld to remain stopped\n",
3519 lwpid_of (lwp));
3520 return 0;
3521 }
3522
3523 if (lwp->status_pending_p)
3524 {
3525 if (debug_threads)
3526 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3527 lwpid_of (lwp));
3528 return 0;
3529 }
3530
3531 gdb_assert (lwp->suspended >= 0);
3532
3533 if (lwp->suspended)
3534 {
3535 if (debug_threads)
3536 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3537 return 0;
3538 }
3539
3540 if (thread->last_resume_kind == resume_stop
3541 && lwp->pending_signals_to_report == NULL
3542 && lwp->collecting_fast_tracepoint == 0)
3543 {
3544 /* We haven't reported this LWP as stopped yet (otherwise, the
3545 last_status.kind check above would catch it, and we wouldn't
3546 reach here). This LWP may have been momentarily paused by a
3547 stop_all_lwps call while handling, for example, another LWP's
3548 step-over. In that case, the pending expected SIGSTOP signal
3549 that was queued at vCont;t handling time will have already
3550 been consumed by wait_for_sigstop, and so we need to requeue
3551 another one here. Note that if the LWP already has a SIGSTOP
3552 pending, this is a no-op. */
3553
3554 if (debug_threads)
3555 fprintf (stderr,
3556 "Client wants LWP %ld to stop. "
3557 "Making sure it has a SIGSTOP pending\n",
3558 lwpid_of (lwp));
3559
3560 send_sigstop (lwp);
3561 }
3562
3563 step = thread->last_resume_kind == resume_step;
3564 linux_resume_one_lwp (lwp, step, 0, NULL);
3565 return 0;
3566 }
3567
3568 static int
3569 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3570 {
3571 struct lwp_info *lwp = (struct lwp_info *) entry;
3572
3573 if (lwp == except)
3574 return 0;
3575
3576 lwp->suspended--;
3577 gdb_assert (lwp->suspended >= 0);
3578
3579 return proceed_one_lwp (entry, except);
3580 }
3581
3582 /* When we finish a step-over, set threads running again. If there's
3583 another thread that may need a step-over, now's the time to start
3584 it. Eventually, we'll move all threads past their breakpoints. */
3585
3586 static void
3587 proceed_all_lwps (void)
3588 {
3589 struct lwp_info *need_step_over;
3590
3591 /* If there is a thread which would otherwise be resumed, which is
3592 stopped at a breakpoint that needs stepping over, then don't
3593 resume any threads - have it step over the breakpoint with all
3594 other threads stopped, then resume all threads again. */
3595
3596 if (supports_breakpoints ())
3597 {
3598 need_step_over
3599 = (struct lwp_info *) find_inferior (&all_lwps,
3600 need_step_over_p, NULL);
3601
3602 if (need_step_over != NULL)
3603 {
3604 if (debug_threads)
3605 fprintf (stderr, "proceed_all_lwps: found "
3606 "thread %ld needing a step-over\n",
3607 lwpid_of (need_step_over));
3608
3609 start_step_over (need_step_over);
3610 return;
3611 }
3612 }
3613
3614 if (debug_threads)
3615 fprintf (stderr, "Proceeding, no step-over needed\n");
3616
3617 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3618 }
3619
3620 /* Stopped LWPs that the client wanted to be running, that don't have
3621 pending statuses, are set to run again, except for EXCEPT, if not
3622 NULL. This undoes a stop_all_lwps call. */
3623
3624 static void
3625 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3626 {
3627 if (debug_threads)
3628 {
3629 if (except)
3630 fprintf (stderr,
3631 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3632 else
3633 fprintf (stderr,
3634 "unstopping all lwps\n");
3635 }
3636
3637 if (unsuspend)
3638 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3639 else
3640 find_inferior (&all_lwps, proceed_one_lwp, except);
3641 }
3642
3643 #ifdef HAVE_LINUX_USRREGS
3644
3645 int
3646 register_addr (int regnum)
3647 {
3648 int addr;
3649
3650 if (regnum < 0 || regnum >= the_low_target.num_regs)
3651 error ("Invalid register number %d.", regnum);
3652
3653 addr = the_low_target.regmap[regnum];
3654
3655 return addr;
3656 }
3657
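/* THE_LOW_TARGET.REGMAP is just a gdb-register-number to
   USER-area-offset table supplied by each port. A hypothetical
   i386-style fragment (slot numbers follow the kernel's
   user_regs_struct layout; illustrative, not any port's actual
   table):  */
#if 0
static int example_regmap[] = {
  6 * 4,	/* gdb regno 0 (eax) lives in USER slot 6 */
  1 * 4,	/* ecx */
  2 * 4,	/* edx */
  0 * 4,	/* ebx */
  15 * 4,	/* esp */
  5 * 4,	/* ebp */
  3 * 4,	/* esi */
  4 * 4		/* edi */
};
#endif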
3658 /* Fetch one register. */
3659 static void
3660 fetch_register (struct regcache *regcache, int regno)
3661 {
3662 CORE_ADDR regaddr;
3663 int i, size;
3664 char *buf;
3665 int pid;
3666
3667 if (regno >= the_low_target.num_regs)
3668 return;
3669 if ((*the_low_target.cannot_fetch_register) (regno))
3670 return;
3671
3672 regaddr = register_addr (regno);
3673 if (regaddr == -1)
3674 return;
3675
3676 pid = lwpid_of (get_thread_lwp (current_inferior));
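/* Round the transfer size up to a whole number of ptrace words:
   PTRACE_PEEKUSER always moves sizeof (PTRACE_XFER_TYPE) bytes at a
   time.  */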
3677 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3678 & - sizeof (PTRACE_XFER_TYPE));
3679 buf = alloca (size);
3680 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3681 {
3682 errno = 0;
3683 *(PTRACE_XFER_TYPE *) (buf + i) =
3684 ptrace (PTRACE_PEEKUSER, pid,
3685 /* Coerce to a uintptr_t first to avoid potential gcc warning
3686 of coercing an 8 byte integer to a 4 byte pointer. */
3687 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3688 regaddr += sizeof (PTRACE_XFER_TYPE);
3689 if (errno != 0)
3690 error ("reading register %d: %s", regno, strerror (errno));
3691 }
3692
3693 if (the_low_target.supply_ptrace_register)
3694 the_low_target.supply_ptrace_register (regcache, regno, buf);
3695 else
3696 supply_register (regcache, regno, buf);
3697 }
3698
3699 /* Fetch all registers, or just one, from the child process. */
3700 static void
3701 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3702 {
3703 if (regno == -1)
3704 for (regno = 0; regno < the_low_target.num_regs; regno++)
3705 fetch_register (regcache, regno);
3706 else
3707 fetch_register (regcache, regno);
3708 }
3709
3710 /* Store our register values back into the inferior.
3711 If REGNO is -1, do this for all registers.
3712 Otherwise, REGNO specifies which register (so we can save time). */
3713 static void
3714 usr_store_inferior_registers (struct regcache *regcache, int regno)
3715 {
3716 CORE_ADDR regaddr;
3717 int i, size;
3718 char *buf;
3719 int pid;
3720
3721 if (regno >= 0)
3722 {
3723 if (regno >= the_low_target.num_regs)
3724 return;
3725
3726 if ((*the_low_target.cannot_store_register) (regno) == 1)
3727 return;
3728
3729 regaddr = register_addr (regno);
3730 if (regaddr == -1)
3731 return;
3732 errno = 0;
3733 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3734 & - sizeof (PTRACE_XFER_TYPE);
3735 buf = alloca (size);
3736 memset (buf, 0, size);
3737
3738 if (the_low_target.collect_ptrace_register)
3739 the_low_target.collect_ptrace_register (regcache, regno, buf);
3740 else
3741 collect_register (regcache, regno, buf);
3742
3743 pid = lwpid_of (get_thread_lwp (current_inferior));
3744 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3745 {
3746 errno = 0;
3747 ptrace (PTRACE_POKEUSER, pid,
3748 /* Coerce to a uintptr_t first to avoid potential gcc warning
3749 about coercing an 8 byte integer to a 4 byte pointer. */
3750 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3751 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3752 if (errno != 0)
3753 {
3754 /* At this point, ESRCH should mean the process is
3755 already gone, in which case we simply ignore attempts
3756 to change its registers. See also the related
3757 comment in linux_resume_one_lwp. */
3758 if (errno == ESRCH)
3759 return;
3760
3761 if ((*the_low_target.cannot_store_register) (regno) == 0)
3762 error ("writing register %d: %s", regno, strerror (errno));
3763 }
3764 regaddr += sizeof (PTRACE_XFER_TYPE);
3765 }
3766 }
3767 else
3768 for (regno = 0; regno < the_low_target.num_regs; regno++)
3769 usr_store_inferior_registers (regcache, regno);
3770 }
3771 #endif /* HAVE_LINUX_USRREGS */
3772
3773
3774
3775 #ifdef HAVE_LINUX_REGSETS
3776
3777 static int
3778 regsets_fetch_inferior_registers (struct regcache *regcache)
3779 {
3780 struct regset_info *regset;
3781 int saw_general_regs = 0;
3782 int pid;
3783 struct iovec iov;
3784
3785 regset = target_regsets;
3786
3787 pid = lwpid_of (get_thread_lwp (current_inferior));
3788 while (regset->size >= 0)
3789 {
3790 void *buf, *data;
3791 int nt_type, res;
3792
3793 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3794 {
3795 regset ++;
3796 continue;
3797 }
3798
3799 buf = xmalloc (regset->size);
3800
3801 nt_type = regset->nt_type;
3802 if (nt_type)
3803 {
3804 iov.iov_base = buf;
3805 iov.iov_len = regset->size;
3806 data = (void *) &iov;
3807 }
3808 else
3809 data = buf;
3810
3811 #ifndef __sparc__
3812 res = ptrace (regset->get_request, pid, nt_type, data);
3813 #else
3814 res = ptrace (regset->get_request, pid, data, nt_type);
3815 #endif
3816 if (res < 0)
3817 {
3818 if (errno == EIO)
3819 {
3820 /* If we get EIO on a regset, do not try it again for
3821 this process. */
3822 disabled_regsets[regset - target_regsets] = 1;
3823 free (buf);
3824 continue;
3825 }
3826 else
3827 {
3828 char s[256];
3829 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3830 pid);
3831 perror (s);
3832 }
3833 }
3834       else if (regset->type == GENERAL_REGS)
3835 	saw_general_regs = 1;
3836       /* Only supply the registers when the regset was actually read;
	 on the non-EIO error path above, BUF holds no valid contents.  */
      if (res >= 0)
	regset->store_function (regcache, buf);
3837 regset ++;
3838 free (buf);
3839 }
3840 if (saw_general_regs)
3841 return 0;
3842 else
3843 return 1;
3844 }
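#if 0
/* A minimal sketch, not part of this file's build, of the two regset
   flavors dispatched above: NT_-typed regsets go through a struct
   iovec (PTRACE_GETREGSET on newer kernels), while older fixed-layout
   requests such as PTRACE_GETREGS take the buffer pointer directly.
   The function name is hypothetical, and PTRACE_GETREGSET may not be
   defined by older headers.  */
static long
fetch_regset_sketch (int pid, int nt_type, void *buf, size_t size)
{
  if (nt_type != 0)
    {
      struct iovec iov;

      iov.iov_base = buf;
      iov.iov_len = size;
      /* The kernel fills IOV and may shrink iov_len to what it wrote.  */
      return ptrace (PTRACE_GETREGSET, pid, nt_type, &iov);
    }
  return ptrace (PTRACE_GETREGS, pid, 0, buf);
}
#endif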
3845
3846 static int
3847 regsets_store_inferior_registers (struct regcache *regcache)
3848 {
3849 struct regset_info *regset;
3850 int saw_general_regs = 0;
3851 int pid;
3852 struct iovec iov;
3853
3854 regset = target_regsets;
3855
3856 pid = lwpid_of (get_thread_lwp (current_inferior));
3857 while (regset->size >= 0)
3858 {
3859 void *buf, *data;
3860 int nt_type, res;
3861
3862 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3863 {
3864 regset ++;
3865 continue;
3866 }
3867
3868 buf = xmalloc (regset->size);
3869
3870 /* First fill the buffer with the current register set contents,
3871 in case there are any items in the kernel's regset that are
3872 not in gdbserver's regcache. */
3873
3874 nt_type = regset->nt_type;
3875 if (nt_type)
3876 {
3877 iov.iov_base = buf;
3878 iov.iov_len = regset->size;
3879 data = (void *) &iov;
3880 }
3881 else
3882 data = buf;
3883
3884 #ifndef __sparc__
3885 res = ptrace (regset->get_request, pid, nt_type, data);
3886 #else
3887 	  res = ptrace (regset->get_request, pid, data, nt_type);
3888 #endif
3889
3890 if (res == 0)
3891 {
3892 /* Then overlay our cached registers on that. */
3893 regset->fill_function (regcache, buf);
3894
3895 /* Only now do we write the register set. */
3896 #ifndef __sparc__
3897 res = ptrace (regset->set_request, pid, nt_type, data);
3898 #else
3899 res = ptrace (regset->set_request, pid, data, nt_type);
3900 #endif
3901 }
3902
3903 if (res < 0)
3904 {
3905 if (errno == EIO)
3906 {
3907 /* If we get EIO on a regset, do not try it again for
3908 this process. */
3909 disabled_regsets[regset - target_regsets] = 1;
3910 free (buf);
3911 continue;
3912 }
3913 else if (errno == ESRCH)
3914 {
3915 /* At this point, ESRCH should mean the process is
3916 already gone, in which case we simply ignore attempts
3917 to change its registers. See also the related
3918 comment in linux_resume_one_lwp. */
3919 free (buf);
3920 return 0;
3921 }
3922 else
3923 {
3924 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3925 }
3926 }
3927 else if (regset->type == GENERAL_REGS)
3928 saw_general_regs = 1;
3929 regset ++;
3930 free (buf);
3931 }
3932 if (saw_general_regs)
3933 return 0;
3934 else
3935 return 1;
3937 }
3938
3939 #endif /* HAVE_LINUX_REGSETS */
3940
3941
3942 void
3943 linux_fetch_registers (struct regcache *regcache, int regno)
3944 {
3945 #ifdef HAVE_LINUX_REGSETS
3946 if (regsets_fetch_inferior_registers (regcache) == 0)
3947 return;
3948 #endif
3949 #ifdef HAVE_LINUX_USRREGS
3950 usr_fetch_inferior_registers (regcache, regno);
3951 #endif
3952 }
3953
3954 void
3955 linux_store_registers (struct regcache *regcache, int regno)
3956 {
3957 #ifdef HAVE_LINUX_REGSETS
3958 if (regsets_store_inferior_registers (regcache) == 0)
3959 return;
3960 #endif
3961 #ifdef HAVE_LINUX_USRREGS
3962 usr_store_inferior_registers (regcache, regno);
3963 #endif
3964 }
3965
3966
3967 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3968 to debugger memory starting at MYADDR. */
3969
3970 static int
3971 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3972 {
3973 register int i;
3974 /* Round starting address down to longword boundary. */
3975 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3976 /* Round ending address up; get number of longwords that makes. */
3977 register int count
3978 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3979 / sizeof (PTRACE_XFER_TYPE);
3980 /* Allocate buffer of that many longwords. */
3981 register PTRACE_XFER_TYPE *buffer
3982 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3983 int fd;
3984 char filename[64];
3985 int pid = lwpid_of (get_thread_lwp (current_inferior));
3986
3987   /* Try using /proc.  Don't bother for transfers of less than three words.  */
3988 if (len >= 3 * sizeof (long))
3989 {
3990 /* We could keep this file open and cache it - possibly one per
3991 thread. That requires some juggling, but is even faster. */
3992 sprintf (filename, "/proc/%d/mem", pid);
3993 fd = open (filename, O_RDONLY | O_LARGEFILE);
3994 if (fd == -1)
3995 goto no_proc;
3996
3997 /* If pread64 is available, use it. It's faster if the kernel
3998 supports it (only one syscall), and it's 64-bit safe even on
3999 32-bit platforms (for instance, SPARC debugging a SPARC64
4000 application). */
4001 #ifdef HAVE_PREAD64
4002 if (pread64 (fd, myaddr, len, memaddr) != len)
4003 #else
4004 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4005 #endif
4006 {
4007 close (fd);
4008 goto no_proc;
4009 }
4010
4011 close (fd);
4012 return 0;
4013 }
4014
4015 no_proc:
4016 /* Read all the longwords */
4017 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4018 {
4019 errno = 0;
4020 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4021 about coercing an 8 byte integer to a 4 byte pointer. */
4022 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4023 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4024 if (errno)
4025 return errno;
4026 }
4027
4028 /* Copy appropriate bytes out of the buffer. */
4029 memcpy (myaddr,
4030 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4031 len);
4032
4033 return 0;
4034 }
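/* Worked example for the rounding in linux_read_memory (hypothetical
   addresses): LEN = 5 at MEMADDR = 0x1003 with an 8-byte
   PTRACE_XFER_TYPE gives

     addr  = 0x1003 & -8                      = 0x1000
     count = ((0x1008 - 0x1000) + 8 - 1) / 8  = 1

   so a single PTRACE_PEEKTEXT fetches the word at 0x1000, and the
   final memcpy skips (0x1003 & 7) == 3 leading bytes of the buffer.  */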
4035
4036 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4037 memory at MEMADDR. On failure (cannot write to the inferior)
4038 returns the value of errno. */
4039
4040 static int
4041 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4042 {
4043 register int i;
4044 /* Round starting address down to longword boundary. */
4045 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4046 /* Round ending address up; get number of longwords that makes. */
4047 register int count
4048 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
4049 /* Allocate buffer of that many longwords. */
4050 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4051 int pid = lwpid_of (get_thread_lwp (current_inferior));
4052
4053 if (debug_threads)
4054 {
4055 /* Dump up to four bytes. */
4056 unsigned int val = * (unsigned int *) myaddr;
4057 if (len == 1)
4058 val = val & 0xff;
4059 else if (len == 2)
4060 val = val & 0xffff;
4061 else if (len == 3)
4062 val = val & 0xffffff;
4063 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4064 val, (long)memaddr);
4065 }
4066
4067 /* Fill start and end extra bytes of buffer with existing memory data. */
4068
4069 errno = 0;
4070 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4071 about coercing an 8 byte integer to a 4 byte pointer. */
4072 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4073 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4074 if (errno)
4075 return errno;
4076
4077 if (count > 1)
4078 {
4079 errno = 0;
4080 buffer[count - 1]
4081 = ptrace (PTRACE_PEEKTEXT, pid,
4082 /* Coerce to a uintptr_t first to avoid potential gcc warning
4083 about coercing an 8 byte integer to a 4 byte pointer. */
4084 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4085 * sizeof (PTRACE_XFER_TYPE)),
4086 0);
4087 if (errno)
4088 return errno;
4089 }
4090
4091 /* Copy data to be written over corresponding part of buffer. */
4092
4093 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
4094
4095 /* Write the entire buffer. */
4096
4097 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4098 {
4099 errno = 0;
4100 ptrace (PTRACE_POKETEXT, pid,
4101 /* Coerce to a uintptr_t first to avoid potential gcc warning
4102 about coercing an 8 byte integer to a 4 byte pointer. */
4103 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4104 (PTRACE_ARG4_TYPE) buffer[i]);
4105 if (errno)
4106 return errno;
4107 }
4108
4109 return 0;
4110 }
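/* The PTRACE_PEEKTEXTs above make the write a read-modify-write: only
   the first and last words of the range can be partially covered, so
   their old bytes are read in before MYADDR is copied over the middle.
   Hypothetical example: writing 3 bytes at 0x1007 with 8-byte words
   touches the words at 0x1000 and 0x1008, and the other 13 bytes of
   those two words are written back unchanged.  */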
4111
4112 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4113 static int linux_supports_tracefork_flag;
4114
4115 static void
4116 linux_enable_event_reporting (int pid)
4117 {
4118 if (!linux_supports_tracefork_flag)
4119 return;
4120
4121 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4122 }
4123
4124 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4125
4126 static int
4127 linux_tracefork_grandchild (void *arg)
4128 {
4129 _exit (0);
4130 }
4131
4132 #define STACK_SIZE 4096
4133
4134 static int
4135 linux_tracefork_child (void *arg)
4136 {
4137 ptrace (PTRACE_TRACEME, 0, 0, 0);
4138 kill (getpid (), SIGSTOP);
4139
4140 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4141
4142 if (fork () == 0)
4143 linux_tracefork_grandchild (NULL);
4144
4145 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4146
4147 #ifdef __ia64__
4148 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4149 CLONE_VM | SIGCHLD, NULL);
4150 #else
4151 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4152 CLONE_VM | SIGCHLD, NULL);
4153 #endif
4154
4155 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4156
4157 _exit (0);
4158 }
4159
4160 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4161 sure that we can enable the option, and that it had the desired
4162 effect. */
4163
4164 static void
4165 linux_test_for_tracefork (void)
4166 {
4167 int child_pid, ret, status;
4168 long second_pid;
4169 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4170 char *stack = xmalloc (STACK_SIZE * 4);
4171 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4172
4173 linux_supports_tracefork_flag = 0;
4174
4175 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4176
4177 child_pid = fork ();
4178 if (child_pid == 0)
4179 linux_tracefork_child (NULL);
4180
4181 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4182
4183 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4184 #ifdef __ia64__
4185 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4186 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4187 #else /* !__ia64__ */
4188 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4189 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4190 #endif /* !__ia64__ */
4191
4192 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4193
4194 if (child_pid == -1)
4195 perror_with_name ("clone");
4196
4197 ret = my_waitpid (child_pid, &status, 0);
4198 if (ret == -1)
4199 perror_with_name ("waitpid");
4200 else if (ret != child_pid)
4201 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4202 if (! WIFSTOPPED (status))
4203 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4204
4205 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4206 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4207 if (ret != 0)
4208 {
4209 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4210 if (ret != 0)
4211 {
4212 warning ("linux_test_for_tracefork: failed to kill child");
4213 return;
4214 }
4215
4216 ret = my_waitpid (child_pid, &status, 0);
4217 if (ret != child_pid)
4218 warning ("linux_test_for_tracefork: failed to wait for killed child");
4219 else if (!WIFSIGNALED (status))
4220 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4221 "killed child", status);
4222
4223 return;
4224 }
4225
4226 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4227 if (ret != 0)
4228 warning ("linux_test_for_tracefork: failed to resume child");
4229
4230 ret = my_waitpid (child_pid, &status, 0);
4231
4232 if (ret == child_pid && WIFSTOPPED (status)
4233 && status >> 16 == PTRACE_EVENT_FORK)
4234 {
4235 second_pid = 0;
4236 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4237 if (ret == 0 && second_pid != 0)
4238 {
4239 int second_status;
4240
4241 linux_supports_tracefork_flag = 1;
4242 my_waitpid (second_pid, &second_status, 0);
4243 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4244 if (ret != 0)
4245 warning ("linux_test_for_tracefork: failed to kill second child");
4246 my_waitpid (second_pid, &status, 0);
4247 }
4248 }
4249 else
4250 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4251 "(%d, status 0x%x)", ret, status);
4252
4253 do
4254 {
4255 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4256 if (ret != 0)
4257 warning ("linux_test_for_tracefork: failed to kill child");
4258 my_waitpid (child_pid, &status, 0);
4259 }
4260 while (WIFSTOPPED (status));
4261
4262 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4263 free (stack);
4264 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4265 }
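/* For reference, the extended-event stop tested above packs the event
   into the high bits of the wait status.  A PTRACE_EVENT_FORK stop is
   reported as (worked example, with SIGTRAP == 5):

     W_STOPCODE (SIGTRAP) | (PTRACE_EVENT_FORK << 16)
       == 0x57f | 0x10000 == 0x1057f

   so "status >> 16 == PTRACE_EVENT_FORK" identifies it, while
   WIFSTOPPED still sees an ordinary SIGTRAP stop.  */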
4266
4267
4268 static void
4269 linux_look_up_symbols (void)
4270 {
4271 #ifdef USE_THREAD_DB
4272 struct process_info *proc = current_process ();
4273
4274 if (proc->private->thread_db != NULL)
4275 return;
4276
4277 /* If the kernel supports tracing forks then it also supports tracing
4278 clones, and then we don't need to use the magic thread event breakpoint
4279 to learn about threads. */
4280 thread_db_init (!linux_supports_tracefork_flag);
4281 #endif
4282 }
4283
4284 static void
4285 linux_request_interrupt (void)
4286 {
4287 extern unsigned long signal_pid;
4288
4289 if (!ptid_equal (cont_thread, null_ptid)
4290 && !ptid_equal (cont_thread, minus_one_ptid))
4291 {
4292 struct lwp_info *lwp;
4293 int lwpid;
4294
4295 lwp = get_thread_lwp (current_inferior);
4296 lwpid = lwpid_of (lwp);
4297 kill_lwp (lwpid, SIGINT);
4298 }
4299 else
4300 kill_lwp (signal_pid, SIGINT);
4301 }
4302
4303 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4304 to debugger memory starting at MYADDR. */
4305
4306 static int
4307 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4308 {
4309 char filename[PATH_MAX];
4310 int fd, n;
4311 int pid = lwpid_of (get_thread_lwp (current_inferior));
4312
4313 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4314
4315 fd = open (filename, O_RDONLY);
4316 if (fd < 0)
4317 return -1;
4318
4319 if (offset != (CORE_ADDR) 0
4320 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4321 n = -1;
4322 else
4323 n = read (fd, myaddr, len);
4324
4325 close (fd);
4326
4327 return n;
4328 }
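#if 0
/* Consumer-side sketch, not part of this file's build: the bytes
   returned above are raw auxv entries, (a_type, a_val) pairs sized
   like the inferior's pointers.  Assumes a 64-bit inferior; the
   struct and function names are hypothetical.  */
struct auxv64_sketch { unsigned long long a_type, a_val; };

static unsigned long long
auxv_find_sketch (const unsigned char *buf, int n, unsigned long long type)
{
  const struct auxv64_sketch *av = (const struct auxv64_sketch *) buf;
  int i;

  for (i = 0; i < n / (int) sizeof (*av); i++)
    if (av[i].a_type == type)	/* e.g. 9 == AT_ENTRY.  */
      return av[i].a_val;
  return 0;
}
#endif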
4329
4330 /* These breakpoint and watchpoint related wrapper functions simply
4331 pass on the function call if the target has registered a
4332 corresponding function. */
4333
4334 static int
4335 linux_insert_point (char type, CORE_ADDR addr, int len)
4336 {
4337 if (the_low_target.insert_point != NULL)
4338 return the_low_target.insert_point (type, addr, len);
4339 else
4340 /* Unsupported (see target.h). */
4341 return 1;
4342 }
4343
4344 static int
4345 linux_remove_point (char type, CORE_ADDR addr, int len)
4346 {
4347 if (the_low_target.remove_point != NULL)
4348 return the_low_target.remove_point (type, addr, len);
4349 else
4350 /* Unsupported (see target.h). */
4351 return 1;
4352 }
4353
4354 static int
4355 linux_stopped_by_watchpoint (void)
4356 {
4357 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4358
4359 return lwp->stopped_by_watchpoint;
4360 }
4361
4362 static CORE_ADDR
4363 linux_stopped_data_address (void)
4364 {
4365 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4366
4367 return lwp->stopped_data_address;
4368 }
4369
4370 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4371 #if defined(__mcoldfire__)
4372 /* These should really be defined in the kernel's ptrace.h header. */
4373 #define PT_TEXT_ADDR 49*4
4374 #define PT_DATA_ADDR 50*4
4375 #define PT_TEXT_END_ADDR 51*4
4376 #elif defined(BFIN)
4377 #define PT_TEXT_ADDR 220
4378 #define PT_TEXT_END_ADDR 224
4379 #define PT_DATA_ADDR 228
4380 #endif
4381
4382 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4383 to tell gdb about. */
4384
4385 static int
4386 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4387 {
4388 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4389 unsigned long text, text_end, data;
4390 int pid = lwpid_of (get_thread_lwp (current_inferior));
4391
4392 errno = 0;
4393
4394 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4395 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4396 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4397
4398 if (errno == 0)
4399 {
4400 /* Both text and data offsets produced at compile-time (and so
4401 used by gdb) are relative to the beginning of the program,
4402 with the data segment immediately following the text segment.
4403 However, the actual runtime layout in memory may put the data
4404 somewhere else, so when we send gdb a data base-address, we
4405 use the real data base address and subtract the compile-time
4406 data base-address from it (which is just the length of the
4407 text segment). BSS immediately follows data in both
4408 cases. */
4409 *text_p = text;
4410 *data_p = data - (text_end - text);
4411
4412 return 1;
4413 }
4414 #endif
4415 return 0;
4416 }
4417 #endif
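/* Worked example for the offset math above (hypothetical addresses):
   text loaded at 0x10000000..0x10008000 and data at 0x20000000 yield

     *text_p = 0x10000000
     *data_p = 0x20000000 - (0x10008000 - 0x10000000) = 0x1fff8000

   gdb then adds the compile-time data offset (the text length) back,
   arriving at the real runtime data address 0x20000000.  */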
4418
4419 static int
4420 compare_ints (const void *xa, const void *xb)
4421 {
4422 int a = *(const int *)xa;
4423 int b = *(const int *)xb;
4424
4425 return a - b;
4426 }
4427
4428 static int *
4429 unique (int *b, int *e)
4430 {
4431 int *d = b;
4432 while (++b != e)
4433 if (*d != *b)
4434 *++d = *b;
4435 return ++d;
4436 }
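/* Example: on the sorted array {0, 0, 1, 3, 3}, unique compacts the
   front of the array to {0, 1, 3} and returns a pointer just past the
   3, i.e. B + 3 -- the same contract as C++ std::unique over a
   non-empty range [b, e).  */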
4437
4438 /* Given PID, iterates over all threads in that process.
4439
4440    Information about each thread, in a format suitable for qXfer:osdata:thread,
4441 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
4442 initialized, and the caller is responsible for finishing and appending '\0'
4443 to it.
4444
4445 The list of cores that threads are running on is assigned to *CORES, if it
4446 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
4447 should free *CORES. */
4448
4449 static void
4450 list_threads (int pid, struct buffer *buffer, char **cores)
4451 {
4452 int count = 0;
4453 int allocated = 10;
4454 int *core_numbers = xmalloc (sizeof (int) * allocated);
4455 char pathname[128];
4456 DIR *dir;
4457 struct dirent *dp;
4458 struct stat statbuf;
4459
4460 sprintf (pathname, "/proc/%d/task", pid);
4461 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
4462 {
4463 dir = opendir (pathname);
4464 if (!dir)
4465 {
4466 free (core_numbers);
4467 return;
4468 }
4469
4470 while ((dp = readdir (dir)) != NULL)
4471 {
4472 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
4473
4474 if (lwp != 0)
4475 {
4476 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
4477
4478 if (core != -1)
4479 {
4480 char s[sizeof ("4294967295")];
4481 sprintf (s, "%u", core);
4482
4483 if (count == allocated)
4484 {
4485 allocated *= 2;
4486 core_numbers = realloc (core_numbers,
4487 sizeof (int) * allocated);
4488 }
4489 core_numbers[count++] = core;
4490 if (buffer)
4491 buffer_xml_printf (buffer,
4492 "<item>"
4493 "<column name=\"pid\">%d</column>"
4494 "<column name=\"tid\">%s</column>"
4495 "<column name=\"core\">%s</column>"
4496 "</item>", pid, dp->d_name, s);
4497 }
4498 else
4499 {
4500 if (buffer)
4501 buffer_xml_printf (buffer,
4502 "<item>"
4503 "<column name=\"pid\">%d</column>"
4504 "<column name=\"tid\">%s</column>"
4505 "</item>", pid, dp->d_name);
4506 }
4507 }
4508 }
4509 }
4510
4511 if (cores)
4512 {
4513 *cores = NULL;
4514 if (count > 0)
4515 {
4516 struct buffer buffer2;
4517 int *b;
4518 int *e;
4519 qsort (core_numbers, count, sizeof (int), compare_ints);
4520
4521 /* Remove duplicates. */
4522 b = core_numbers;
4523 e = unique (b, core_numbers + count);
4524
4525 buffer_init (&buffer2);
4526
4527 for (b = core_numbers; b != e; ++b)
4528 {
4529 char number[sizeof ("4294967295")];
4530 sprintf (number, "%u", *b);
4531 buffer_xml_printf (&buffer2, "%s%s",
4532 (b == core_numbers) ? "" : ",", number);
4533 }
4534 buffer_grow_str0 (&buffer2, "");
4535
4536 *cores = buffer_finish (&buffer2);
4537 }
4538 }
4539 free (core_numbers);
4540 }
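/* Example <item> emitted above for one thread (hypothetical values):

     <item><column name="pid">1234</column>
     <column name="tid">1235</column>
     <column name="core">2</column></item>

   and *CORES ends up as a sorted, deduplicated list such as "0,2".  */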
4541
4542 static void
4543 show_process (int pid, const char *username, struct buffer *buffer)
4544 {
4545 char pathname[128];
4546 FILE *f;
4547 char cmd[MAXPATHLEN + 1];
4548
4549 sprintf (pathname, "/proc/%d/cmdline", pid);
4550
4551 if ((f = fopen (pathname, "r")) != NULL)
4552 {
4553 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4554 if (len > 0)
4555 {
4556 char *cores = 0;
4557 int i;
4558 for (i = 0; i < len; i++)
4559 if (cmd[i] == '\0')
4560 cmd[i] = ' ';
4561 cmd[len] = '\0';
4562
4563 buffer_xml_printf (buffer,
4564 "<item>"
4565 "<column name=\"pid\">%d</column>"
4566 "<column name=\"user\">%s</column>"
4567 "<column name=\"command\">%s</column>",
4568 pid,
4569 username,
4570 cmd);
4571
4572 /* This only collects core numbers, and does not print threads. */
4573 list_threads (pid, NULL, &cores);
4574
4575 if (cores)
4576 {
4577 buffer_xml_printf (buffer,
4578 "<column name=\"cores\">%s</column>", cores);
4579 free (cores);
4580 }
4581
4582 buffer_xml_printf (buffer, "</item>");
4583 }
4584 fclose (f);
4585 }
4586 }
4587
4588 static int
4589 linux_qxfer_osdata (const char *annex,
4590 unsigned char *readbuf, unsigned const char *writebuf,
4591 CORE_ADDR offset, int len)
4592 {
4593 /* We make the process list snapshot when the object starts to be
4594 read. */
4595 static const char *buf;
4596 static long len_avail = -1;
4597 static struct buffer buffer;
4598 int processes = 0;
4599 int threads = 0;
4600
4601 DIR *dirp;
4602
4603 if (strcmp (annex, "processes") == 0)
4604 processes = 1;
4605 else if (strcmp (annex, "threads") == 0)
4606 threads = 1;
4607 else
4608 return 0;
4609
4610 if (!readbuf || writebuf)
4611 return 0;
4612
4613 if (offset == 0)
4614 {
4615 if (len_avail != -1 && len_avail != 0)
4616 buffer_free (&buffer);
4617 len_avail = 0;
4618 buf = NULL;
4619 buffer_init (&buffer);
4620 if (processes)
4621 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
4622 else if (threads)
4623 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
4624
4625 dirp = opendir ("/proc");
4626 if (dirp)
4627 {
4628 struct dirent *dp;
4629 while ((dp = readdir (dirp)) != NULL)
4630 {
4631 struct stat statbuf;
4632 char procentry[sizeof ("/proc/4294967295")];
4633
4634 if (!isdigit (dp->d_name[0])
4635 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4636 continue;
4637
4638 sprintf (procentry, "/proc/%s", dp->d_name);
4639 if (stat (procentry, &statbuf) == 0
4640 && S_ISDIR (statbuf.st_mode))
4641 {
4642 int pid = (int) strtoul (dp->d_name, NULL, 10);
4643
4644 if (processes)
4645 {
4646 struct passwd *entry = getpwuid (statbuf.st_uid);
4647 show_process (pid, entry ? entry->pw_name : "?", &buffer);
4648 }
4649 else if (threads)
4650 {
4651 list_threads (pid, &buffer, NULL);
4652 }
4653 }
4654 }
4655
4656 closedir (dirp);
4657 }
4658 buffer_grow_str0 (&buffer, "</osdata>\n");
4659 buf = buffer_finish (&buffer);
4660 len_avail = strlen (buf);
4661 }
4662
4663 if (offset >= len_avail)
4664 {
4665 /* Done. Get rid of the data. */
4666 buffer_free (&buffer);
4667 buf = NULL;
4668 len_avail = 0;
4669 return 0;
4670 }
4671
4672 if (len > len_avail - offset)
4673 len = len_avail - offset;
4674 memcpy (readbuf, buf + offset, len);
4675
4676 return len;
4677 }
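/* The snapshot logic above matches how GDB pages through qXfer
   objects.  Hypothetical request sequence:

     linux_qxfer_osdata ("processes", buf, NULL, 0, 4096)     -> 4096
     linux_qxfer_osdata ("processes", buf, NULL, 4096, 4096)  -> tail
     linux_qxfer_osdata ("processes", buf, NULL, N, 4096)     -> 0

   The OFFSET == 0 request (re)builds the XML snapshot, later requests
   copy out LEN-byte windows, and the first request at or past
   LEN_AVAIL frees the snapshot and returns 0 to signal end-of-object.  */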
4678
4679 /* Convert a native/host siginfo object into/from the inferior's layout:
4680    DIRECTION 1 converts INF_SIGINFO into *SIGINFO, 0 the reverse.  */
4681
4682 static void
4683 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4684 {
4685 int done = 0;
4686
4687 if (the_low_target.siginfo_fixup != NULL)
4688 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4689
4690 /* If there was no callback, or the callback didn't do anything,
4691 then just do a straight memcpy. */
4692 if (!done)
4693 {
4694 if (direction == 1)
4695 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4696 else
4697 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4698 }
4699 }
4700
4701 static int
4702 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4703 unsigned const char *writebuf, CORE_ADDR offset, int len)
4704 {
4705 int pid;
4706 struct siginfo siginfo;
4707 char inf_siginfo[sizeof (struct siginfo)];
4708
4709 if (current_inferior == NULL)
4710 return -1;
4711
4712 pid = lwpid_of (get_thread_lwp (current_inferior));
4713
4714 if (debug_threads)
4715 fprintf (stderr, "%s siginfo for lwp %d.\n",
4716 readbuf != NULL ? "Reading" : "Writing",
4717 pid);
4718
4719 if (offset > sizeof (siginfo))
4720 return -1;
4721
4722 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4723 return -1;
4724
4725 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4726 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4727 inferior with a 64-bit GDBSERVER should look the same as debugging it
4728 with a 32-bit GDBSERVER, we need to convert it. */
4729 siginfo_fixup (&siginfo, inf_siginfo, 0);
4730
4731 if (offset + len > sizeof (siginfo))
4732 len = sizeof (siginfo) - offset;
4733
4734 if (readbuf != NULL)
4735 memcpy (readbuf, inf_siginfo + offset, len);
4736 else
4737 {
4738 memcpy (inf_siginfo + offset, writebuf, len);
4739
4740 /* Convert back to ptrace layout before flushing it out. */
4741 siginfo_fixup (&siginfo, inf_siginfo, 1);
4742
4743 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4744 return -1;
4745 }
4746
4747 return len;
4748 }
4749
4750 /* SIGCHLD handler that serves two purposes: in non-stop/async mode it
4751    lets us notice when children change state, and it also acts as the
4752    handler for the sigsuspend in my_waitpid.  */
4753
4754 static void
4755 sigchld_handler (int signo)
4756 {
4757 int old_errno = errno;
4758
4759 if (debug_threads)
4760 {
4761 do
4762 {
4763 /* fprintf is not async-signal-safe, so call write
4764 directly. */
4765 if (write (2, "sigchld_handler\n",
4766 sizeof ("sigchld_handler\n") - 1) < 0)
4767 break; /* just ignore */
4768 } while (0);
4769 }
4770
4771 if (target_is_async_p ())
4772 async_file_mark (); /* trigger a linux_wait */
4773
4774 errno = old_errno;
4775 }
4776
4777 static int
4778 linux_supports_non_stop (void)
4779 {
4780 return 1;
4781 }
4782
4783 static int
4784 linux_async (int enable)
4785 {
4786 int previous = (linux_event_pipe[0] != -1);
4787
4788 if (debug_threads)
4789 fprintf (stderr, "linux_async (%d), previous=%d\n",
4790 enable, previous);
4791
4792 if (previous != enable)
4793 {
4794 sigset_t mask;
4795 sigemptyset (&mask);
4796 sigaddset (&mask, SIGCHLD);
4797
4798 sigprocmask (SIG_BLOCK, &mask, NULL);
4799
4800 if (enable)
4801 {
4802 if (pipe (linux_event_pipe) == -1)
4803 fatal ("creating event pipe failed.");
4804
4805 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4806 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4807
4808 /* Register the event loop handler. */
4809 add_file_handler (linux_event_pipe[0],
4810 handle_target_event, NULL);
4811
4812 /* Always trigger a linux_wait. */
4813 async_file_mark ();
4814 }
4815 else
4816 {
4817 delete_file_handler (linux_event_pipe[0]);
4818
4819 close (linux_event_pipe[0]);
4820 close (linux_event_pipe[1]);
4821 linux_event_pipe[0] = -1;
4822 linux_event_pipe[1] = -1;
4823 }
4824
4825 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4826 }
4827
4828 return previous;
4829 }
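/* The event pipe managed above is the usual self-pipe pattern: the
   async-signal-safe sigchld_handler only marks the pipe
   (async_file_mark writes a byte), and the event loop wakes up when
   the read end becomes readable, so no real work happens in signal
   context.  */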
4830
4831 static int
4832 linux_start_non_stop (int nonstop)
4833 {
4834 /* Register or unregister from event-loop accordingly. */
4835 linux_async (nonstop);
4836 return 0;
4837 }
4838
4839 static int
4840 linux_supports_multi_process (void)
4841 {
4842 return 1;
4843 }
4844
4845
4846 /* Enumerate spufs IDs for process PID. */
4847 static int
4848 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4849 {
4850 int pos = 0;
4851 int written = 0;
4852 char path[128];
4853 DIR *dir;
4854 struct dirent *entry;
4855
4856 sprintf (path, "/proc/%ld/fd", pid);
4857 dir = opendir (path);
4858 if (!dir)
4859 return -1;
4860
4861 rewinddir (dir);
4862 while ((entry = readdir (dir)) != NULL)
4863 {
4864 struct stat st;
4865 struct statfs stfs;
4866 int fd;
4867
4868 fd = atoi (entry->d_name);
4869 if (!fd)
4870 continue;
4871
4872 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4873 if (stat (path, &st) != 0)
4874 continue;
4875 if (!S_ISDIR (st.st_mode))
4876 continue;
4877
4878 if (statfs (path, &stfs) != 0)
4879 continue;
4880 if (stfs.f_type != SPUFS_MAGIC)
4881 continue;
4882
4883 if (pos >= offset && pos + 4 <= offset + len)
4884 {
4885 *(unsigned int *)(buf + pos - offset) = fd;
4886 written += 4;
4887 }
4888 pos += 4;
4889 }
4890
4891 closedir (dir);
4892 return written;
4893 }
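/* Hypothetical example: with SPU context fds 7 and 12 open in the
   inferior, a request with OFFSET 0 and LEN 8 stores the two 4-byte
   IDs back to back and returns 8; a follow-up request at OFFSET 8
   returns 0, which the caller treats as end-of-list.  */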
4894
4895 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4896 object type, using the /proc file system. */
4897 static int
4898 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4899 unsigned const char *writebuf,
4900 CORE_ADDR offset, int len)
4901 {
4902 long pid = lwpid_of (get_thread_lwp (current_inferior));
4903 char buf[128];
4904 int fd = 0;
4905 int ret = 0;
4906
4907 if (!writebuf && !readbuf)
4908 return -1;
4909
4910 if (!*annex)
4911 {
4912 if (!readbuf)
4913 return -1;
4914 else
4915 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4916 }
4917
4918 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4919   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4920 if (fd <= 0)
4921 return -1;
4922
4923 if (offset != 0
4924 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4925 {
4926 close (fd);
4927 return 0;
4928 }
4929
4930 if (writebuf)
4931 ret = write (fd, writebuf, (size_t) len);
4932 else
4933 ret = read (fd, readbuf, (size_t) len);
4934
4935 close (fd);
4936 return ret;
4937 }
4938
4939 static int
4940 linux_core_of_thread (ptid_t ptid)
4941 {
4942 char filename[sizeof ("/proc//task//stat")
4943 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4944 + 1];
4945 FILE *f;
4946 char *content = NULL;
4947 char *p;
4948 char *ts = 0;
4949 int content_read = 0;
4950 int i;
4951 int core;
4952
4953 sprintf (filename, "/proc/%d/task/%ld/stat",
4954 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4955 f = fopen (filename, "r");
4956 if (!f)
4957 return -1;
4958
4959 for (;;)
4960 {
4961 int n;
4962 content = realloc (content, content_read + 1024);
4963 n = fread (content + content_read, 1, 1024, f);
4964 content_read += n;
4965 if (n < 1024)
4966 {
4967 content[content_read] = '\0';
4968 break;
4969 }
4970 }
4971
4972 p = strchr (content, '(');
4973
4974   /* Skip past the ")" that closes the command name.  */
4975 if (p != NULL)
4976 p = strchr (p, ')');
4977 if (p != NULL)
4978 p++;
4979
4980 /* If the first field after program name has index 0, then core number is
4981 the field with index 36. There's no constant for that anywhere. */
4982 if (p != NULL)
4983 p = strtok_r (p, " ", &ts);
4984 for (i = 0; p != NULL && i != 36; ++i)
4985 p = strtok_r (NULL, " ", &ts);
4986
4987 if (p == NULL || sscanf (p, "%d", &core) == 0)
4988 core = -1;
4989
4990 free (content);
4991 fclose (f);
4992
4993 return core;
4994 }
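/* The field being extracted is "processor" -- number 39 in proc(5)'s
   1-based count -- which lands at index 36 once the pid and the
   parenthesized command name have been skipped and the state field is
   counted as index 0.  */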
4995
4996 static void
4997 linux_process_qsupported (const char *query)
4998 {
4999 if (the_low_target.process_qsupported != NULL)
5000 the_low_target.process_qsupported (query);
5001 }
5002
5003 static int
5004 linux_supports_tracepoints (void)
5005 {
5006   if (the_low_target.supports_tracepoints == NULL)
5007 return 0;
5008
5009 return (*the_low_target.supports_tracepoints) ();
5010 }
5011
5012 static CORE_ADDR
5013 linux_read_pc (struct regcache *regcache)
5014 {
5015 if (the_low_target.get_pc == NULL)
5016 return 0;
5017
5018 return (*the_low_target.get_pc) (regcache);
5019 }
5020
5021 static void
5022 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5023 {
5024 gdb_assert (the_low_target.set_pc != NULL);
5025
5026 (*the_low_target.set_pc) (regcache, pc);
5027 }
5028
5029 static int
5030 linux_thread_stopped (struct thread_info *thread)
5031 {
5032 return get_thread_lwp (thread)->stopped;
5033 }
5034
5035 /* This exposes stop-all-threads functionality to other modules. */
5036
5037 static void
5038 linux_pause_all (int freeze)
5039 {
5040 stop_all_lwps (freeze, NULL);
5041 }
5042
5043 /* This exposes unstop-all-threads functionality to other gdbserver
5044 modules. */
5045
5046 static void
5047 linux_unpause_all (int unfreeze)
5048 {
5049 unstop_all_lwps (unfreeze, NULL);
5050 }
5051
5052 static int
5053 linux_prepare_to_access_memory (void)
5054 {
5055 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5056 running LWP. */
5057 if (non_stop)
5058 linux_pause_all (1);
5059 return 0;
5060 }
5061
5062 static void
5063 linux_done_accessing_memory (void)
5064 {
5065 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5066 running LWP. */
5067 if (non_stop)
5068 linux_unpause_all (1);
5069 }
5070
5071 static int
5072 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5073 CORE_ADDR collector,
5074 CORE_ADDR lockaddr,
5075 ULONGEST orig_size,
5076 CORE_ADDR *jump_entry,
5077 unsigned char *jjump_pad_insn,
5078 ULONGEST *jjump_pad_insn_size,
5079 CORE_ADDR *adjusted_insn_addr,
5080 CORE_ADDR *adjusted_insn_addr_end)
5081 {
5082 return (*the_low_target.install_fast_tracepoint_jump_pad)
5083 (tpoint, tpaddr, collector, lockaddr, orig_size,
5084 jump_entry, jjump_pad_insn, jjump_pad_insn_size,
5085 adjusted_insn_addr, adjusted_insn_addr_end);
5086 }
5087
5088 static struct emit_ops *
5089 linux_emit_ops (void)
5090 {
5091 if (the_low_target.emit_ops != NULL)
5092 return (*the_low_target.emit_ops) ();
5093 else
5094 return NULL;
5095 }
5096
5097 static struct target_ops linux_target_ops = {
5098 linux_create_inferior,
5099 linux_attach,
5100 linux_kill,
5101 linux_detach,
5102 linux_mourn,
5103 linux_join,
5104 linux_thread_alive,
5105 linux_resume,
5106 linux_wait,
5107 linux_fetch_registers,
5108 linux_store_registers,
5109 linux_prepare_to_access_memory,
5110 linux_done_accessing_memory,
5111 linux_read_memory,
5112 linux_write_memory,
5113 linux_look_up_symbols,
5114 linux_request_interrupt,
5115 linux_read_auxv,
5116 linux_insert_point,
5117 linux_remove_point,
5118 linux_stopped_by_watchpoint,
5119 linux_stopped_data_address,
5120 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5121 linux_read_offsets,
5122 #else
5123 NULL,
5124 #endif
5125 #ifdef USE_THREAD_DB
5126 thread_db_get_tls_address,
5127 #else
5128 NULL,
5129 #endif
5130 linux_qxfer_spu,
5131 hostio_last_error_from_errno,
5132 linux_qxfer_osdata,
5133 linux_xfer_siginfo,
5134 linux_supports_non_stop,
5135 linux_async,
5136 linux_start_non_stop,
5137 linux_supports_multi_process,
5138 #ifdef USE_THREAD_DB
5139 thread_db_handle_monitor_command,
5140 #else
5141 NULL,
5142 #endif
5143 linux_core_of_thread,
5144 linux_process_qsupported,
5145 linux_supports_tracepoints,
5146 linux_read_pc,
5147 linux_write_pc,
5148 linux_thread_stopped,
5149 NULL,
5150 linux_pause_all,
5151 linux_unpause_all,
5152 linux_cancel_breakpoints,
5153 linux_stabilize_threads,
5154 linux_install_fast_tracepoint_jump_pad,
5155 linux_emit_ops
5156 };
5157
5158 static void
5159 linux_init_signals (void)
5160 {
5161 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5162 to find what the cancel signal actually is. */
5163 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5164 signal (__SIGRTMIN+1, SIG_IGN);
5165 #endif
5166 }
5167
5168 void
5169 initialize_low (void)
5170 {
5171 struct sigaction sigchld_action;
5172 memset (&sigchld_action, 0, sizeof (sigchld_action));
5173 set_target_ops (&linux_target_ops);
5174 set_breakpoint_data (the_low_target.breakpoint,
5175 the_low_target.breakpoint_len);
5176 linux_init_signals ();
5177 linux_test_for_tracefork ();
5178 #ifdef HAVE_LINUX_REGSETS
5179 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5180 ;
5181 disabled_regsets = xmalloc (num_regsets);
5182 #endif
5183
5184 sigchld_action.sa_handler = sigchld_handler;
5185 sigemptyset (&sigchld_action.sa_mask);
5186 sigchld_action.sa_flags = SA_RESTART;
5187 sigaction (SIGCHLD, &sigchld_action, NULL);
5188 }