1/* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20#include "server.h"
21#include "linux-low.h"
22#include "linux-osdata.h"
23
24#include <sys/wait.h>
25#include <stdio.h>
26#include <sys/param.h>
27#include <sys/ptrace.h>
28#include "linux-ptrace.h"
29#include "linux-procfs.h"
30#include <signal.h>
31#include <sys/ioctl.h>
32#include <fcntl.h>
33#include <string.h>
34#include <stdlib.h>
35#include <unistd.h>
36#include <errno.h>
37#include <sys/syscall.h>
38#include <sched.h>
39#include <ctype.h>
40#include <pwd.h>
41#include <sys/types.h>
42#include <dirent.h>
43#include <sys/stat.h>
44#include <sys/vfs.h>
45#include <sys/uio.h>
46#ifndef ELFMAG0
47/* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51#include <elf.h>
52#endif
53
54#ifndef SPUFS_MAGIC
55#define SPUFS_MAGIC 0x23c9b64e
56#endif
57
58#ifdef HAVE_PERSONALITY
59# include <sys/personality.h>
60# if !HAVE_DECL_ADDR_NO_RANDOMIZE
61# define ADDR_NO_RANDOMIZE 0x0040000
62# endif
63#endif
64
65#ifndef O_LARGEFILE
66#define O_LARGEFILE 0
67#endif
68
69#ifndef W_STOPCODE
70#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71#endif
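/* For illustration only: a minimal sketch of how the W_STOPCODE
   fallback above round-trips with the standard <sys/wait.h> macros
   (assumes Linux's status encoding, where a low byte of 0x7f marks a
   stopped child).  */
#if 0
  int status = W_STOPCODE (SIGSTOP);	     /* (SIGSTOP << 8) | 0x7f */
  gdb_assert (WIFSTOPPED (status));	     /* low byte is 0x7f */
  gdb_assert (WSTOPSIG (status) == SIGSTOP); /* signal is in bits 8-15 */
#endif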
72
73/* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75#ifndef __SIGRTMIN
76#define __SIGRTMIN 32
77#endif
78
79#ifdef __UCLIBC__
80#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
81#define HAS_NOMMU
82#endif
83#endif
84
85/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
86 representation of the thread ID.
87
88 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
89 the same as the LWP ID.
90
91 ``all_processes'' is keyed by the "overall process ID", which
92 GNU/Linux calls tgid, "thread group ID". */
93
94struct inferior_list all_lwps;
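/* For illustration (PID values hypothetical): a process with tgid
   1234 that spawned a second thread 1235 would be keyed in these
   lists as follows.  */
#if 0
  ptid_t leader = ptid_build (1234, 1234, 0); /* initial LWP: lwpid == tgid */
  ptid_t worker = ptid_build (1234, 1235, 0); /* a second LWP in the group */

  gdb_assert (ptid_get_pid (worker) == 1234); /* tgid; keys all_processes */
  gdb_assert (ptid_get_lwp (worker) == 1235); /* lwpid; keys all_lwps */
#endif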
95
96/* A list of all unknown processes which receive stop signals. Some other
97 process will presumably claim each of these as forked children
98 momentarily. */
99
100struct inferior_list stopped_pids;
101
102/* FIXME this is a bit of a hack, and could be removed. */
103int stopping_threads;
104
105/* FIXME make into a target method? */
106int using_threads = 1;
107
108/* True if we're presently stabilizing threads (moving them out of
109 jump pads). */
110static int stabilizing_threads;
111
112/* This flag is true iff we've just created or attached to our first
113 inferior but it has not stopped yet. As soon as it does, we need
114 to call the low target's arch_setup callback. Doing this only on
 115 the first inferior avoids reinitializing the architecture on every
116 inferior, and avoids messing with the register caches of the
117 already running inferiors. NOTE: this assumes all inferiors under
118 control of gdbserver have the same architecture. */
119static int new_inferior;
120
121static void linux_resume_one_lwp (struct lwp_info *lwp,
122 int step, int signal, siginfo_t *info);
123static void linux_resume (struct thread_resume *resume_info, size_t n);
124static void stop_all_lwps (int suspend, struct lwp_info *except);
125static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
126static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
127static void *add_lwp (ptid_t ptid);
128static int linux_stopped_by_watchpoint (void);
129static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
130static void proceed_all_lwps (void);
131static int finish_step_over (struct lwp_info *lwp);
132static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
133static int kill_lwp (unsigned long lwpid, int signo);
134static void linux_enable_event_reporting (int pid);
135
136/* True if the low target can hardware single-step. Such targets
137 don't need a BREAKPOINT_REINSERT_ADDR callback. */
138
139static int
140can_hardware_single_step (void)
141{
142 return (the_low_target.breakpoint_reinsert_addr == NULL);
143}
144
145/* True if the low target supports memory breakpoints. If so, we'll
146 have a GET_PC implementation. */
147
148static int
149supports_breakpoints (void)
150{
151 return (the_low_target.get_pc != NULL);
152}
153
154/* Returns true if this target can support fast tracepoints. This
155 does not mean that the in-process agent has been loaded in the
156 inferior. */
157
158static int
159supports_fast_tracepoints (void)
160{
161 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
162}
163
164struct pending_signals
165{
166 int signal;
167 siginfo_t info;
168 struct pending_signals *prev;
169};
170
171#define PTRACE_ARG3_TYPE void *
172#define PTRACE_ARG4_TYPE void *
173#define PTRACE_XFER_TYPE long
174
175#ifdef HAVE_LINUX_REGSETS
176static char *disabled_regsets;
177static int num_regsets;
178#endif
179
180/* The read/write ends of the pipe registered as waitable file in the
181 event loop. */
182static int linux_event_pipe[2] = { -1, -1 };
183
184/* True if we're currently in async mode. */
185#define target_is_async_p() (linux_event_pipe[0] != -1)
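/* The event pipe implements the classic self-pipe pattern: the side
   that notices a child event writes a byte, and the event loop's
   select/poll wakes up on the read end.  A rough sketch with
   illustrative helper names (not gdbserver's own):  */
#if 0
static void
async_wake (void)
{
  /* Any byte will do; the event loop only cares that the read end
     became readable.  */
  if (write (linux_event_pipe[1], "+", 1) < 0)
    ; /* A full pipe already means the loop will wake up.  */
}

static void
async_drain (void)
{
  char buf[64];

  /* Assumes the read end was made O_NONBLOCK when registered.  */
  while (read (linux_event_pipe[0], buf, sizeof buf) > 0)
    ;
}
#endif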
186
187static void send_sigstop (struct lwp_info *lwp);
188static void wait_for_sigstop (struct inferior_list_entry *entry);
189
 190/* Accepts an integer PID; returns a string representing a file that
 191 can be opened to get info for the child process.
 192 Space for the result is malloc'd; the caller must free it. */
193
194char *
195linux_child_pid_to_exec_file (int pid)
196{
197 char *name1, *name2;
198
199 name1 = xmalloc (MAXPATHLEN);
200 name2 = xmalloc (MAXPATHLEN);
201 memset (name2, 0, MAXPATHLEN);
202
203 sprintf (name1, "/proc/%d/exe", pid);
204 if (readlink (name1, name2, MAXPATHLEN) > 0)
205 {
206 free (name1);
207 return name2;
208 }
209 else
210 {
211 free (name2);
212 return name1;
213 }
214}
215
 216/* Return non-zero if HEADER identifies a 64-bit ELF file. */
217
218static int
219elf_64_header_p (const Elf64_Ehdr *header)
220{
221 return (header->e_ident[EI_MAG0] == ELFMAG0
222 && header->e_ident[EI_MAG1] == ELFMAG1
223 && header->e_ident[EI_MAG2] == ELFMAG2
224 && header->e_ident[EI_MAG3] == ELFMAG3
225 && header->e_ident[EI_CLASS] == ELFCLASS64);
226}
227
 228/* Return 1 if FILE is a 64-bit ELF file,
 229 0 if the file is not a 64-bit ELF file,
 230 and -1 if the file is not accessible or doesn't exist. */
231
232int
233elf_64_file_p (const char *file)
234{
235 Elf64_Ehdr header;
236 int fd;
237
238 fd = open (file, O_RDONLY);
239 if (fd < 0)
240 return -1;
241
242 if (read (fd, &header, sizeof (header)) != sizeof (header))
243 {
244 close (fd);
245 return 0;
246 }
247 close (fd);
248
249 return elf_64_header_p (&header);
250}
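/* Illustrative use (sketch; the caller and error handling here are
   hypothetical), e.g. deciding between 32-bit and 64-bit register
   layouts for an inferior:  */
#if 0
  char *file = linux_child_pid_to_exec_file (pid);
  int is_64 = elf_64_file_p (file);

  if (is_64 < 0)
    warning ("cannot read ELF header of %s", file);
  free (file);
#endif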
251
252static void
253delete_lwp (struct lwp_info *lwp)
254{
255 remove_thread (get_lwp_thread (lwp));
256 remove_inferior (&all_lwps, &lwp->head);
257 free (lwp->arch_private);
258 free (lwp);
259}
260
261/* Add a process to the common process list, and set its private
262 data. */
263
264static struct process_info *
265linux_add_process (int pid, int attached)
266{
267 struct process_info *proc;
268
269 /* Is this the first process? If so, then set the arch. */
270 if (all_processes.head == NULL)
271 new_inferior = 1;
272
273 proc = add_process (pid, attached);
274 proc->private = xcalloc (1, sizeof (*proc->private));
275
276 if (the_low_target.new_process != NULL)
277 proc->private->arch_private = the_low_target.new_process ();
278
279 return proc;
280}
281
 282/* Wrapper function for waitpid which handles EINTR and emulates
283 __WALL for systems where that is not available. */
284
285static int
286my_waitpid (int pid, int *status, int flags)
287{
288 int ret, out_errno;
289
290 if (debug_threads)
291 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
292
293 if (flags & __WALL)
294 {
295 sigset_t block_mask, org_mask, wake_mask;
296 int wnohang;
297
298 wnohang = (flags & WNOHANG) != 0;
299 flags &= ~(__WALL | __WCLONE);
300 flags |= WNOHANG;
301
302 /* Block all signals while here. This avoids knowing about
 303 LinuxThreads' signals. */
304 sigfillset (&block_mask);
305 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
306
307 /* ... except during the sigsuspend below. */
308 sigemptyset (&wake_mask);
309
310 while (1)
311 {
312 /* Since all signals are blocked, there's no need to check
313 for EINTR here. */
314 ret = waitpid (pid, status, flags);
315 out_errno = errno;
316
317 if (ret == -1 && out_errno != ECHILD)
318 break;
319 else if (ret > 0)
320 break;
321
322 if (flags & __WCLONE)
323 {
324 /* We've tried both flavors now. If WNOHANG is set,
325 there's nothing else to do, just bail out. */
326 if (wnohang)
327 break;
328
329 if (debug_threads)
330 fprintf (stderr, "blocking\n");
331
332 /* Block waiting for signals. */
333 sigsuspend (&wake_mask);
334 }
335
336 flags ^= __WCLONE;
337 }
338
339 sigprocmask (SIG_SETMASK, &org_mask, NULL);
340 }
341 else
342 {
343 do
344 ret = waitpid (pid, status, flags);
345 while (ret == -1 && errno == EINTR);
346 out_errno = errno;
347 }
348
349 if (debug_threads)
350 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
351 pid, flags, status ? *status : -1, ret);
352
353 errno = out_errno;
354 return ret;
355}
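/* Typical use (sketch): poll for a stop from any child or clone LWP
   in one call, whether or not the C library's waitpid understands
   __WALL natively.  handle_stop is a hypothetical handler, not a
   function in this file.  */
#if 0
  int status;
  int lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);

  if (lwpid > 0 && WIFSTOPPED (status))
    handle_stop (lwpid, status);	/* hypothetical */
#endif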
356
357/* Handle a GNU/Linux extended wait response. If we see a clone
358 event, we need to add the new LWP to our list (and not report the
359 trap to higher layers). */
360
361static void
362handle_extended_wait (struct lwp_info *event_child, int wstat)
363{
364 int event = wstat >> 16;
365 struct lwp_info *new_lwp;
366
367 if (event == PTRACE_EVENT_CLONE)
368 {
369 ptid_t ptid;
370 unsigned long new_pid;
371 int ret, status = W_STOPCODE (SIGSTOP);
372
373 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
374
375 /* If we haven't already seen the new PID stop, wait for it now. */
376 if (! pull_pid_from_list (&stopped_pids, new_pid))
377 {
378 /* The new child has a pending SIGSTOP. We can't affect it until it
379 hits the SIGSTOP, but we're already attached. */
380
381 ret = my_waitpid (new_pid, &status, __WALL);
382
383 if (ret == -1)
384 perror_with_name ("waiting for new child");
385 else if (ret != new_pid)
386 warning ("wait returned unexpected PID %d", ret);
387 else if (!WIFSTOPPED (status))
388 warning ("wait returned unexpected status 0x%x", status);
389 }
390
391 linux_enable_event_reporting (new_pid);
392
393 ptid = ptid_build (pid_of (event_child), new_pid, 0);
394 new_lwp = (struct lwp_info *) add_lwp (ptid);
395 add_thread (ptid, new_lwp);
396
397 /* Either we're going to immediately resume the new thread
398 or leave it stopped. linux_resume_one_lwp is a nop if it
399 thinks the thread is currently running, so set this first
400 before calling linux_resume_one_lwp. */
401 new_lwp->stopped = 1;
402
403 /* Normally we will get the pending SIGSTOP. But in some cases
404 we might get another signal delivered to the group first.
405 If we do get another signal, be sure not to lose it. */
406 if (WSTOPSIG (status) == SIGSTOP)
407 {
408 if (stopping_threads)
409 new_lwp->stop_pc = get_stop_pc (new_lwp);
410 else
411 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
412 }
413 else
414 {
415 new_lwp->stop_expected = 1;
416
417 if (stopping_threads)
418 {
419 new_lwp->stop_pc = get_stop_pc (new_lwp);
420 new_lwp->status_pending_p = 1;
421 new_lwp->status_pending = status;
422 }
423 else
424 /* Pass the signal on. This is what GDB does - except
425 shouldn't we really report it instead? */
426 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
427 }
428
429 /* Always resume the current thread. If we are stopping
430 threads, it will have a pending SIGSTOP; we may as well
431 collect it now. */
432 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
433 }
434}
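/* What clone-event reporting amounts to at the ptrace level (sketch;
   the real setup is in linux_enable_event_reporting, declared above,
   and the constants come from linux-ptrace.h / the kernel).  Once
   PTRACE_O_TRACECLONE is set, a clone stops the parent with SIGTRAP
   and PTRACE_EVENT_CLONE in bits 16 and up of the wait status, which
   is exactly what handle_extended_wait decodes.  */
#if 0
  ptrace (PTRACE_SETOPTIONS, pid, 0, (void *) PTRACE_O_TRACECLONE);

  /* ... after a later waitpid returns a stopped status in WSTAT: */
  if (WSTOPSIG (wstat) == SIGTRAP && (wstat >> 16) == PTRACE_EVENT_CLONE)
    {
      unsigned long new_lwpid;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwpid);
    }
#endif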
435
436/* Return the PC as read from the regcache of LWP, without any
437 adjustment. */
438
439static CORE_ADDR
440get_pc (struct lwp_info *lwp)
441{
442 struct thread_info *saved_inferior;
443 struct regcache *regcache;
444 CORE_ADDR pc;
445
446 if (the_low_target.get_pc == NULL)
447 return 0;
448
449 saved_inferior = current_inferior;
450 current_inferior = get_lwp_thread (lwp);
451
452 regcache = get_thread_regcache (current_inferior, 1);
453 pc = (*the_low_target.get_pc) (regcache);
454
455 if (debug_threads)
456 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
457
458 current_inferior = saved_inferior;
459 return pc;
460}
461
462/* This function should only be called if LWP got a SIGTRAP.
463 The SIGTRAP could mean several things.
464
465 On i386, where decr_pc_after_break is non-zero:
466 If we were single-stepping this process using PTRACE_SINGLESTEP,
467 we will get only the one SIGTRAP (even if the instruction we
468 stepped over was a breakpoint). The value of $eip will be the
469 next instruction.
470 If we continue the process using PTRACE_CONT, we will get a
471 SIGTRAP when we hit a breakpoint. The value of $eip will be
472 the instruction after the breakpoint (i.e. needs to be
473 decremented). If we report the SIGTRAP to GDB, we must also
474 report the undecremented PC. If we cancel the SIGTRAP, we
475 must resume at the decremented PC.
476
477 (Presumably, not yet tested) On a non-decr_pc_after_break machine
478 with hardware or kernel single-step:
479 If we single-step over a breakpoint instruction, our PC will
480 point at the following instruction. If we continue and hit a
481 breakpoint instruction, our PC will point at the breakpoint
482 instruction. */
483
484static CORE_ADDR
485get_stop_pc (struct lwp_info *lwp)
486{
487 CORE_ADDR stop_pc;
488
489 if (the_low_target.get_pc == NULL)
490 return 0;
491
492 stop_pc = get_pc (lwp);
493
494 if (WSTOPSIG (lwp->last_status) == SIGTRAP
495 && !lwp->stepping
496 && !lwp->stopped_by_watchpoint
497 && lwp->last_status >> 16 == 0)
498 stop_pc -= the_low_target.decr_pc_after_break;
499
500 if (debug_threads)
501 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
502
503 return stop_pc;
504}
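/* A concrete i386 example of the adjustment above (addresses made
   up): the int3 breakpoint instruction is one byte, so after hitting
   a breakpoint planted at 0x1000 the kernel reports $eip == 0x1001.
   With decr_pc_after_break == 1:

     stop_pc = 0x1001 - 1 == 0x1000   <-- matches the breakpoint

   A SIGTRAP from hardware single-step or from a watchpoint is left
   unadjusted, which is why the !lwp->stepping and
   !lwp->stopped_by_watchpoint checks guard the subtraction.  */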
505
506static void *
507add_lwp (ptid_t ptid)
508{
509 struct lwp_info *lwp;
510
511 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
512 memset (lwp, 0, sizeof (*lwp));
513
514 lwp->head.id = ptid;
515
516 if (the_low_target.new_thread != NULL)
517 lwp->arch_private = the_low_target.new_thread ();
518
519 add_inferior_to_list (&all_lwps, &lwp->head);
520
521 return lwp;
522}
523
 524/* Start an inferior process and return its pid.
525 ALLARGS is a vector of program-name and args. */
526
527static int
528linux_create_inferior (char *program, char **allargs)
529{
530#ifdef HAVE_PERSONALITY
531 int personality_orig = 0, personality_set = 0;
532#endif
533 struct lwp_info *new_lwp;
534 int pid;
535 ptid_t ptid;
536
537#ifdef HAVE_PERSONALITY
538 if (disable_randomization)
539 {
540 errno = 0;
541 personality_orig = personality (0xffffffff);
542 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
543 {
544 personality_set = 1;
545 personality (personality_orig | ADDR_NO_RANDOMIZE);
546 }
547 if (errno != 0 || (personality_set
548 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
549 warning ("Error disabling address space randomization: %s",
550 strerror (errno));
551 }
552#endif
553
554#if defined(__UCLIBC__) && defined(HAS_NOMMU)
555 pid = vfork ();
556#else
557 pid = fork ();
558#endif
559 if (pid < 0)
560 perror_with_name ("fork");
561
562 if (pid == 0)
563 {
564 ptrace (PTRACE_TRACEME, 0, 0, 0);
565
566#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
567 signal (__SIGRTMIN + 1, SIG_DFL);
568#endif
569
570 setpgid (0, 0);
571
572 execv (program, allargs);
573 if (errno == ENOENT)
574 execvp (program, allargs);
575
576 fprintf (stderr, "Cannot exec %s: %s.\n", program,
577 strerror (errno));
578 fflush (stderr);
579 _exit (0177);
580 }
581
582#ifdef HAVE_PERSONALITY
583 if (personality_set)
584 {
585 errno = 0;
586 personality (personality_orig);
587 if (errno != 0)
588 warning ("Error restoring address space randomization: %s",
589 strerror (errno));
590 }
591#endif
592
593 linux_add_process (pid, 0);
594
595 ptid = ptid_build (pid, pid, 0);
596 new_lwp = add_lwp (ptid);
597 add_thread (ptid, new_lwp);
598 new_lwp->must_set_ptrace_flags = 1;
599
600 return pid;
601}
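/* The ADDR_NO_RANDOMIZE dance above, isolated as a sketch (needs
   <sys/personality.h>; error handling elided).  Passing 0xffffffff
   is the documented way to query the current persona without
   changing it.  */
#if 0
static int
disable_aslr_sketch (void)
{
  int orig = personality (0xffffffff);	/* query current persona */

  if (orig == -1)
    return -1;
  return personality (orig | ADDR_NO_RANDOMIZE);  /* set new persona */
}
#endif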
602
603/* Attach to an inferior process. */
604
605static void
606linux_attach_lwp_1 (unsigned long lwpid, int initial)
607{
608 ptid_t ptid;
609 struct lwp_info *new_lwp;
610
611 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
612 {
613 if (!initial)
614 {
615 /* If we fail to attach to an LWP, just warn. */
616 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
617 strerror (errno), errno);
618 fflush (stderr);
619 return;
620 }
621 else
622 /* If we fail to attach to a process, report an error. */
623 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
624 strerror (errno), errno);
625 }
626
627 if (initial)
628 /* If lwp is the tgid, we handle adding existing threads later.
629 Otherwise we just add lwp without bothering about any other
630 threads. */
631 ptid = ptid_build (lwpid, lwpid, 0);
632 else
633 {
634 /* Note that extracting the pid from the current inferior is
635 safe, since we're always called in the context of the same
636 process as this new thread. */
637 int pid = pid_of (get_thread_lwp (current_inferior));
638 ptid = ptid_build (pid, lwpid, 0);
639 }
640
641 new_lwp = (struct lwp_info *) add_lwp (ptid);
642 add_thread (ptid, new_lwp);
643
644 /* We need to wait for SIGSTOP before being able to make the next
645 ptrace call on this LWP. */
646 new_lwp->must_set_ptrace_flags = 1;
647
648 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
649 brings it to a halt.
650
651 There are several cases to consider here:
652
653 1) gdbserver has already attached to the process and is being notified
654 of a new thread that is being created.
655 In this case we should ignore that SIGSTOP and resume the
656 process. This is handled below by setting stop_expected = 1,
657 and the fact that add_thread sets last_resume_kind ==
658 resume_continue.
659
660 2) This is the first thread (the process thread), and we're attaching
661 to it via attach_inferior.
662 In this case we want the process thread to stop.
663 This is handled by having linux_attach set last_resume_kind ==
664 resume_stop after we return.
665
666 If the pid we are attaching to is also the tgid, we attach to and
667 stop all the existing threads. Otherwise, we attach to pid and
668 ignore any other threads in the same group as this pid.
669
670 3) GDB is connecting to gdbserver and is requesting an enumeration of all
671 existing threads.
672 In this case we want the thread to stop.
673 FIXME: This case is currently not properly handled.
674 We should wait for the SIGSTOP but don't. Things work apparently
675 because enough time passes between when we ptrace (ATTACH) and when
676 gdb makes the next ptrace call on the thread.
677
678 On the other hand, if we are currently trying to stop all threads, we
679 should treat the new thread as if we had sent it a SIGSTOP. This works
680 because we are guaranteed that the add_lwp call above added us to the
681 end of the list, and so the new thread has not yet reached
682 wait_for_sigstop (but will). */
683 new_lwp->stop_expected = 1;
684}
685
686void
687linux_attach_lwp (unsigned long lwpid)
688{
689 linux_attach_lwp_1 (lwpid, 0);
690}
691
692/* Attach to PID. If PID is the tgid, attach to it and all
693 of its threads. */
694
695int
696linux_attach (unsigned long pid)
697{
698 /* Attach to PID. We will check for other threads
699 soon. */
700 linux_attach_lwp_1 (pid, 1);
701 linux_add_process (pid, 1);
702
703 if (!non_stop)
704 {
705 struct thread_info *thread;
706
707 /* Don't ignore the initial SIGSTOP if we just attached to this
708 process. It will be collected by wait shortly. */
709 thread = find_thread_ptid (ptid_build (pid, pid, 0));
710 thread->last_resume_kind = resume_stop;
711 }
712
713 if (linux_proc_get_tgid (pid) == pid)
714 {
715 DIR *dir;
716 char pathname[128];
717
718 sprintf (pathname, "/proc/%ld/task", pid);
719
720 dir = opendir (pathname);
721
722 if (!dir)
723 {
724 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
725 fflush (stderr);
726 }
727 else
728 {
729 /* At this point we attached to the tgid. Scan the task for
730 existing threads. */
731 unsigned long lwp;
732 int new_threads_found;
733 int iterations = 0;
734 struct dirent *dp;
735
736 while (iterations < 2)
737 {
738 new_threads_found = 0;
739 /* Add all the other threads. While we go through the
740 threads, new threads may be spawned. Cycle through
741 the list of threads until we have done two iterations without
742 finding new threads. */
743 while ((dp = readdir (dir)) != NULL)
744 {
745 /* Fetch one lwp. */
746 lwp = strtoul (dp->d_name, NULL, 10);
747
748 /* Is this a new thread? */
749 if (lwp
750 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
751 {
752 linux_attach_lwp_1 (lwp, 0);
753 new_threads_found++;
754
755 if (debug_threads)
756 fprintf (stderr, "\
757Found and attached to new lwp %ld\n", lwp);
758 }
759 }
760
761 if (!new_threads_found)
762 iterations++;
763 else
764 iterations = 0;
765
766 rewinddir (dir);
767 }
768 closedir (dir);
769 }
770 }
771
772 return 0;
773}
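/* Roughly what linux_proc_get_tgid (from the linux-procfs module)
   does: read the "Tgid:" field of /proc/PID/status.  A sketch with
   minimal error handling:  */
#if 0
static int
proc_tgid_sketch (int pid)
{
  char path[64], line[128];
  int tgid = -1;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "Tgid:%d", &tgid) == 1)
      break;
  fclose (f);
  return tgid;
}
#endif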
774
775struct counter
776{
777 int pid;
778 int count;
779};
780
781static int
782second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
783{
784 struct counter *counter = args;
785
786 if (ptid_get_pid (entry->id) == counter->pid)
787 {
788 if (++counter->count > 1)
789 return 1;
790 }
791
792 return 0;
793}
794
795static int
796last_thread_of_process_p (struct thread_info *thread)
797{
798 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
799 int pid = ptid_get_pid (ptid);
800 struct counter counter = { pid , 0 };
801
802 return (find_inferior (&all_threads,
803 second_thread_of_pid_p, &counter) == NULL);
804}
805
806/* Kill the inferior lwp. */
807
808static int
809linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
810{
811 struct thread_info *thread = (struct thread_info *) entry;
812 struct lwp_info *lwp = get_thread_lwp (thread);
813 int wstat;
814 int pid = * (int *) args;
815
816 if (ptid_get_pid (entry->id) != pid)
817 return 0;
818
819 /* We avoid killing the first thread here, because of a Linux kernel (at
820 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
821 the children get a chance to be reaped, it will remain a zombie
822 forever. */
823
824 if (lwpid_of (lwp) == pid)
825 {
826 if (debug_threads)
827 fprintf (stderr, "lkop: is last of process %s\n",
828 target_pid_to_str (entry->id));
829 return 0;
830 }
831
832 do
833 {
834 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
835
836 /* Make sure it died. The loop is most likely unnecessary. */
837 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
838 } while (pid > 0 && WIFSTOPPED (wstat));
839
840 return 0;
841}
842
843static int
844linux_kill (int pid)
845{
846 struct process_info *process;
847 struct lwp_info *lwp;
848 int wstat;
849 int lwpid;
850
851 process = find_process_pid (pid);
852 if (process == NULL)
853 return -1;
854
855 /* If we're killing a running inferior, make sure it is stopped
856 first, as PTRACE_KILL will not work otherwise. */
857 stop_all_lwps (0, NULL);
858
859 find_inferior (&all_threads, linux_kill_one_lwp, &pid);
860
861 /* See the comment in linux_kill_one_lwp. We did not kill the first
862 thread in the list, so do so now. */
863 lwp = find_lwp_pid (pid_to_ptid (pid));
864
865 if (debug_threads)
866 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
867 lwpid_of (lwp), pid);
868
869 do
870 {
871 ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
872
873 /* Make sure it died. The loop is most likely unnecessary. */
874 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
875 } while (lwpid > 0 && WIFSTOPPED (wstat));
876
877 the_target->mourn (process);
878
879 /* Since we presently can only stop all lwps of all processes, we
880 need to unstop lwps of other processes. */
881 unstop_all_lwps (0, NULL);
882 return 0;
883}
884
885static int
886linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
887{
888 struct thread_info *thread = (struct thread_info *) entry;
889 struct lwp_info *lwp = get_thread_lwp (thread);
890 int pid = * (int *) args;
891
892 if (ptid_get_pid (entry->id) != pid)
893 return 0;
894
895 /* If this process is stopped but is expecting a SIGSTOP, then make
896 sure we take care of that now. This isn't absolutely guaranteed
897 to collect the SIGSTOP, but is fairly likely to. */
898 if (lwp->stop_expected)
899 {
900 int wstat;
901 /* Clear stop_expected, so that the SIGSTOP will be reported. */
902 lwp->stop_expected = 0;
903 linux_resume_one_lwp (lwp, 0, 0, NULL);
904 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
905 }
906
907 /* Flush any pending changes to the process's registers. */
908 regcache_invalidate_one ((struct inferior_list_entry *)
909 get_lwp_thread (lwp));
910
911 /* Finally, let it resume. */
912 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
913
914 delete_lwp (lwp);
915 return 0;
916}
917
918static int
919linux_detach (int pid)
920{
921 struct process_info *process;
922
923 process = find_process_pid (pid);
924 if (process == NULL)
925 return -1;
926
927 /* Stop all threads before detaching. First, ptrace requires that
 928 the thread is stopped to successfully detach. Second, thread_db
929 may need to uninstall thread event breakpoints from memory, which
930 only works with a stopped process anyway. */
931 stop_all_lwps (0, NULL);
932
933#ifdef USE_THREAD_DB
934 thread_db_detach (process);
935#endif
936
937 /* Stabilize threads (move out of jump pads). */
938 stabilize_threads ();
939
940 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
941
942 the_target->mourn (process);
943
944 /* Since we presently can only stop all lwps of all processes, we
945 need to unstop lwps of other processes. */
946 unstop_all_lwps (0, NULL);
947 return 0;
948}
949
950/* Remove all LWPs that belong to process PROC from the lwp list. */
951
952static int
953delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
954{
955 struct lwp_info *lwp = (struct lwp_info *) entry;
956 struct process_info *process = proc;
957
958 if (pid_of (lwp) == pid_of (process))
959 delete_lwp (lwp);
960
961 return 0;
962}
963
964static void
965linux_mourn (struct process_info *process)
966{
967 struct process_info_private *priv;
968
969#ifdef USE_THREAD_DB
970 thread_db_mourn (process);
971#endif
972
973 find_inferior (&all_lwps, delete_lwp_callback, process);
974
 975 /* Free all private data. */
976 priv = process->private;
977 free (priv->arch_private);
978 free (priv);
979 process->private = NULL;
980
981 remove_process (process);
982}
983
984static void
985linux_join (int pid)
986{
987 int status, ret;
988
989 do {
990 ret = my_waitpid (pid, &status, 0);
991 if (WIFEXITED (status) || WIFSIGNALED (status))
992 break;
993 } while (ret != -1 || errno != ECHILD);
994}
995
996/* Return nonzero if the given thread is still alive. */
997static int
998linux_thread_alive (ptid_t ptid)
999{
1000 struct lwp_info *lwp = find_lwp_pid (ptid);
1001
1002 /* We assume we always know if a thread exits. If a whole process
1003 exited but we still haven't been able to report it to GDB, we'll
1004 hold on to the last lwp of the dead process. */
1005 if (lwp != NULL)
1006 return !lwp->dead;
1007 else
1008 return 0;
1009}
1010
1011/* Return 1 if this lwp has an interesting status pending. */
1012static int
1013status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1014{
1015 struct lwp_info *lwp = (struct lwp_info *) entry;
1016 ptid_t ptid = * (ptid_t *) arg;
1017 struct thread_info *thread;
1018
1019 /* Check if we're only interested in events from a specific process
1020 or its lwps. */
1021 if (!ptid_equal (minus_one_ptid, ptid)
1022 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1023 return 0;
1024
1025 thread = get_lwp_thread (lwp);
1026
1027 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1028 report any status pending the LWP may have. */
1029 if (thread->last_resume_kind == resume_stop
1030 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1031 return 0;
1032
1033 return lwp->status_pending_p;
1034}
1035
1036static int
1037same_lwp (struct inferior_list_entry *entry, void *data)
1038{
1039 ptid_t ptid = *(ptid_t *) data;
1040 int lwp;
1041
1042 if (ptid_get_lwp (ptid) != 0)
1043 lwp = ptid_get_lwp (ptid);
1044 else
1045 lwp = ptid_get_pid (ptid);
1046
1047 if (ptid_get_lwp (entry->id) == lwp)
1048 return 1;
1049
1050 return 0;
1051}
1052
1053struct lwp_info *
1054find_lwp_pid (ptid_t ptid)
1055{
1056 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1057}
1058
1059static struct lwp_info *
1060linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1061{
1062 int ret;
1063 int to_wait_for = -1;
1064 struct lwp_info *child = NULL;
1065
1066 if (debug_threads)
1067 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1068
1069 if (ptid_equal (ptid, minus_one_ptid))
1070 to_wait_for = -1; /* any child */
1071 else
1072 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1073
1074 options |= __WALL;
1075
1076retry:
1077
1078 ret = my_waitpid (to_wait_for, wstatp, options);
1079 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1080 return NULL;
1081 else if (ret == -1)
1082 perror_with_name ("waitpid");
1083
1084 if (debug_threads
1085 && (!WIFSTOPPED (*wstatp)
1086 || (WSTOPSIG (*wstatp) != 32
1087 && WSTOPSIG (*wstatp) != 33)))
1088 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1089
1090 child = find_lwp_pid (pid_to_ptid (ret));
1091
1092 /* If we didn't find a process, one of two things presumably happened:
1093 - A process we started and then detached from has exited. Ignore it.
1094 - A process we are controlling has forked and the new child's stop
1095 was reported to us by the kernel. Save its PID. */
1096 if (child == NULL && WIFSTOPPED (*wstatp))
1097 {
1098 add_pid_to_list (&stopped_pids, ret);
1099 goto retry;
1100 }
1101 else if (child == NULL)
1102 goto retry;
1103
1104 child->stopped = 1;
1105
1106 child->last_status = *wstatp;
1107
1108 /* Architecture-specific setup after inferior is running.
1109 This needs to happen after we have attached to the inferior
1110 and it is stopped for the first time, but before we access
1111 any inferior registers. */
1112 if (new_inferior)
1113 {
1114 the_low_target.arch_setup ();
1115#ifdef HAVE_LINUX_REGSETS
1116 memset (disabled_regsets, 0, num_regsets);
1117#endif
1118 new_inferior = 0;
1119 }
1120
1121 /* Fetch the possibly triggered data watchpoint info and store it in
1122 CHILD.
1123
1124 On some archs, like x86, that use debug registers to set
1125 watchpoints, it's possible that the way to know which watched
1126 address trapped, is to check the register that is used to select
1127 which address to watch. Problem is, between setting the
1128 watchpoint and reading back which data address trapped, the user
1129 may change the set of watchpoints, and, as a consequence, GDB
1130 changes the debug registers in the inferior. To avoid reading
1131 back a stale stopped-data-address when that happens, we cache in
1132 LP the fact that a watchpoint trapped, and the corresponding data
1133 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1134 changes the debug registers meanwhile, we have the cached data we
1135 can rely on. */
1136
1137 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1138 {
1139 if (the_low_target.stopped_by_watchpoint == NULL)
1140 {
1141 child->stopped_by_watchpoint = 0;
1142 }
1143 else
1144 {
1145 struct thread_info *saved_inferior;
1146
1147 saved_inferior = current_inferior;
1148 current_inferior = get_lwp_thread (child);
1149
1150 child->stopped_by_watchpoint
1151 = the_low_target.stopped_by_watchpoint ();
1152
1153 if (child->stopped_by_watchpoint)
1154 {
1155 if (the_low_target.stopped_data_address != NULL)
1156 child->stopped_data_address
1157 = the_low_target.stopped_data_address ();
1158 else
1159 child->stopped_data_address = 0;
1160 }
1161
1162 current_inferior = saved_inferior;
1163 }
1164 }
1165
1166 /* Store the STOP_PC, with adjustment applied. This depends on the
1167 architecture being defined already (so that CHILD has a valid
1168 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1169 not). */
1170 if (WIFSTOPPED (*wstatp))
1171 child->stop_pc = get_stop_pc (child);
1172
1173 if (debug_threads
1174 && WIFSTOPPED (*wstatp)
1175 && the_low_target.get_pc != NULL)
1176 {
1177 struct thread_info *saved_inferior = current_inferior;
1178 struct regcache *regcache;
1179 CORE_ADDR pc;
1180
1181 current_inferior = get_lwp_thread (child);
1182 regcache = get_thread_regcache (current_inferior, 1);
1183 pc = (*the_low_target.get_pc) (regcache);
1184 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1185 current_inferior = saved_inferior;
1186 }
1187
1188 return child;
1189}
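/* For the x86 low targets, the stopped_by_watchpoint hook consulted
   above roughly boils down to reading debug status register DR6 via
   ptrace (sketch; x86-Linux specific, needs <sys/user.h> and
   <stddef.h>):  */
#if 0
static int
x86_watchpoint_hit_sketch (int lwpid)
{
  long dr6 = ptrace (PTRACE_PEEKUSER, lwpid,
		     offsetof (struct user, u_debugreg[6]), 0);

  /* Bits 0-3 of DR6 flag which of the DR0-DR3 debug registers
     triggered; the watched address itself sits in that DRn.  */
  return (dr6 & 0xf) != 0;
}
#endif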
1190
1191/* This function should only be called if the LWP got a SIGTRAP.
1192
1193 Handle any tracepoint steps or hits. Return true if a tracepoint
1194 event was handled, 0 otherwise. */
1195
1196static int
1197handle_tracepoints (struct lwp_info *lwp)
1198{
1199 struct thread_info *tinfo = get_lwp_thread (lwp);
1200 int tpoint_related_event = 0;
1201
1202 /* If this tracepoint hit causes a tracing stop, we'll immediately
1203 uninsert tracepoints. To do this, we temporarily pause all
1204 threads, unpatch away, and then unpause threads. We need to make
1205 sure the unpausing doesn't resume LWP too. */
1206 lwp->suspended++;
1207
1208 /* And we need to be sure that any all-threads-stopping doesn't try
1209 to move threads out of the jump pads, as it could deadlock the
1210 inferior (LWP could be in the jump pad, maybe even holding the
1211 lock.) */
1212
1213 /* Do any necessary step collect actions. */
1214 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1215
1216 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1217
1218 /* See if we just hit a tracepoint and do its main collect
1219 actions. */
1220 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1221
1222 lwp->suspended--;
1223
1224 gdb_assert (lwp->suspended == 0);
1225 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1226
1227 if (tpoint_related_event)
1228 {
1229 if (debug_threads)
1230 fprintf (stderr, "got a tracepoint event\n");
1231 return 1;
1232 }
1233
1234 return 0;
1235}
1236
1237/* Convenience wrapper. Returns true if LWP is presently collecting a
1238 fast tracepoint. */
1239
1240static int
1241linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1242 struct fast_tpoint_collect_status *status)
1243{
1244 CORE_ADDR thread_area;
1245
1246 if (the_low_target.get_thread_area == NULL)
1247 return 0;
1248
1249 /* Get the thread area address. This is used to recognize which
1250 thread is which when tracing with the in-process agent library.
1251 We don't read anything from the address, and treat it as opaque;
1252 it's the address itself that we assume is unique per-thread. */
1253 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1254 return 0;
1255
1256 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1257}
1258
 1259/* The reason we resume in the caller is that we want to be able
 1260 to pass lwp->status_pending as WSTAT, and we need to clear
 1261 status_pending_p before resuming; otherwise, linux_resume_one_lwp
 1262 refuses to resume. */
1263
1264static int
1265maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1266{
1267 struct thread_info *saved_inferior;
1268
1269 saved_inferior = current_inferior;
1270 current_inferior = get_lwp_thread (lwp);
1271
1272 if ((wstat == NULL
1273 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1274 && supports_fast_tracepoints ()
1275 && in_process_agent_loaded ())
1276 {
1277 struct fast_tpoint_collect_status status;
1278 int r;
1279
1280 if (debug_threads)
1281 fprintf (stderr, "\
1282Checking whether LWP %ld needs to move out of the jump pad.\n",
1283 lwpid_of (lwp));
1284
1285 r = linux_fast_tracepoint_collecting (lwp, &status);
1286
1287 if (wstat == NULL
1288 || (WSTOPSIG (*wstat) != SIGILL
1289 && WSTOPSIG (*wstat) != SIGFPE
1290 && WSTOPSIG (*wstat) != SIGSEGV
1291 && WSTOPSIG (*wstat) != SIGBUS))
1292 {
1293 lwp->collecting_fast_tracepoint = r;
1294
1295 if (r != 0)
1296 {
1297 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1298 {
1299 /* Haven't executed the original instruction yet.
1300 Set breakpoint there, and wait till it's hit,
1301 then single-step until exiting the jump pad. */
1302 lwp->exit_jump_pad_bkpt
1303 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1304 }
1305
1306 if (debug_threads)
1307 fprintf (stderr, "\
1308Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1309 lwpid_of (lwp));
1310 current_inferior = saved_inferior;
1311
1312 return 1;
1313 }
1314 }
1315 else
1316 {
1317 /* If we get a synchronous signal while collecting, *and*
1318 while executing the (relocated) original instruction,
1319 reset the PC to point at the tpoint address, before
1320 reporting to GDB. Otherwise, it's an IPA lib bug: just
1321 report the signal to GDB, and pray for the best. */
1322
1323 lwp->collecting_fast_tracepoint = 0;
1324
1325 if (r != 0
1326 && (status.adjusted_insn_addr <= lwp->stop_pc
1327 && lwp->stop_pc < status.adjusted_insn_addr_end))
1328 {
1329 siginfo_t info;
1330 struct regcache *regcache;
1331
1332 /* The si_addr on a few signals references the address
1333 of the faulting instruction. Adjust that as
1334 well. */
1335 if ((WSTOPSIG (*wstat) == SIGILL
1336 || WSTOPSIG (*wstat) == SIGFPE
1337 || WSTOPSIG (*wstat) == SIGBUS
1338 || WSTOPSIG (*wstat) == SIGSEGV)
1339 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1340 /* Final check just to make sure we don't clobber
1341 the siginfo of non-kernel-sent signals. */
1342 && (uintptr_t) info.si_addr == lwp->stop_pc)
1343 {
1344 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1345 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1346 }
1347
1348 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1349 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1350 lwp->stop_pc = status.tpoint_addr;
1351
1352 /* Cancel any fast tracepoint lock this thread was
1353 holding. */
1354 force_unlock_trace_buffer ();
1355 }
1356
1357 if (lwp->exit_jump_pad_bkpt != NULL)
1358 {
1359 if (debug_threads)
1360 fprintf (stderr,
1361 "Cancelling fast exit-jump-pad: removing bkpt. "
1362 "stopping all threads momentarily.\n");
1363
1364 stop_all_lwps (1, lwp);
1365 cancel_breakpoints ();
1366
1367 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1368 lwp->exit_jump_pad_bkpt = NULL;
1369
1370 unstop_all_lwps (1, lwp);
1371
1372 gdb_assert (lwp->suspended >= 0);
1373 }
1374 }
1375 }
1376
1377 if (debug_threads)
1378 fprintf (stderr, "\
1379Checking whether LWP %ld needs to move out of the jump pad...no\n",
1380 lwpid_of (lwp));
1381
1382 current_inferior = saved_inferior;
1383 return 0;
1384}
1385
1386/* Enqueue one signal in the "signals to report later when out of the
1387 jump pad" list. */
1388
1389static void
1390enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1391{
1392 struct pending_signals *p_sig;
1393
1394 if (debug_threads)
1395 fprintf (stderr, "\
1396Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1397
1398 if (debug_threads)
1399 {
1400 struct pending_signals *sig;
1401
1402 for (sig = lwp->pending_signals_to_report;
1403 sig != NULL;
1404 sig = sig->prev)
1405 fprintf (stderr,
1406 " Already queued %d\n",
1407 sig->signal);
1408
1409 fprintf (stderr, " (no more currently queued signals)\n");
1410 }
1411
1412 /* Don't enqueue non-RT signals if they are already in the deferred
1413 queue. (SIGSTOP being the easiest signal to see ending up here
1414 twice) */
1415 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1416 {
1417 struct pending_signals *sig;
1418
1419 for (sig = lwp->pending_signals_to_report;
1420 sig != NULL;
1421 sig = sig->prev)
1422 {
1423 if (sig->signal == WSTOPSIG (*wstat))
1424 {
1425 if (debug_threads)
1426 fprintf (stderr,
1427 "Not requeuing already queued non-RT signal %d"
1428 " for LWP %ld\n",
1429 sig->signal,
1430 lwpid_of (lwp));
1431 return;
1432 }
1433 }
1434 }
1435
1436 p_sig = xmalloc (sizeof (*p_sig));
1437 p_sig->prev = lwp->pending_signals_to_report;
1438 p_sig->signal = WSTOPSIG (*wstat);
1439 memset (&p_sig->info, 0, sizeof (siginfo_t));
1440 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1441
1442 lwp->pending_signals_to_report = p_sig;
1443}
1444
1445/* Dequeue one signal from the "signals to report later when out of
1446 the jump pad" list. */
1447
1448static int
1449dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1450{
1451 if (lwp->pending_signals_to_report != NULL)
1452 {
1453 struct pending_signals **p_sig;
1454
1455 p_sig = &lwp->pending_signals_to_report;
1456 while ((*p_sig)->prev != NULL)
1457 p_sig = &(*p_sig)->prev;
1458
1459 *wstat = W_STOPCODE ((*p_sig)->signal);
1460 if ((*p_sig)->info.si_signo != 0)
1461 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1462 free (*p_sig);
1463 *p_sig = NULL;
1464
1465 if (debug_threads)
1466 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1467 WSTOPSIG (*wstat), lwpid_of (lwp));
1468
1469 if (debug_threads)
1470 {
1471 struct pending_signals *sig;
1472
1473 for (sig = lwp->pending_signals_to_report;
1474 sig != NULL;
1475 sig = sig->prev)
1476 fprintf (stderr,
1477 " Still queued %d\n",
1478 sig->signal);
1479
1480 fprintf (stderr, " (no more queued signals)\n");
1481 }
1482
1483 return 1;
1484 }
1485
1486 return 0;
1487}
1488
1489/* Arrange for a breakpoint to be hit again later. We don't keep the
1490 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1491 will handle the current event, eventually we will resume this LWP,
1492 and this breakpoint will trap again. */
1493
1494static int
1495cancel_breakpoint (struct lwp_info *lwp)
1496{
1497 struct thread_info *saved_inferior;
1498
1499 /* There's nothing to do if we don't support breakpoints. */
1500 if (!supports_breakpoints ())
1501 return 0;
1502
1503 /* breakpoint_at reads from current inferior. */
1504 saved_inferior = current_inferior;
1505 current_inferior = get_lwp_thread (lwp);
1506
1507 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1508 {
1509 if (debug_threads)
1510 fprintf (stderr,
1511 "CB: Push back breakpoint for %s\n",
1512 target_pid_to_str (ptid_of (lwp)));
1513
1514 /* Back up the PC if necessary. */
1515 if (the_low_target.decr_pc_after_break)
1516 {
1517 struct regcache *regcache
1518 = get_thread_regcache (current_inferior, 1);
1519 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1520 }
1521
1522 current_inferior = saved_inferior;
1523 return 1;
1524 }
1525 else
1526 {
1527 if (debug_threads)
1528 fprintf (stderr,
1529 "CB: No breakpoint found at %s for [%s]\n",
1530 paddress (lwp->stop_pc),
1531 target_pid_to_str (ptid_of (lwp)));
1532 }
1533
1534 current_inferior = saved_inferior;
1535 return 0;
1536}
1537
1538/* When the event-loop is doing a step-over, this points at the thread
1539 being stepped. */
1540ptid_t step_over_bkpt;
1541
 1542/* Wait for an event from child PTID. If PTID is minus_one_ptid,
 1543 wait for any child. Store the stop status through WSTAT.
1544 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1545 event was found and OPTIONS contains WNOHANG. Return the PID of
1546 the stopped child otherwise. */
1547
1548static int
1549linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1550{
1551 struct lwp_info *event_child, *requested_child;
1552
1553 event_child = NULL;
1554 requested_child = NULL;
1555
1556 /* Check for a lwp with a pending status. */
1557
1558 if (ptid_equal (ptid, minus_one_ptid)
1559 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1560 {
1561 event_child = (struct lwp_info *)
1562 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1563 if (debug_threads && event_child)
1564 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1565 }
1566 else
1567 {
1568 requested_child = find_lwp_pid (ptid);
1569
1570 if (!stopping_threads
1571 && requested_child->status_pending_p
1572 && requested_child->collecting_fast_tracepoint)
1573 {
1574 enqueue_one_deferred_signal (requested_child,
1575 &requested_child->status_pending);
1576 requested_child->status_pending_p = 0;
1577 requested_child->status_pending = 0;
1578 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1579 }
1580
1581 if (requested_child->suspended
1582 && requested_child->status_pending_p)
1583 fatal ("requesting an event out of a suspended child?");
1584
1585 if (requested_child->status_pending_p)
1586 event_child = requested_child;
1587 }
1588
1589 if (event_child != NULL)
1590 {
1591 if (debug_threads)
1592 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1593 lwpid_of (event_child), event_child->status_pending);
1594 *wstat = event_child->status_pending;
1595 event_child->status_pending_p = 0;
1596 event_child->status_pending = 0;
1597 current_inferior = get_lwp_thread (event_child);
1598 return lwpid_of (event_child);
1599 }
1600
1601 /* We only enter this loop if no process has a pending wait status. Thus
1602 any action taken in response to a wait status inside this loop is
1603 responding as soon as we detect the status, not after any pending
1604 events. */
1605 while (1)
1606 {
1607 event_child = linux_wait_for_lwp (ptid, wstat, options);
1608
1609 if ((options & WNOHANG) && event_child == NULL)
1610 {
1611 if (debug_threads)
1612 fprintf (stderr, "WNOHANG set, no event found\n");
1613 return 0;
1614 }
1615
1616 if (event_child == NULL)
1617 error ("event from unknown child");
1618
1619 current_inferior = get_lwp_thread (event_child);
1620
1621 /* Check for thread exit. */
1622 if (! WIFSTOPPED (*wstat))
1623 {
1624 if (debug_threads)
1625 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1626
1627 /* If the last thread is exiting, just return. */
1628 if (last_thread_of_process_p (current_inferior))
1629 {
1630 if (debug_threads)
1631 fprintf (stderr, "LWP %ld is last lwp of process\n",
1632 lwpid_of (event_child));
1633 return lwpid_of (event_child);
1634 }
1635
1636 if (!non_stop)
1637 {
1638 current_inferior = (struct thread_info *) all_threads.head;
1639 if (debug_threads)
1640 fprintf (stderr, "Current inferior is now %ld\n",
1641 lwpid_of (get_thread_lwp (current_inferior)));
1642 }
1643 else
1644 {
1645 current_inferior = NULL;
1646 if (debug_threads)
1647 fprintf (stderr, "Current inferior is now <NULL>\n");
1648 }
1649
1650 /* If we were waiting for this particular child to do something...
1651 well, it did something. */
1652 if (requested_child != NULL)
1653 {
1654 int lwpid = lwpid_of (event_child);
1655
1656 /* Cancel the step-over operation --- the thread that
1657 started it is gone. */
1658 if (finish_step_over (event_child))
1659 unstop_all_lwps (1, event_child);
1660 delete_lwp (event_child);
1661 return lwpid;
1662 }
1663
1664 delete_lwp (event_child);
1665
1666 /* Wait for a more interesting event. */
1667 continue;
1668 }
1669
1670 if (event_child->must_set_ptrace_flags)
1671 {
1672 linux_enable_event_reporting (lwpid_of (event_child));
1673 event_child->must_set_ptrace_flags = 0;
1674 }
1675
1676 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1677 && *wstat >> 16 != 0)
1678 {
1679 handle_extended_wait (event_child, *wstat);
1680 continue;
1681 }
1682
1683 if (WIFSTOPPED (*wstat)
1684 && WSTOPSIG (*wstat) == SIGSTOP
1685 && event_child->stop_expected)
1686 {
1687 int should_stop;
1688
1689 if (debug_threads)
1690 fprintf (stderr, "Expected stop.\n");
1691 event_child->stop_expected = 0;
1692
1693 should_stop = (current_inferior->last_resume_kind == resume_stop
1694 || stopping_threads);
1695
1696 if (!should_stop)
1697 {
1698 linux_resume_one_lwp (event_child,
1699 event_child->stepping, 0, NULL);
1700 continue;
1701 }
1702 }
1703
1704 return lwpid_of (event_child);
1705 }
1706
1707 /* NOTREACHED */
1708 return 0;
1709}
1710
1711static int
1712linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1713{
1714 ptid_t wait_ptid;
1715
1716 if (ptid_is_pid (ptid))
1717 {
1718 /* A request to wait for a specific tgid. This is not possible
1719 with waitpid, so instead, we wait for any child, and leave
1720 children we're not interested in right now with a pending
1721 status to report later. */
1722 wait_ptid = minus_one_ptid;
1723 }
1724 else
1725 wait_ptid = ptid;
1726
1727 while (1)
1728 {
1729 int event_pid;
1730
1731 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1732
1733 if (event_pid > 0
1734 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1735 {
1736 struct lwp_info *event_child
1737 = find_lwp_pid (pid_to_ptid (event_pid));
1738
1739 if (! WIFSTOPPED (*wstat))
1740 mark_lwp_dead (event_child, *wstat);
1741 else
1742 {
1743 event_child->status_pending_p = 1;
1744 event_child->status_pending = *wstat;
1745 }
1746 }
1747 else
1748 return event_pid;
1749 }
1750}
1751
1752
 1753/* Count the LWPs that have had events. */
1754
1755static int
1756count_events_callback (struct inferior_list_entry *entry, void *data)
1757{
1758 struct lwp_info *lp = (struct lwp_info *) entry;
1759 struct thread_info *thread = get_lwp_thread (lp);
1760 int *count = data;
1761
1762 gdb_assert (count != NULL);
1763
1764 /* Count only resumed LWPs that have a SIGTRAP event pending that
1765 should be reported to GDB. */
1766 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1767 && thread->last_resume_kind != resume_stop
1768 && lp->status_pending_p
1769 && WIFSTOPPED (lp->status_pending)
1770 && WSTOPSIG (lp->status_pending) == SIGTRAP
1771 && !breakpoint_inserted_here (lp->stop_pc))
1772 (*count)++;
1773
1774 return 0;
1775}
1776
1777/* Select the LWP (if any) that is currently being single-stepped. */
1778
1779static int
1780select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1781{
1782 struct lwp_info *lp = (struct lwp_info *) entry;
1783 struct thread_info *thread = get_lwp_thread (lp);
1784
1785 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1786 && thread->last_resume_kind == resume_step
1787 && lp->status_pending_p)
1788 return 1;
1789 else
1790 return 0;
1791}
1792
1793/* Select the Nth LWP that has had a SIGTRAP event that should be
1794 reported to GDB. */
1795
1796static int
1797select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1798{
1799 struct lwp_info *lp = (struct lwp_info *) entry;
1800 struct thread_info *thread = get_lwp_thread (lp);
1801 int *selector = data;
1802
1803 gdb_assert (selector != NULL);
1804
1805 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1806 if (thread->last_resume_kind != resume_stop
1807 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1808 && lp->status_pending_p
1809 && WIFSTOPPED (lp->status_pending)
1810 && WSTOPSIG (lp->status_pending) == SIGTRAP
1811 && !breakpoint_inserted_here (lp->stop_pc))
1812 if ((*selector)-- == 0)
1813 return 1;
1814
1815 return 0;
1816}
1817
1818static int
1819cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1820{
1821 struct lwp_info *lp = (struct lwp_info *) entry;
1822 struct thread_info *thread = get_lwp_thread (lp);
1823 struct lwp_info *event_lp = data;
1824
1825 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1826 if (lp == event_lp)
1827 return 0;
1828
1829 /* If a LWP other than the LWP that we're reporting an event for has
1830 hit a GDB breakpoint (as opposed to some random trap signal),
1831 then just arrange for it to hit it again later. We don't keep
1832 the SIGTRAP status and don't forward the SIGTRAP signal to the
1833 LWP. We will handle the current event, eventually we will resume
1834 all LWPs, and this one will get its breakpoint trap again.
1835
1836 If we do not do this, then we run the risk that the user will
1837 delete or disable the breakpoint, but the LWP will have already
1838 tripped on it. */
1839
1840 if (thread->last_resume_kind != resume_stop
1841 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1842 && lp->status_pending_p
1843 && WIFSTOPPED (lp->status_pending)
1844 && WSTOPSIG (lp->status_pending) == SIGTRAP
1845 && !lp->stepping
1846 && !lp->stopped_by_watchpoint
1847 && cancel_breakpoint (lp))
1848 /* Throw away the SIGTRAP. */
1849 lp->status_pending_p = 0;
1850
1851 return 0;
1852}
1853
1854static void
1855linux_cancel_breakpoints (void)
1856{
1857 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1858}
1859
1860/* Select one LWP out of those that have events pending. */
1861
1862static void
1863select_event_lwp (struct lwp_info **orig_lp)
1864{
1865 int num_events = 0;
1866 int random_selector;
1867 struct lwp_info *event_lp;
1868
1869 /* Give preference to any LWP that is being single-stepped. */
1870 event_lp
1871 = (struct lwp_info *) find_inferior (&all_lwps,
1872 select_singlestep_lwp_callback, NULL);
1873 if (event_lp != NULL)
1874 {
1875 if (debug_threads)
1876 fprintf (stderr,
1877 "SEL: Select single-step %s\n",
1878 target_pid_to_str (ptid_of (event_lp)));
1879 }
1880 else
1881 {
1882 /* No single-stepping LWP. Select one at random, out of those
1883 which have had SIGTRAP events. */
1884
1885 /* First see how many SIGTRAP events we have. */
1886 find_inferior (&all_lwps, count_events_callback, &num_events);
1887
1888 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1889 random_selector = (int)
1890 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1891
1892 if (debug_threads && num_events > 1)
1893 fprintf (stderr,
1894 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1895 num_events, random_selector);
1896
1897 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1898 select_event_lwp_callback,
1899 &random_selector);
1900 }
1901
1902 if (event_lp != NULL)
1903 {
1904 /* Switch the event LWP. */
1905 *orig_lp = event_lp;
1906 }
1907}
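/* The selection formula above, standalone: scaling by RAND_MAX + 1.0
   maps rand () to an (approximately) uniform index in [0, n), and
   avoids the low-bits bias of the naive rand () % n.  */
#if 0
static int
uniform_index_sketch (int n)
{
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif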
1908
1909/* Decrement the suspend count of an LWP. */
1910
1911static int
1912unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1913{
1914 struct lwp_info *lwp = (struct lwp_info *) entry;
1915
1916 /* Ignore EXCEPT. */
1917 if (lwp == except)
1918 return 0;
1919
1920 lwp->suspended--;
1921
1922 gdb_assert (lwp->suspended >= 0);
1923 return 0;
1924}
1925
 1926/* Decrement the suspend count of all LWPs, except EXCEPT, if
 1927 non-NULL. */
1928
1929static void
1930unsuspend_all_lwps (struct lwp_info *except)
1931{
1932 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1933}
1934
1935static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1936static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1937 void *data);
1938static int lwp_running (struct inferior_list_entry *entry, void *data);
1939static ptid_t linux_wait_1 (ptid_t ptid,
1940 struct target_waitstatus *ourstatus,
1941 int target_options);
1942
1943/* Stabilize threads (move out of jump pads).
1944
1945 If a thread is midway collecting a fast tracepoint, we need to
1946 finish the collection and move it out of the jump pad before
1947 reporting the signal.
1948
1949 This avoids recursion while collecting (when a signal arrives
1950 midway, and the signal handler itself collects), which would trash
1951 the trace buffer. In case the user set a breakpoint in a signal
1952 handler, this avoids the backtrace showing the jump pad, etc..
1953 Most importantly, there are certain things we can't do safely if
 1954 threads are stopped in a jump pad (or in its callees). For
1955 example:
1956
1957 - starting a new trace run. A thread still collecting the
1958 previous run, could trash the trace buffer when resumed. The trace
1959 buffer control structures would have been reset but the thread had
1960 no way to tell. The thread could even midway memcpy'ing to the
1961 buffer, which would mean that when resumed, it would clobber the
1962 trace buffer that had been set for a new run.
1963
1964 - we can't rewrite/reuse the jump pads for new tracepoints
1965 safely. Say you do tstart while a thread is stopped midway while
1966 collecting. When the thread is later resumed, it finishes the
1967 collection, and returns to the jump pad, to execute the original
1968 instruction that was under the tracepoint jump at the time the
1969 older run had been started. If the jump pad had been rewritten
1970 since for something else in the new run, the thread would now
1971 execute the wrong / random instructions. */
1972
1973static void
1974linux_stabilize_threads (void)
1975{
1976 struct thread_info *save_inferior;
1977 struct lwp_info *lwp_stuck;
1978
1979 lwp_stuck
1980 = (struct lwp_info *) find_inferior (&all_lwps,
1981 stuck_in_jump_pad_callback, NULL);
1982 if (lwp_stuck != NULL)
1983 {
1984 if (debug_threads)
1985 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
1986 lwpid_of (lwp_stuck));
1987 return;
1988 }
1989
1990 save_inferior = current_inferior;
1991
1992 stabilizing_threads = 1;
1993
1994 /* Kick 'em all. */
1995 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
1996
1997 /* Loop until all are stopped out of the jump pads. */
1998 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
1999 {
2000 struct target_waitstatus ourstatus;
2001 struct lwp_info *lwp;
2002 int wstat;
2003
      /* Note that we go through the full wait event loop.  While
	 moving threads out of the jump pad, we need to be able to
	 step over internal breakpoints and such.  */
2007 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2008
2009 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2010 {
2011 lwp = get_thread_lwp (current_inferior);
2012
2013 /* Lock it. */
2014 lwp->suspended++;
2015
2016 if (ourstatus.value.sig != TARGET_SIGNAL_0
2017 || current_inferior->last_resume_kind == resume_stop)
2018 {
2019 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2020 enqueue_one_deferred_signal (lwp, &wstat);
2021 }
2022 }
2023 }
2024
2025 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2026
2027 stabilizing_threads = 0;
2028
2029 current_inferior = save_inferior;
2030
2031 if (debug_threads)
2032 {
2033 lwp_stuck
2034 = (struct lwp_info *) find_inferior (&all_lwps,
2035 stuck_in_jump_pad_callback, NULL);
2036 if (lwp_stuck != NULL)
2037 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2038 lwpid_of (lwp_stuck));
2039 }
2040}
2041
/* Wait for an event from any inferior.  Fill OURSTATUS with the
   details of the event and return the ptid of the LWP it refers
   to.  */
2043
2044static ptid_t
2045linux_wait_1 (ptid_t ptid,
2046 struct target_waitstatus *ourstatus, int target_options)
2047{
2048 int w;
2049 struct lwp_info *event_child;
2050 int options;
2051 int pid;
2052 int step_over_finished;
2053 int bp_explains_trap;
2054 int maybe_internal_trap;
2055 int report_to_gdb;
2056 int trace_event;
2057
2058 /* Translate generic target options into linux options. */
2059 options = __WALL;
2060 if (target_options & TARGET_WNOHANG)
2061 options |= WNOHANG;
2062
2063retry:
2064 bp_explains_trap = 0;
2065 trace_event = 0;
2066 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2067
2068 /* If we were only supposed to resume one thread, only wait for
2069 that thread - if it's still alive. If it died, however - which
2070 can happen if we're coming from the thread death case below -
2071 then we need to make sure we restart the other threads. We could
2072 pick a thread at random or restart all; restarting all is less
2073 arbitrary. */
2074 if (!non_stop
2075 && !ptid_equal (cont_thread, null_ptid)
2076 && !ptid_equal (cont_thread, minus_one_ptid))
2077 {
2078 struct thread_info *thread;
2079
2080 thread = (struct thread_info *) find_inferior_id (&all_threads,
2081 cont_thread);
2082
2083 /* No stepping, no signal - unless one is pending already, of course. */
2084 if (thread == NULL)
2085 {
2086 struct thread_resume resume_info;
2087 resume_info.thread = minus_one_ptid;
2088 resume_info.kind = resume_continue;
2089 resume_info.sig = 0;
2090 linux_resume (&resume_info, 1);
2091 }
2092 else
2093 ptid = cont_thread;
2094 }
2095
2096 if (ptid_equal (step_over_bkpt, null_ptid))
2097 pid = linux_wait_for_event (ptid, &w, options);
2098 else
2099 {
2100 if (debug_threads)
2101 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2102 target_pid_to_str (step_over_bkpt));
2103 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2104 }
2105
2106 if (pid == 0) /* only if TARGET_WNOHANG */
2107 return null_ptid;
2108
2109 event_child = get_thread_lwp (current_inferior);
2110
2111 /* If we are waiting for a particular child, and it exited,
2112 linux_wait_for_event will return its exit status. Similarly if
2113 the last child exited. If this is not the last child, however,
2114 do not report it as exited until there is a 'thread exited' response
2115 available in the remote protocol. Instead, just wait for another event.
2116 This should be safe, because if the thread crashed we will already
2117 have reported the termination signal to GDB; that should stop any
2118 in-progress stepping operations, etc.
2119
2120 Report the exit status of the last thread to exit. This matches
2121 LinuxThreads' behavior. */
2122
2123 if (last_thread_of_process_p (current_inferior))
2124 {
2125 if (WIFEXITED (w) || WIFSIGNALED (w))
2126 {
2127 if (WIFEXITED (w))
2128 {
2129 ourstatus->kind = TARGET_WAITKIND_EXITED;
2130 ourstatus->value.integer = WEXITSTATUS (w);
2131
2132 if (debug_threads)
2133 fprintf (stderr,
2134 "\nChild exited with retcode = %x \n",
2135 WEXITSTATUS (w));
2136 }
2137 else
2138 {
2139 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2140 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2141
2142 if (debug_threads)
2143 fprintf (stderr,
2144 "\nChild terminated with signal = %x \n",
2145 WTERMSIG (w));
2146
2147 }
2148
2149 return ptid_of (event_child);
2150 }
2151 }
2152 else
2153 {
2154 if (!WIFSTOPPED (w))
2155 goto retry;
2156 }
2157
2158 /* If this event was not handled before, and is not a SIGTRAP, we
2159 report it. SIGILL and SIGSEGV are also treated as traps in case
2160 a breakpoint is inserted at the current PC. If this target does
2161 not support internal breakpoints at all, we also report the
2162 SIGTRAP without further processing; it's of no concern to us. */
2163 maybe_internal_trap
2164 = (supports_breakpoints ()
2165 && (WSTOPSIG (w) == SIGTRAP
2166 || ((WSTOPSIG (w) == SIGILL
2167 || WSTOPSIG (w) == SIGSEGV)
2168 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2169
2170 if (maybe_internal_trap)
2171 {
2172 /* Handle anything that requires bookkeeping before deciding to
2173 report the event or continue waiting. */
2174
2175 /* First check if we can explain the SIGTRAP with an internal
2176 breakpoint, or if we should possibly report the event to GDB.
2177 Do this before anything that may remove or insert a
2178 breakpoint. */
2179 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2180
2181 /* We have a SIGTRAP, possibly a step-over dance has just
2182 finished. If so, tweak the state machine accordingly,
2183 reinsert breakpoints and delete any reinsert (software
2184 single-step) breakpoints. */
2185 step_over_finished = finish_step_over (event_child);
2186
2187 /* Now invoke the callbacks of any internal breakpoints there. */
2188 check_breakpoints (event_child->stop_pc);
2189
2190 /* Handle tracepoint data collecting. This may overflow the
2191 trace buffer, and cause a tracing stop, removing
2192 breakpoints. */
2193 trace_event = handle_tracepoints (event_child);
2194
2195 if (bp_explains_trap)
2196 {
2197 /* If we stepped or ran into an internal breakpoint, we've
2198 already handled it. So next time we resume (from this
2199 PC), we should step over it. */
2200 if (debug_threads)
2201 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2202
2203 if (breakpoint_here (event_child->stop_pc))
2204 event_child->need_step_over = 1;
2205 }
2206 }
2207 else
2208 {
2209 /* We have some other signal, possibly a step-over dance was in
2210 progress, and it should be cancelled too. */
2211 step_over_finished = finish_step_over (event_child);
2212 }
2213
2214 /* We have all the data we need. Either report the event to GDB, or
2215 resume threads and keep waiting for more. */
2216
2217 /* If we're collecting a fast tracepoint, finish the collection and
2218 move out of the jump pad before delivering a signal. See
2219 linux_stabilize_threads. */
2220
2221 if (WIFSTOPPED (w)
2222 && WSTOPSIG (w) != SIGTRAP
2223 && supports_fast_tracepoints ()
2224 && in_process_agent_loaded ())
2225 {
2226 if (debug_threads)
2227 fprintf (stderr,
2228 "Got signal %d for LWP %ld. Check if we need "
2229 "to defer or adjust it.\n",
2230 WSTOPSIG (w), lwpid_of (event_child));
2231
2232 /* Allow debugging the jump pad itself. */
2233 if (current_inferior->last_resume_kind != resume_step
2234 && maybe_move_out_of_jump_pad (event_child, &w))
2235 {
2236 enqueue_one_deferred_signal (event_child, &w);
2237
2238 if (debug_threads)
2239 fprintf (stderr,
2240 "Signal %d for LWP %ld deferred (in jump pad)\n",
2241 WSTOPSIG (w), lwpid_of (event_child));
2242
2243 linux_resume_one_lwp (event_child, 0, 0, NULL);
2244 goto retry;
2245 }
2246 }
2247
2248 if (event_child->collecting_fast_tracepoint)
2249 {
2250 if (debug_threads)
2251 fprintf (stderr, "\
2252LWP %ld was trying to move out of the jump pad (%d). \
2253Check if we're already there.\n",
2254 lwpid_of (event_child),
2255 event_child->collecting_fast_tracepoint);
2256
2257 trace_event = 1;
2258
2259 event_child->collecting_fast_tracepoint
2260 = linux_fast_tracepoint_collecting (event_child, NULL);
2261
2262 if (event_child->collecting_fast_tracepoint != 1)
2263 {
2264 /* No longer need this breakpoint. */
2265 if (event_child->exit_jump_pad_bkpt != NULL)
2266 {
2267 if (debug_threads)
2268 fprintf (stderr,
			 "No longer need exit-jump-pad bkpt; removing it "
			 "and stopping all threads momentarily.\n");
2271
2272 /* Other running threads could hit this breakpoint.
2273 We don't handle moribund locations like GDB does,
2274 instead we always pause all threads when removing
2275 breakpoints, so that any step-over or
2276 decr_pc_after_break adjustment is always taken
2277 care of while the breakpoint is still
2278 inserted. */
2279 stop_all_lwps (1, event_child);
2280 cancel_breakpoints ();
2281
2282 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2283 event_child->exit_jump_pad_bkpt = NULL;
2284
2285 unstop_all_lwps (1, event_child);
2286
2287 gdb_assert (event_child->suspended >= 0);
2288 }
2289 }
2290
2291 if (event_child->collecting_fast_tracepoint == 0)
2292 {
2293 if (debug_threads)
2294 fprintf (stderr,
2295 "fast tracepoint finished "
2296 "collecting successfully.\n");
2297
2298 /* We may have a deferred signal to report. */
2299 if (dequeue_one_deferred_signal (event_child, &w))
2300 {
2301 if (debug_threads)
2302 fprintf (stderr, "dequeued one signal.\n");
2303 }
2304 else
2305 {
2306 if (debug_threads)
2307 fprintf (stderr, "no deferred signals.\n");
2308
2309 if (stabilizing_threads)
2310 {
2311 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2312 ourstatus->value.sig = TARGET_SIGNAL_0;
2313 return ptid_of (event_child);
2314 }
2315 }
2316 }
2317 }
2318
2319 /* Check whether GDB would be interested in this event. */
2320
2321 /* If GDB is not interested in this signal, don't stop other
2322 threads, and don't report it to GDB. Just resume the inferior
2323 right away. We do this for threading-related signals as well as
2324 any that GDB specifically requested we ignore. But never ignore
2325 SIGSTOP if we sent it ourselves, and do not ignore signals when
2326 stepping - they may require special handling to skip the signal
2327 handler. */
2328 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2329 thread library? */
2330 if (WIFSTOPPED (w)
2331 && current_inferior->last_resume_kind != resume_step
2332 && (
2333#if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2334 (current_process ()->private->thread_db != NULL
2335 && (WSTOPSIG (w) == __SIGRTMIN
2336 || WSTOPSIG (w) == __SIGRTMIN + 1))
2337 ||
2338#endif
2339 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2340 && !(WSTOPSIG (w) == SIGSTOP
2341 && current_inferior->last_resume_kind == resume_stop))))
2342 {
2343 siginfo_t info, *info_p;
2344
2345 if (debug_threads)
2346 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2347 WSTOPSIG (w), lwpid_of (event_child));
2348
2349 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2350 info_p = &info;
2351 else
2352 info_p = NULL;
2353 linux_resume_one_lwp (event_child, event_child->stepping,
2354 WSTOPSIG (w), info_p);
2355 goto retry;
2356 }
2357
  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     may not support Z0 breakpoints.  If we do, we'll be able to
     handle GDB breakpoints on top of internal breakpoints, by
     handling the internal breakpoint and still reporting the event
     to GDB.  If we don't, we're out of luck, GDB won't see the
     breakpoint hit.  */
2366 report_to_gdb = (!maybe_internal_trap
2367 || current_inferior->last_resume_kind == resume_step
2368 || event_child->stopped_by_watchpoint
2369 || (!step_over_finished
2370 && !bp_explains_trap && !trace_event)
2371 || gdb_breakpoint_here (event_child->stop_pc));
2372
2373 /* We found no reason GDB would want us to stop. We either hit one
2374 of our own breakpoints, or finished an internal step GDB
2375 shouldn't know about. */
2376 if (!report_to_gdb)
2377 {
2378 if (debug_threads)
2379 {
2380 if (bp_explains_trap)
2381 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2382 if (step_over_finished)
2383 fprintf (stderr, "Step-over finished.\n");
2384 if (trace_event)
2385 fprintf (stderr, "Tracepoint event.\n");
2386 }
2387
2388 /* We're not reporting this breakpoint to GDB, so apply the
2389 decr_pc_after_break adjustment to the inferior's regcache
2390 ourselves. */
2391
2392 if (the_low_target.set_pc != NULL)
2393 {
2394 struct regcache *regcache
2395 = get_thread_regcache (get_lwp_thread (event_child), 1);
2396 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2397 }
2398
2399 /* We may have finished stepping over a breakpoint. If so,
2400 we've stopped and suspended all LWPs momentarily except the
2401 stepping one. This is where we resume them all again. We're
2402 going to keep waiting, so use proceed, which handles stepping
2403 over the next breakpoint. */
2404 if (debug_threads)
2405 fprintf (stderr, "proceeding all threads.\n");
2406
2407 if (step_over_finished)
2408 unsuspend_all_lwps (event_child);
2409
2410 proceed_all_lwps ();
2411 goto retry;
2412 }
2413
  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
	fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
	fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
	fprintf (stderr, "Stopped by GDB breakpoint.\n");
      fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }
2425
2426 /* Alright, we're going to report a stop. */
2427
2428 if (!non_stop && !stabilizing_threads)
2429 {
2430 /* In all-stop, stop all threads. */
2431 stop_all_lwps (0, NULL);
2432
2433 /* If we're not waiting for a specific LWP, choose an event LWP
2434 from among those that have had events. Giving equal priority
2435 to all LWPs that have had events helps prevent
2436 starvation. */
2437 if (ptid_equal (ptid, minus_one_ptid))
2438 {
2439 event_child->status_pending_p = 1;
2440 event_child->status_pending = w;
2441
2442 select_event_lwp (&event_child);
2443
2444 event_child->status_pending_p = 0;
2445 w = event_child->status_pending;
2446 }
2447
2448 /* Now that we've selected our final event LWP, cancel any
2449 breakpoints in other LWPs that have hit a GDB breakpoint.
2450 See the comment in cancel_breakpoints_callback to find out
2451 why. */
2452 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2453
2454 /* Stabilize threads (move out of jump pads). */
2455 stabilize_threads ();
2456 }
2457 else
2458 {
2459 /* If we just finished a step-over, then all threads had been
2460 momentarily paused. In all-stop, that's fine, we want
2461 threads stopped by now anyway. In non-stop, we need to
2462 re-resume threads that GDB wanted to be running. */
2463 if (step_over_finished)
2464 unstop_all_lwps (1, event_child);
2465 }
2466
2467 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2468
2469 if (current_inferior->last_resume_kind == resume_stop
2470 && WSTOPSIG (w) == SIGSTOP)
2471 {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 and it stopped cleanly, so report it as SIG0.  The use of
	 SIGSTOP is an implementation detail.  */
2475 ourstatus->value.sig = TARGET_SIGNAL_0;
2476 }
2477 else if (current_inferior->last_resume_kind == resume_stop
2478 && WSTOPSIG (w) != SIGSTOP)
2479 {
      /* A thread that has been requested to stop by GDB with vCont;t,
	 but it stopped for some other reason.  */
2482 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2483 }
2484 else
2485 {
2486 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2487 }
2488
2489 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2490
2491 if (debug_threads)
2492 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2493 target_pid_to_str (ptid_of (event_child)),
2494 ourstatus->kind,
2495 ourstatus->value.sig);
2496
2497 return ptid_of (event_child);
2498}
2499
2500/* Get rid of any pending event in the pipe. */
2501static void
2502async_file_flush (void)
2503{
2504 int ret;
2505 char buf;
2506
2507 do
2508 ret = read (linux_event_pipe[0], &buf, 1);
2509 while (ret >= 0 || (ret == -1 && errno == EINTR));
2510}
2511
2512/* Put something in the pipe, so the event loop wakes up. */
2513static void
2514async_file_mark (void)
2515{
2516 int ret;
2517
2518 async_file_flush ();
2519
2520 do
2521 ret = write (linux_event_pipe[1], "+", 1);
2522 while (ret == 0 || (ret == -1 && errno == EINTR));
2523
2524 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2525 be awakened anyway. */
2526}
2527
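/* Wrapper around linux_wait_1 that also manages the event pipe used
   in async mode: flush the pipe before waiting, and mark it again
   afterwards if an event was reported and more may be pending.  */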
2528static ptid_t
2529linux_wait (ptid_t ptid,
2530 struct target_waitstatus *ourstatus, int target_options)
2531{
2532 ptid_t event_ptid;
2533
2534 if (debug_threads)
2535 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2536
2537 /* Flush the async file first. */
2538 if (target_is_async_p ())
2539 async_file_flush ();
2540
2541 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2542
2543 /* If at least one stop was reported, there may be more. A single
2544 SIGCHLD can signal more than one child stop. */
2545 if (target_is_async_p ()
2546 && (target_options & TARGET_WNOHANG) != 0
2547 && !ptid_equal (event_ptid, null_ptid))
2548 async_file_mark ();
2549
2550 return event_ptid;
2551}
2552
2553/* Send a signal to an LWP. */
2554
2555static int
2556kill_lwp (unsigned long lwpid, int signo)
2557{
2558 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2559 fails, then we are not using nptl threads and we should be using kill. */
2560
2561#ifdef __NR_tkill
2562 {
2563 static int tkill_failed;
2564
2565 if (!tkill_failed)
2566 {
2567 int ret;
2568
2569 errno = 0;
2570 ret = syscall (__NR_tkill, lwpid, signo);
2571 if (errno != ENOSYS)
2572 return ret;
2573 tkill_failed = 1;
2574 }
2575 }
2576#endif
2577
2578 return kill (lwpid, signo);
2579}
2580
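/* Ask LWP to stop, by sending it a SIGSTOP (see send_sigstop
   below).  */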
2581void
2582linux_stop_lwp (struct lwp_info *lwp)
2583{
2584 send_sigstop (lwp);
2585}
2586
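/* Send a SIGSTOP to LWP, unless a stop is already expected for
   it.  */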
2587static void
2588send_sigstop (struct lwp_info *lwp)
2589{
2590 int pid;
2591
2592 pid = lwpid_of (lwp);
2593
2594 /* If we already have a pending stop signal for this process, don't
2595 send another. */
2596 if (lwp->stop_expected)
2597 {
2598 if (debug_threads)
2599 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2600
2601 return;
2602 }
2603
2604 if (debug_threads)
2605 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2606
2607 lwp->stop_expected = 1;
2608 kill_lwp (pid, SIGSTOP);
2609}
2610
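/* Callback for find_inferior.  Send a SIGSTOP to the LWP ENTRY,
   unless it is EXCEPT or is already stopped.  */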
2611static int
2612send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2613{
2614 struct lwp_info *lwp = (struct lwp_info *) entry;
2615
2616 /* Ignore EXCEPT. */
2617 if (lwp == except)
2618 return 0;
2619
2620 if (lwp->stopped)
2621 return 0;
2622
2623 send_sigstop (lwp);
2624 return 0;
2625}
2626
2627/* Increment the suspend count of an LWP, and stop it, if not stopped
2628 yet. */
2629static int
2630suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2631 void *except)
2632{
2633 struct lwp_info *lwp = (struct lwp_info *) entry;
2634
2635 /* Ignore EXCEPT. */
2636 if (lwp == except)
2637 return 0;
2638
2639 lwp->suspended++;
2640
2641 return send_sigstop_callback (entry, except);
2642}
2643
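/* Mark LWP as dead, recording WSTAT as its pending exit status so it
   can be reported later.  */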
2644static void
2645mark_lwp_dead (struct lwp_info *lwp, int wstat)
2646{
2647 /* It's dead, really. */
2648 lwp->dead = 1;
2649
2650 /* Store the exit status for later. */
2651 lwp->status_pending_p = 1;
2652 lwp->status_pending = wstat;
2653
2654 /* Prevent trying to stop it. */
2655 lwp->stopped = 1;
2656
2657 /* No further stops are expected from a dead lwp. */
2658 lwp->stop_expected = 0;
2659}
2660
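/* Wait until the LWP ENTRY stops.  A stop with a signal other than
   SIGSTOP is recorded as a pending status; if the process exits
   instead, the LWP is marked dead with the exit status left
   pending.  */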
2661static void
2662wait_for_sigstop (struct inferior_list_entry *entry)
2663{
2664 struct lwp_info *lwp = (struct lwp_info *) entry;
2665 struct thread_info *saved_inferior;
2666 int wstat;
2667 ptid_t saved_tid;
2668 ptid_t ptid;
2669 int pid;
2670
2671 if (lwp->stopped)
2672 {
2673 if (debug_threads)
2674 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2675 lwpid_of (lwp));
2676 return;
2677 }
2678
2679 saved_inferior = current_inferior;
2680 if (saved_inferior != NULL)
2681 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2682 else
2683 saved_tid = null_ptid; /* avoid bogus unused warning */
2684
2685 ptid = lwp->head.id;
2686
2687 if (debug_threads)
2688 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2689
2690 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2691
2692 /* If we stopped with a non-SIGSTOP signal, save it for later
2693 and record the pending SIGSTOP. If the process exited, just
2694 return. */
2695 if (WIFSTOPPED (wstat))
2696 {
2697 if (debug_threads)
2698 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2699 lwpid_of (lwp), WSTOPSIG (wstat));
2700
2701 if (WSTOPSIG (wstat) != SIGSTOP)
2702 {
2703 if (debug_threads)
2704 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2705 lwpid_of (lwp), wstat);
2706
2707 lwp->status_pending_p = 1;
2708 lwp->status_pending = wstat;
2709 }
2710 }
2711 else
2712 {
2713 if (debug_threads)
2714 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2715
2716 lwp = find_lwp_pid (pid_to_ptid (pid));
2717 if (lwp)
2718 {
2719 /* Leave this status pending for the next time we're able to
2720 report it. In the mean time, we'll report this lwp as
2721 dead to GDB, so GDB doesn't try to read registers and
2722 memory from it. This can only happen if this was the
2723 last thread of the process; otherwise, PID is removed
2724 from the thread tables before linux_wait_for_event
2725 returns. */
2726 mark_lwp_dead (lwp, wstat);
2727 }
2728 }
2729
2730 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2731 current_inferior = saved_inferior;
2732 else
2733 {
2734 if (debug_threads)
2735 fprintf (stderr, "Previously current thread died.\n");
2736
2737 if (non_stop)
2738 {
2739 /* We can't change the current inferior behind GDB's back,
2740 otherwise, a subsequent command may apply to the wrong
2741 process. */
2742 current_inferior = NULL;
2743 }
2744 else
2745 {
2746 /* Set a valid thread as current. */
2747 set_desired_inferior (0);
2748 }
2749 }
2750}
2751
2752/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2753 move it out, because we need to report the stop event to GDB. For
2754 example, if the user puts a breakpoint in the jump pad, it's
2755 because she wants to debug it. */
2756
2757static int
2758stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2759{
2760 struct lwp_info *lwp = (struct lwp_info *) entry;
2761 struct thread_info *thread = get_lwp_thread (lwp);
2762
2763 gdb_assert (lwp->suspended == 0);
2764 gdb_assert (lwp->stopped);
2765
2766 /* Allow debugging the jump pad, gdb_collect, etc.. */
2767 return (supports_fast_tracepoints ()
2768 && in_process_agent_loaded ()
2769 && (gdb_breakpoint_here (lwp->stop_pc)
2770 || lwp->stopped_by_watchpoint
2771 || thread->last_resume_kind == resume_step)
2772 && linux_fast_tracepoint_collecting (lwp, NULL));
2773}
2774
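/* Callback for for_each_inferior.  If the stopped LWP ENTRY is
   collecting a fast tracepoint and can be moved out of the jump pad,
   defer any pending signal and resume it; otherwise, increment its
   suspend count and leave it stopped where it is.  */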
2775static void
2776move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2777{
2778 struct lwp_info *lwp = (struct lwp_info *) entry;
2779 struct thread_info *thread = get_lwp_thread (lwp);
2780 int *wstat;
2781
2782 gdb_assert (lwp->suspended == 0);
2783 gdb_assert (lwp->stopped);
2784
2785 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2786
2787 /* Allow debugging the jump pad, gdb_collect, etc. */
2788 if (!gdb_breakpoint_here (lwp->stop_pc)
2789 && !lwp->stopped_by_watchpoint
2790 && thread->last_resume_kind != resume_step
2791 && maybe_move_out_of_jump_pad (lwp, wstat))
2792 {
2793 if (debug_threads)
2794 fprintf (stderr,
2795 "LWP %ld needs stabilizing (in jump pad)\n",
2796 lwpid_of (lwp));
2797
2798 if (wstat)
2799 {
2800 lwp->status_pending_p = 0;
2801 enqueue_one_deferred_signal (lwp, wstat);
2802
2803 if (debug_threads)
2804 fprintf (stderr,
2805 "Signal %d for LWP %ld deferred "
2806 "(in jump pad)\n",
2807 WSTOPSIG (*wstat), lwpid_of (lwp));
2808 }
2809
2810 linux_resume_one_lwp (lwp, 0, 0, NULL);
2811 }
2812 else
2813 lwp->suspended++;
2814}
2815
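/* Callback for find_inferior.  Return nonzero if the LWP ENTRY is
   neither dead nor stopped, i.e., still running.  */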
2816static int
2817lwp_running (struct inferior_list_entry *entry, void *data)
2818{
2819 struct lwp_info *lwp = (struct lwp_info *) entry;
2820
2821 if (lwp->dead)
2822 return 0;
2823 if (lwp->stopped)
2824 return 0;
2825 return 1;
2826}
2827
2828/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2829 If SUSPEND, then also increase the suspend count of every LWP,
2830 except EXCEPT. */
2831
2832static void
2833stop_all_lwps (int suspend, struct lwp_info *except)
2834{
2835 stopping_threads = 1;
2836
2837 if (suspend)
2838 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2839 else
2840 find_inferior (&all_lwps, send_sigstop_callback, except);
2841 for_each_inferior (&all_lwps, wait_for_sigstop);
2842 stopping_threads = 0;
2843}
2844
/* Resume execution of LWP.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.  */
2848
2849static void
2850linux_resume_one_lwp (struct lwp_info *lwp,
2851 int step, int signal, siginfo_t *info)
2852{
2853 struct thread_info *saved_inferior;
2854 int fast_tp_collecting;
2855
2856 if (lwp->stopped == 0)
2857 return;
2858
2859 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2860
2861 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2862
2863 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2864 user used the "jump" command, or "set $pc = foo"). */
2865 if (lwp->stop_pc != get_pc (lwp))
2866 {
2867 /* Collecting 'while-stepping' actions doesn't make sense
2868 anymore. */
2869 release_while_stepping_state_list (get_lwp_thread (lwp));
2870 }
2871
2872 /* If we have pending signals or status, and a new signal, enqueue the
2873 signal. Also enqueue the signal if we are waiting to reinsert a
2874 breakpoint; it will be picked up again below. */
2875 if (signal != 0
2876 && (lwp->status_pending_p
2877 || lwp->pending_signals != NULL
2878 || lwp->bp_reinsert != 0
2879 || fast_tp_collecting))
2880 {
2881 struct pending_signals *p_sig;
2882 p_sig = xmalloc (sizeof (*p_sig));
2883 p_sig->prev = lwp->pending_signals;
2884 p_sig->signal = signal;
2885 if (info == NULL)
2886 memset (&p_sig->info, 0, sizeof (siginfo_t));
2887 else
2888 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2889 lwp->pending_signals = p_sig;
2890 }
2891
2892 if (lwp->status_pending_p)
2893 {
2894 if (debug_threads)
2895 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2896 " has pending status\n",
2897 lwpid_of (lwp), step ? "step" : "continue", signal,
2898 lwp->stop_expected ? "expected" : "not expected");
2899 return;
2900 }
2901
2902 saved_inferior = current_inferior;
2903 current_inferior = get_lwp_thread (lwp);
2904
2905 if (debug_threads)
2906 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2907 lwpid_of (lwp), step ? "step" : "continue", signal,
2908 lwp->stop_expected ? "expected" : "not expected");
2909
2910 /* This bit needs some thinking about. If we get a signal that
2911 we must report while a single-step reinsert is still pending,
2912 we often end up resuming the thread. It might be better to
2913 (ew) allow a stack of pending events; then we could be sure that
2914 the reinsert happened right away and not lose any signals.
2915
2916 Making this stack would also shrink the window in which breakpoints are
2917 uninserted (see comment in linux_wait_for_lwp) but not enough for
2918 complete correctness, so it won't solve that problem. It may be
2919 worthwhile just to solve this one, however. */
2920 if (lwp->bp_reinsert != 0)
2921 {
2922 if (debug_threads)
2923 fprintf (stderr, " pending reinsert at 0x%s\n",
2924 paddress (lwp->bp_reinsert));
2925
2926 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2927 {
2928 if (fast_tp_collecting == 0)
2929 {
2930 if (step == 0)
2931 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2932 if (lwp->suspended)
2933 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2934 lwp->suspended);
2935 }
2936
2937 step = 1;
2938 }
2939
2940 /* Postpone any pending signal. It was enqueued above. */
2941 signal = 0;
2942 }
2943
2944 if (fast_tp_collecting == 1)
2945 {
2946 if (debug_threads)
2947 fprintf (stderr, "\
2948lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2949 lwpid_of (lwp));
2950
2951 /* Postpone any pending signal. It was enqueued above. */
2952 signal = 0;
2953 }
2954 else if (fast_tp_collecting == 2)
2955 {
2956 if (debug_threads)
2957 fprintf (stderr, "\
2958lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2959 lwpid_of (lwp));
2960
2961 if (can_hardware_single_step ())
2962 step = 1;
2963 else
2964 fatal ("moving out of jump pad single-stepping"
2965 " not implemented on this target");
2966
2967 /* Postpone any pending signal. It was enqueued above. */
2968 signal = 0;
2969 }
2970
2971 /* If we have while-stepping actions in this thread set it stepping.
2972 If we have a signal to deliver, it may or may not be set to
2973 SIG_IGN, we don't know. Assume so, and allow collecting
2974 while-stepping into a signal handler. A possible smart thing to
2975 do would be to set an internal breakpoint at the signal return
2976 address, continue, and carry on catching this while-stepping
2977 action only when that breakpoint is hit. A future
2978 enhancement. */
2979 if (get_lwp_thread (lwp)->while_stepping != NULL
2980 && can_hardware_single_step ())
2981 {
2982 if (debug_threads)
2983 fprintf (stderr,
2984 "lwp %ld has a while-stepping action -> forcing step.\n",
2985 lwpid_of (lwp));
2986 step = 1;
2987 }
2988
2989 if (debug_threads && the_low_target.get_pc != NULL)
2990 {
2991 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2992 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2993 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2994 }
2995
2996 /* If we have pending signals, consume one unless we are trying to
2997 reinsert a breakpoint or we're trying to finish a fast tracepoint
2998 collect. */
2999 if (lwp->pending_signals != NULL
3000 && lwp->bp_reinsert == 0
3001 && fast_tp_collecting == 0)
3002 {
3003 struct pending_signals **p_sig;
3004
3005 p_sig = &lwp->pending_signals;
3006 while ((*p_sig)->prev != NULL)
3007 p_sig = &(*p_sig)->prev;
3008
3009 signal = (*p_sig)->signal;
3010 if ((*p_sig)->info.si_signo != 0)
3011 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3012
3013 free (*p_sig);
3014 *p_sig = NULL;
3015 }
3016
3017 if (the_low_target.prepare_to_resume != NULL)
3018 the_low_target.prepare_to_resume (lwp);
3019
3020 regcache_invalidate_one ((struct inferior_list_entry *)
3021 get_lwp_thread (lwp));
3022 errno = 0;
3023 lwp->stopped = 0;
3024 lwp->stopped_by_watchpoint = 0;
3025 lwp->stepping = step;
3026 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3027 /* Coerce to a uintptr_t first to avoid potential gcc warning
3028 of coercing an 8 byte integer to a 4 byte pointer. */
3029 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3030
3031 current_inferior = saved_inferior;
3032 if (errno)
3033 {
3034 /* ESRCH from ptrace either means that the thread was already
3035 running (an error) or that it is gone (a race condition). If
3036 it's gone, we will get a notification the next time we wait,
3037 so we can ignore the error. We could differentiate these
3038 two, but it's tricky without waiting; the thread still exists
3039 as a zombie, so sending it signal 0 would succeed. So just
3040 ignore ESRCH. */
3041 if (errno == ESRCH)
3042 return;
3043
3044 perror_with_name ("ptrace");
3045 }
3046}
3047
3048struct thread_resume_array
3049{
3050 struct thread_resume *resume;
3051 size_t n;
3052};
3053
/* This function is called once per thread.  We look up the thread
   in the resume request array passed in ARG, and mark the thread
   with a pointer to the appropriate resume request.

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */
3061static int
3062linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3063{
3064 struct lwp_info *lwp;
3065 struct thread_info *thread;
3066 int ndx;
3067 struct thread_resume_array *r;
3068
3069 thread = (struct thread_info *) entry;
3070 lwp = get_thread_lwp (thread);
3071 r = arg;
3072
3073 for (ndx = 0; ndx < r->n; ndx++)
3074 {
3075 ptid_t ptid = r->resume[ndx].thread;
3076 if (ptid_equal (ptid, minus_one_ptid)
3077 || ptid_equal (ptid, entry->id)
3078 || (ptid_is_pid (ptid)
3079 && (ptid_get_pid (ptid) == pid_of (lwp)))
3080 || (ptid_get_lwp (ptid) == -1
3081 && (ptid_get_pid (ptid) == pid_of (lwp))))
3082 {
3083 if (r->resume[ndx].kind == resume_stop
3084 && thread->last_resume_kind == resume_stop)
3085 {
3086 if (debug_threads)
3087 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3088 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3089 ? "stopped"
3090 : "stopping",
3091 lwpid_of (lwp));
3092
3093 continue;
3094 }
3095
3096 lwp->resume = &r->resume[ndx];
3097 thread->last_resume_kind = lwp->resume->kind;
3098
3099 /* If we had a deferred signal to report, dequeue one now.
3100 This can happen if LWP gets more than one signal while
3101 trying to get out of a jump pad. */
3102 if (lwp->stopped
3103 && !lwp->status_pending_p
3104 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3105 {
3106 lwp->status_pending_p = 1;
3107
3108 if (debug_threads)
3109 fprintf (stderr,
3110 "Dequeueing deferred signal %d for LWP %ld, "
3111 "leaving status pending.\n",
3112 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3113 }
3114
3115 return 0;
3116 }
3117 }
3118
3119 /* No resume action for this thread. */
3120 lwp->resume = NULL;
3121
3122 return 0;
3123}
3124
3125
3126/* Set *FLAG_P if this lwp has an interesting status pending. */
3127static int
3128resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3129{
3130 struct lwp_info *lwp = (struct lwp_info *) entry;
3131
3132 /* LWPs which will not be resumed are not interesting, because
3133 we might not wait for them next time through linux_wait. */
3134 if (lwp->resume == NULL)
3135 return 0;
3136
3137 if (lwp->status_pending_p)
3138 * (int *) flag_p = 1;
3139
3140 return 0;
3141}
3142
3143/* Return 1 if this lwp that GDB wants running is stopped at an
3144 internal breakpoint that we need to step over. It assumes that any
3145 required STOP_PC adjustment has already been propagated to the
3146 inferior's regcache. */
3147
3148static int
3149need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3150{
3151 struct lwp_info *lwp = (struct lwp_info *) entry;
3152 struct thread_info *thread;
3153 struct thread_info *saved_inferior;
3154 CORE_ADDR pc;
3155
3156 /* LWPs which will not be resumed are not interesting, because we
3157 might not wait for them next time through linux_wait. */
3158
3159 if (!lwp->stopped)
3160 {
3161 if (debug_threads)
3162 fprintf (stderr,
3163 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3164 lwpid_of (lwp));
3165 return 0;
3166 }
3167
3168 thread = get_lwp_thread (lwp);
3169
3170 if (thread->last_resume_kind == resume_stop)
3171 {
3172 if (debug_threads)
3173 fprintf (stderr,
3174 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3175 lwpid_of (lwp));
3176 return 0;
3177 }
3178
3179 gdb_assert (lwp->suspended >= 0);
3180
3181 if (lwp->suspended)
3182 {
3183 if (debug_threads)
3184 fprintf (stderr,
3185 "Need step over [LWP %ld]? Ignoring, suspended\n",
3186 lwpid_of (lwp));
3187 return 0;
3188 }
3189
3190 if (!lwp->need_step_over)
3191 {
3192 if (debug_threads)
3193 fprintf (stderr,
3194 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3195 }
3196
3197 if (lwp->status_pending_p)
3198 {
3199 if (debug_threads)
3200 fprintf (stderr,
3201 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3202 lwpid_of (lwp));
3203 return 0;
3204 }
3205
3206 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3207 or we have. */
3208 pc = get_pc (lwp);
3209
  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked the thread's registers herself.  */
3215 if (pc != lwp->stop_pc)
3216 {
3217 if (debug_threads)
3218 fprintf (stderr,
3219 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3220 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3221 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3222
3223 lwp->need_step_over = 0;
3224 return 0;
3225 }
3226
3227 saved_inferior = current_inferior;
3228 current_inferior = thread;
3229
3230 /* We can only step over breakpoints we know about. */
3231 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3232 {
3233 /* Don't step over a breakpoint that GDB expects to hit
3234 though. */
3235 if (gdb_breakpoint_here (pc))
3236 {
3237 if (debug_threads)
3238 fprintf (stderr,
3239 "Need step over [LWP %ld]? yes, but found"
3240 " GDB breakpoint at 0x%s; skipping step over\n",
3241 lwpid_of (lwp), paddress (pc));
3242
3243 current_inferior = saved_inferior;
3244 return 0;
3245 }
3246 else
3247 {
3248 if (debug_threads)
3249 fprintf (stderr,
3250 "Need step over [LWP %ld]? yes, "
3251 "found breakpoint at 0x%s\n",
3252 lwpid_of (lwp), paddress (pc));
3253
3254 /* We've found an lwp that needs stepping over --- return 1 so
3255 that find_inferior stops looking. */
3256 current_inferior = saved_inferior;
3257
3258 /* If the step over is cancelled, this is set again. */
3259 lwp->need_step_over = 0;
3260 return 1;
3261 }
3262 }
3263
3264 current_inferior = saved_inferior;
3265
3266 if (debug_threads)
3267 fprintf (stderr,
3268 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3269 lwpid_of (lwp), paddress (pc));
3270
3271 return 0;
3272}
3273
/* Start a step-over operation on LWP.  When LWP stopped at a
   breakpoint, to make progress, we need to get the breakpoint out
   of the way.  If we let other threads run while we do that, they
   may pass by the breakpoint location and miss hitting it.  To avoid
   that, a step-over momentarily stops all threads while LWP is
   single-stepped with the breakpoint temporarily uninserted from
   the inferior.  When the single-step finishes, we reinsert the
   breakpoint, and let all threads that are supposed to be running,
   run again.

   On targets that don't support hardware single-step, we don't
   currently support full software single-stepping.  Instead, we only
   support stepping over the thread event breakpoint, by asking the
   low target where to place a reinsert breakpoint.  Since this
   routine assumes the breakpoint being stepped over is a thread event
   breakpoint, it usually assumes the return address of the current
   function is a good enough place to set the reinsert breakpoint.  */
3291
3292static int
3293start_step_over (struct lwp_info *lwp)
3294{
3295 struct thread_info *saved_inferior;
3296 CORE_ADDR pc;
3297 int step;
3298
3299 if (debug_threads)
3300 fprintf (stderr,
3301 "Starting step-over on LWP %ld. Stopping all threads\n",
3302 lwpid_of (lwp));
3303
3304 stop_all_lwps (1, lwp);
3305 gdb_assert (lwp->suspended == 0);
3306
3307 if (debug_threads)
3308 fprintf (stderr, "Done stopping all threads for step-over.\n");
3309
3310 /* Note, we should always reach here with an already adjusted PC,
3311 either by GDB (if we're resuming due to GDB's request), or by our
3312 caller, if we just finished handling an internal breakpoint GDB
3313 shouldn't care about. */
3314 pc = get_pc (lwp);
3315
3316 saved_inferior = current_inferior;
3317 current_inferior = get_lwp_thread (lwp);
3318
3319 lwp->bp_reinsert = pc;
3320 uninsert_breakpoints_at (pc);
3321 uninsert_fast_tracepoint_jumps_at (pc);
3322
3323 if (can_hardware_single_step ())
3324 {
3325 step = 1;
3326 }
3327 else
3328 {
3329 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3330 set_reinsert_breakpoint (raddr);
3331 step = 0;
3332 }
3333
3334 current_inferior = saved_inferior;
3335
3336 linux_resume_one_lwp (lwp, step, 0, NULL);
3337
3338 /* Require next event from this LWP. */
3339 step_over_bkpt = lwp->head.id;
3340 return 1;
3341}
3342
/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set on non-hardware-single-step targets.  */
3346
3347static int
3348finish_step_over (struct lwp_info *lwp)
3349{
3350 if (lwp->bp_reinsert != 0)
3351 {
3352 if (debug_threads)
3353 fprintf (stderr, "Finished step over.\n");
3354
3355 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3356 may be no breakpoint to reinsert there by now. */
3357 reinsert_breakpoints_at (lwp->bp_reinsert);
3358 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3359
3360 lwp->bp_reinsert = 0;
3361
3362 /* Delete any software-single-step reinsert breakpoints. No
3363 longer needed. We don't have to worry about other threads
3364 hitting this trap, and later not being able to explain it,
3365 because we were stepping over a breakpoint, and we hold all
3366 threads but LWP stopped while doing that. */
3367 if (!can_hardware_single_step ())
3368 delete_reinsert_breakpoints ();
3369
3370 step_over_bkpt = null_ptid;
3371 return 1;
3372 }
3373 else
3374 return 0;
3375}
3376
3377/* This function is called once per thread. We check the thread's resume
3378 request, which will tell us whether to resume, step, or leave the thread
3379 stopped; and what signal, if any, it should be sent.
3380
3381 For threads which we aren't explicitly told otherwise, we preserve
3382 the stepping flag; this is used for stepping over gdbserver-placed
3383 breakpoints.
3384
   If a pending status was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */
3389
3390static int
3391linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3392{
3393 struct lwp_info *lwp;
3394 struct thread_info *thread;
3395 int step;
3396 int leave_all_stopped = * (int *) arg;
3397 int leave_pending;
3398
3399 thread = (struct thread_info *) entry;
3400 lwp = get_thread_lwp (thread);
3401
3402 if (lwp->resume == NULL)
3403 return 0;
3404
3405 if (lwp->resume->kind == resume_stop)
3406 {
3407 if (debug_threads)
3408 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3409
3410 if (!lwp->stopped)
3411 {
3412 if (debug_threads)
3413 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3414
3415 /* Stop the thread, and wait for the event asynchronously,
3416 through the event loop. */
3417 send_sigstop (lwp);
3418 }
3419 else
3420 {
3421 if (debug_threads)
3422 fprintf (stderr, "already stopped LWP %ld\n",
3423 lwpid_of (lwp));
3424
3425 /* The LWP may have been stopped in an internal event that
3426 was not meant to be notified back to GDB (e.g., gdbserver
3427 breakpoint), so we should be reporting a stop event in
3428 this case too. */
3429
3430 /* If the thread already has a pending SIGSTOP, this is a
3431 no-op. Otherwise, something later will presumably resume
3432 the thread and this will cause it to cancel any pending
3433 operation, due to last_resume_kind == resume_stop. If
3434 the thread already has a pending status to report, we
3435 will still report it the next time we wait - see
3436 status_pending_p_callback. */
3437
3438 /* If we already have a pending signal to report, then
3439 there's no need to queue a SIGSTOP, as this means we're
3440 midway through moving the LWP out of the jumppad, and we
3441 will report the pending signal as soon as that is
3442 finished. */
3443 if (lwp->pending_signals_to_report == NULL)
3444 send_sigstop (lwp);
3445 }
3446
3447 /* For stop requests, we're done. */
3448 lwp->resume = NULL;
3449 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3450 return 0;
3451 }
3452
  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we base this decision on whether *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
3460 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3461
3462 if (!leave_pending)
3463 {
3464 if (debug_threads)
3465 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3466
3467 step = (lwp->resume->kind == resume_step);
3468 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3469 }
3470 else
3471 {
3472 if (debug_threads)
3473 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3474
3475 /* If we have a new signal, enqueue the signal. */
3476 if (lwp->resume->sig != 0)
3477 {
3478 struct pending_signals *p_sig;
3479 p_sig = xmalloc (sizeof (*p_sig));
3480 p_sig->prev = lwp->pending_signals;
3481 p_sig->signal = lwp->resume->sig;
3482 memset (&p_sig->info, 0, sizeof (siginfo_t));
3483
3484 /* If this is the same signal we were previously stopped by,
3485 make sure to queue its siginfo. We can ignore the return
3486 value of ptrace; if it fails, we'll skip
3487 PTRACE_SETSIGINFO. */
3488 if (WIFSTOPPED (lwp->last_status)
3489 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3490 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3491
3492 lwp->pending_signals = p_sig;
3493 }
3494 }
3495
3496 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3497 lwp->resume = NULL;
3498 return 0;
3499}
3500
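/* Apply the resume requests in RESUME_INFO (N entries) to all
   threads.  If any thread to be resumed has a pending status, or is
   stopped at a breakpoint that needs stepping over, leave everything
   stopped (queuing signals as needed); otherwise, resume the threads
   as requested.  */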
3501static void
3502linux_resume (struct thread_resume *resume_info, size_t n)
3503{
3504 struct thread_resume_array array = { resume_info, n };
3505 struct lwp_info *need_step_over = NULL;
3506 int any_pending;
3507 int leave_all_stopped;
3508
3509 find_inferior (&all_threads, linux_set_resume_request, &array);
3510
  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering starting a step-over (in all-stop).  */
3517 any_pending = 0;
3518 if (!non_stop)
3519 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3520
3521 /* If there is a thread which would otherwise be resumed, which is
3522 stopped at a breakpoint that needs stepping over, then don't
3523 resume any threads - have it step over the breakpoint with all
3524 other threads stopped, then resume all threads again. Make sure
3525 to queue any signals that would otherwise be delivered or
3526 queued. */
3527 if (!any_pending && supports_breakpoints ())
3528 need_step_over
3529 = (struct lwp_info *) find_inferior (&all_lwps,
3530 need_step_over_p, NULL);
3531
3532 leave_all_stopped = (need_step_over != NULL || any_pending);
3533
3534 if (debug_threads)
3535 {
3536 if (need_step_over != NULL)
3537 fprintf (stderr, "Not resuming all, need step over\n");
3538 else if (any_pending)
3539 fprintf (stderr,
3540 "Not resuming, all-stop and found "
3541 "an LWP with pending status\n");
3542 else
3543 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3544 }
3545
3546 /* Even if we're leaving threads stopped, queue all signals we'd
3547 otherwise deliver. */
3548 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3549
3550 if (need_step_over)
3551 start_step_over (need_step_over);
3552}
3553
3554/* This function is called once per thread. We check the thread's
3555 last resume request, which will tell us whether to resume, step, or
3556 leave the thread stopped. Any signal the client requested to be
3557 delivered has already been enqueued at this point.
3558
3559 If any thread that GDB wants running is stopped at an internal
3560 breakpoint that needs stepping over, we start a step-over operation
3561 on that particular thread, and leave all others stopped. */
3562
3563static int
3564proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3565{
3566 struct lwp_info *lwp = (struct lwp_info *) entry;
3567 struct thread_info *thread;
3568 int step;
3569
3570 if (lwp == except)
3571 return 0;
3572
3573 if (debug_threads)
3574 fprintf (stderr,
3575 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3576
3577 if (!lwp->stopped)
3578 {
3579 if (debug_threads)
3580 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3581 return 0;
3582 }
3583
3584 thread = get_lwp_thread (lwp);
3585
3586 if (thread->last_resume_kind == resume_stop
3587 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3588 {
      if (debug_threads)
	fprintf (stderr, "   client wants LWP %ld to remain stopped\n",
		 lwpid_of (lwp));
3592 return 0;
3593 }
3594
3595 if (lwp->status_pending_p)
3596 {
3597 if (debug_threads)
3598 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3599 lwpid_of (lwp));
3600 return 0;
3601 }
3602
3603 gdb_assert (lwp->suspended >= 0);
3604
3605 if (lwp->suspended)
3606 {
3607 if (debug_threads)
3608 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3609 return 0;
3610 }
3611
3612 if (thread->last_resume_kind == resume_stop
3613 && lwp->pending_signals_to_report == NULL
3614 && lwp->collecting_fast_tracepoint == 0)
3615 {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here).  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling, for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */
3625
3626 if (debug_threads)
3627 fprintf (stderr,
3628 "Client wants LWP %ld to stop. "
3629 "Making sure it has a SIGSTOP pending\n",
3630 lwpid_of (lwp));
3631
3632 send_sigstop (lwp);
3633 }
3634
3635 step = thread->last_resume_kind == resume_step;
3636 linux_resume_one_lwp (lwp, step, 0, NULL);
3637 return 0;
3638}
3639
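/* Callback for find_inferior.  Decrement the suspend count of the
   LWP ENTRY (unless it is EXCEPT), then let proceed_one_lwp decide
   whether to resume it.  */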
3640static int
3641unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3642{
3643 struct lwp_info *lwp = (struct lwp_info *) entry;
3644
3645 if (lwp == except)
3646 return 0;
3647
3648 lwp->suspended--;
3649 gdb_assert (lwp->suspended >= 0);
3650
3651 return proceed_one_lwp (entry, except);
3652}
3653
3654/* When we finish a step-over, set threads running again. If there's
3655 another thread that may need a step-over, now's the time to start
3656 it. Eventually, we'll move all threads past their breakpoints. */
3657
3658static void
3659proceed_all_lwps (void)
3660{
3661 struct lwp_info *need_step_over;
3662
3663 /* If there is a thread which would otherwise be resumed, which is
3664 stopped at a breakpoint that needs stepping over, then don't
3665 resume any threads - have it step over the breakpoint with all
3666 other threads stopped, then resume all threads again. */
3667
3668 if (supports_breakpoints ())
3669 {
3670 need_step_over
3671 = (struct lwp_info *) find_inferior (&all_lwps,
3672 need_step_over_p, NULL);
3673
3674 if (need_step_over != NULL)
3675 {
3676 if (debug_threads)
3677 fprintf (stderr, "proceed_all_lwps: found "
3678 "thread %ld needing a step-over\n",
3679 lwpid_of (need_step_over));
3680
3681 start_step_over (need_step_over);
3682 return;
3683 }
3684 }
3685
3686 if (debug_threads)
3687 fprintf (stderr, "Proceeding, no step-over needed\n");
3688
3689 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3690}
3691
3692/* Stopped LWPs that the client wanted to be running, that don't have
3693 pending statuses, are set to run again, except for EXCEPT, if not
3694 NULL. This undoes a stop_all_lwps call. */
3695
3696static void
3697unstop_all_lwps (int unsuspend, struct lwp_info *except)
3698{
3699 if (debug_threads)
3700 {
3701 if (except)
3702 fprintf (stderr,
3703 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3704 else
3705 fprintf (stderr,
3706 "unstopping all lwps\n");
3707 }
3708
3709 if (unsuspend)
3710 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3711 else
3712 find_inferior (&all_lwps, proceed_one_lwp, except);
3713}
3714
3715#ifdef HAVE_LINUX_USRREGS
3716
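/* Return the offset in the ptrace user area at which register REGNUM
   is stored, as given by the low target's regmap.  */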
3717int
3718register_addr (int regnum)
3719{
3720 int addr;
3721
3722 if (regnum < 0 || regnum >= the_low_target.num_regs)
3723 error ("Invalid register number %d.", regnum);
3724
3725 addr = the_low_target.regmap[regnum];
3726
3727 return addr;
3728}
3729
3730/* Fetch one register. */
3731static void
3732fetch_register (struct regcache *regcache, int regno)
3733{
3734 CORE_ADDR regaddr;
3735 int i, size;
3736 char *buf;
3737 int pid;
3738
3739 if (regno >= the_low_target.num_regs)
3740 return;
3741 if ((*the_low_target.cannot_fetch_register) (regno))
3742 return;
3743
3744 regaddr = register_addr (regno);
3745 if (regaddr == -1)
3746 return;
3747
3748 pid = lwpid_of (get_thread_lwp (current_inferior));
3749 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3750 & - sizeof (PTRACE_XFER_TYPE));
3751 buf = alloca (size);
3752 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3753 {
3754 errno = 0;
3755 *(PTRACE_XFER_TYPE *) (buf + i) =
3756 ptrace (PTRACE_PEEKUSER, pid,
3757 /* Coerce to a uintptr_t first to avoid potential gcc warning
3758 of coercing an 8 byte integer to a 4 byte pointer. */
3759 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3760 regaddr += sizeof (PTRACE_XFER_TYPE);
3761 if (errno != 0)
3762 error ("reading register %d: %s", regno, strerror (errno));
3763 }
3764
3765 if (the_low_target.supply_ptrace_register)
3766 the_low_target.supply_ptrace_register (regcache, regno, buf);
3767 else
3768 supply_register (regcache, regno, buf);
3769}
3770
3771/* Fetch all registers, or just one, from the child process. */
3772static void
3773usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3774{
3775 if (regno == -1)
3776 for (regno = 0; regno < the_low_target.num_regs; regno++)
3777 fetch_register (regcache, regno);
3778 else
3779 fetch_register (regcache, regno);
3780}
3781
3782/* Store our register values back into the inferior.
3783 If REGNO is -1, do this for all registers.
3784 Otherwise, REGNO specifies which register (so we can save time). */
3785static void
3786usr_store_inferior_registers (struct regcache *regcache, int regno)
3787{
3788 CORE_ADDR regaddr;
3789 int i, size;
3790 char *buf;
3791 int pid;
3792
3793 if (regno >= 0)
3794 {
3795 if (regno >= the_low_target.num_regs)
3796 return;
3797
3798 if ((*the_low_target.cannot_store_register) (regno) == 1)
3799 return;
3800
3801 regaddr = register_addr (regno);
3802 if (regaddr == -1)
3803 return;
3804 errno = 0;
3805 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3806 & - sizeof (PTRACE_XFER_TYPE);
3807 buf = alloca (size);
3808 memset (buf, 0, size);
3809
3810 if (the_low_target.collect_ptrace_register)
3811 the_low_target.collect_ptrace_register (regcache, regno, buf);
3812 else
3813 collect_register (regcache, regno, buf);
3814
3815 pid = lwpid_of (get_thread_lwp (current_inferior));
3816 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3817 {
3818 errno = 0;
3819 ptrace (PTRACE_POKEUSER, pid,
3820 /* Coerce to a uintptr_t first to avoid potential gcc warning
3821 about coercing an 8 byte integer to a 4 byte pointer. */
3822 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3823 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3824 if (errno != 0)
3825 {
3826 /* At this point, ESRCH should mean the process is
3827 already gone, in which case we simply ignore attempts
3828 to change its registers. See also the related
3829 comment in linux_resume_one_lwp. */
3830 if (errno == ESRCH)
3831 return;
3832
3833 if ((*the_low_target.cannot_store_register) (regno) == 0)
3834 error ("writing register %d: %s", regno, strerror (errno));
3835 }
3836 regaddr += sizeof (PTRACE_XFER_TYPE);
3837 }
3838 }
3839 else
3840 for (regno = 0; regno < the_low_target.num_regs; regno++)
3841 usr_store_inferior_registers (regcache, regno);
3842}
3843#endif /* HAVE_LINUX_USRREGS */
3844
3845
3846
3847#ifdef HAVE_LINUX_REGSETS
3848
3849static int
3850regsets_fetch_inferior_registers (struct regcache *regcache)
3851{
3852 struct regset_info *regset;
3853 int saw_general_regs = 0;
3854 int pid;
3855 struct iovec iov;
3856
3857 regset = target_regsets;
3858
3859 pid = lwpid_of (get_thread_lwp (current_inferior));
3860 while (regset->size >= 0)
3861 {
3862 void *buf, *data;
3863 int nt_type, res;
3864
3865 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3866 {
3867 regset ++;
3868 continue;
3869 }
3870
3871 buf = xmalloc (regset->size);
3872
3873 nt_type = regset->nt_type;
3874 if (nt_type)
3875 {
3876 iov.iov_base = buf;
3877 iov.iov_len = regset->size;
3878 data = (void *) &iov;
3879 }
3880 else
3881 data = buf;
3882
3883#ifndef __sparc__
3884 res = ptrace (regset->get_request, pid, nt_type, data);
3885#else
3886 res = ptrace (regset->get_request, pid, data, nt_type);
3887#endif
3888 if (res < 0)
3889 {
3890 if (errno == EIO)
3891 {
3892 /* If we get EIO on a regset, do not try it again for
3893 this process. */
3894 disabled_regsets[regset - target_regsets] = 1;
3895 free (buf);
3896 continue;
3897 }
3898 else
3899 {
3900 char s[256];
3901 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3902 pid);
3903 perror (s);
3904 }
3905 }
3906 else if (regset->type == GENERAL_REGS)
3907 saw_general_regs = 1;
3908 regset->store_function (regcache, buf);
3909 regset ++;
3910 free (buf);
3911 }
3912 if (saw_general_regs)
3913 return 0;
3914 else
3915 return 1;
3916}
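/* A minimal stand-alone sketch of the nt_type case above (an
   assumption, not gdbserver code: the kernel and headers provide
   PTRACE_GETREGSET, and PID names a ptrace-stopped thread). The
   NT_-typed requests pass a struct iovec, and the kernel shrinks
   iov_len to the number of bytes it actually transferred. */

#if 0 /* Example only. */
static int
example_getregset (int pid, int nt_type, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  if (ptrace (PTRACE_GETREGSET, pid, nt_type, &iov) < 0)
    return -1;
  return iov.iov_len;	/* Bytes the kernel filled in. */
}
#endif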
3917
3918static int
3919regsets_store_inferior_registers (struct regcache *regcache)
3920{
3921 struct regset_info *regset;
3922 int saw_general_regs = 0;
3923 int pid;
3924 struct iovec iov;
3925
3926 regset = target_regsets;
3927
3928 pid = lwpid_of (get_thread_lwp (current_inferior));
3929 while (regset->size >= 0)
3930 {
3931 void *buf, *data;
3932 int nt_type, res;
3933
3934 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3935 {
3936 regset ++;
3937 continue;
3938 }
3939
3940 buf = xmalloc (regset->size);
3941
3942 /* First fill the buffer with the current register set contents,
3943 in case there are any items in the kernel's regset that are
3944 not in gdbserver's regcache. */
3945
3946 nt_type = regset->nt_type;
3947 if (nt_type)
3948 {
3949 iov.iov_base = buf;
3950 iov.iov_len = regset->size;
3951 data = (void *) &iov;
3952 }
3953 else
3954 data = buf;
3955
3956#ifndef __sparc__
3957 res = ptrace (regset->get_request, pid, nt_type, data);
3958#else
3959 res = ptrace (regset->get_request, pid, data, nt_type);
3960#endif
3961
3962 if (res == 0)
3963 {
3964 /* Then overlay our cached registers on that. */
3965 regset->fill_function (regcache, buf);
3966
3967 /* Only now do we write the register set. */
3968#ifndef __sparc__
3969 res = ptrace (regset->set_request, pid, nt_type, data);
3970#else
3971 res = ptrace (regset->set_request, pid, data, nt_type);
3972#endif
3973 }
3974
3975 if (res < 0)
3976 {
3977 if (errno == EIO)
3978 {
3979 /* If we get EIO on a regset, do not try it again for
3980 this process. */
3981 disabled_regsets[regset - target_regsets] = 1;
3982 free (buf);
3983 continue;
3984 }
3985 else if (errno == ESRCH)
3986 {
3987 /* At this point, ESRCH should mean the process is
3988 already gone, in which case we simply ignore attempts
3989 to change its registers. See also the related
3990 comment in linux_resume_one_lwp. */
3991 free (buf);
3992 return 0;
3993 }
3994 else
3995 {
3996 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3997 }
3998 }
3999 else if (regset->type == GENERAL_REGS)
4000 saw_general_regs = 1;
4001 regset ++;
4002 free (buf);
4003 }
4004 if (saw_general_regs)
4005 return 0;
4006 else
4007 return 1;
4009}
4010
4011#endif /* HAVE_LINUX_REGSETS */
4012
4013
4014void
4015linux_fetch_registers (struct regcache *regcache, int regno)
4016{
4017#ifdef HAVE_LINUX_REGSETS
4018 if (regsets_fetch_inferior_registers (regcache) == 0)
4019 return;
4020#endif
4021#ifdef HAVE_LINUX_USRREGS
4022 usr_fetch_inferior_registers (regcache, regno);
4023#endif
4024}
4025
4026void
4027linux_store_registers (struct regcache *regcache, int regno)
4028{
4029#ifdef HAVE_LINUX_REGSETS
4030 if (regsets_store_inferior_registers (regcache) == 0)
4031 return;
4032#endif
4033#ifdef HAVE_LINUX_USRREGS
4034 usr_store_inferior_registers (regcache, regno);
4035#endif
4036}
4037
4038
4039/* Copy LEN bytes from inferior's memory starting at MEMADDR
4040 to debugger memory starting at MYADDR. */
4041
4042static int
4043linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4044{
4045 register int i;
4046 /* Round starting address down to longword boundary. */
4047 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4048 /* Round ending address up; get number of longwords that makes. */
4049 register int count
4050 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4051 / sizeof (PTRACE_XFER_TYPE);
4052 /* Allocate buffer of that many longwords. */
4053 register PTRACE_XFER_TYPE *buffer
4054 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4055 int fd;
4056 char filename[64];
4057 int pid = lwpid_of (get_thread_lwp (current_inferior));
4058
4059 /* Try using /proc. Don't bother for one word. */
4060 if (len >= 3 * sizeof (long))
4061 {
4062 /* We could keep this file open and cache it - possibly one per
4063 thread. That requires some juggling, but is even faster. */
4064 sprintf (filename, "/proc/%d/mem", pid);
4065 fd = open (filename, O_RDONLY | O_LARGEFILE);
4066 if (fd == -1)
4067 goto no_proc;
4068
4069 /* If pread64 is available, use it. It's faster if the kernel
4070 supports it (only one syscall), and it's 64-bit safe even on
4071 32-bit platforms (for instance, SPARC debugging a SPARC64
4072 application). */
4073#ifdef HAVE_PREAD64
4074 if (pread64 (fd, myaddr, len, memaddr) != len)
4075#else
4076 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4077#endif
4078 {
4079 close (fd);
4080 goto no_proc;
4081 }
4082
4083 close (fd);
4084 return 0;
4085 }
4086
4087 no_proc:
4088 /* Read all the longwords */
4089 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4090 {
4091 errno = 0;
4092 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4093 about coercing an 8 byte integer to a 4 byte pointer. */
4094 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4095 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4096 if (errno)
4097 return errno;
4098 }
4099
4100 /* Copy appropriate bytes out of the buffer. */
4101 memcpy (myaddr,
4102 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4103 len);
4104
4105 return 0;
4106}
4107
4108/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4109 memory at MEMADDR. On failure (cannot write to the inferior)
4110 returns the value of errno. */
4111
4112static int
4113linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4114{
4115 register int i;
4116 /* Round starting address down to longword boundary. */
4117 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4118 /* Round ending address up; get number of longwords that makes. */
4119 register int count
4120 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4121 / sizeof (PTRACE_XFER_TYPE);
4122
4123 /* Allocate buffer of that many longwords. */
4124 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4125 alloca (count * sizeof (PTRACE_XFER_TYPE));
4126
4127 int pid = lwpid_of (get_thread_lwp (current_inferior));
4128
4129 if (debug_threads)
4130 {
4131 /* Dump up to four bytes. */
4132 unsigned int val = * (unsigned int *) myaddr;
4133 if (len == 1)
4134 val = val & 0xff;
4135 else if (len == 2)
4136 val = val & 0xffff;
4137 else if (len == 3)
4138 val = val & 0xffffff;
4139 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4140 val, (long)memaddr);
4141 }
4142
4143 /* Fill start and end extra bytes of buffer with existing memory data. */
4144
4145 errno = 0;
4146 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4147 about coercing an 8 byte integer to a 4 byte pointer. */
4148 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4149 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4150 if (errno)
4151 return errno;
4152
4153 if (count > 1)
4154 {
4155 errno = 0;
4156 buffer[count - 1]
4157 = ptrace (PTRACE_PEEKTEXT, pid,
4158 /* Coerce to a uintptr_t first to avoid potential gcc warning
4159 about coercing an 8 byte integer to a 4 byte pointer. */
4160 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4161 * sizeof (PTRACE_XFER_TYPE)),
4162 0);
4163 if (errno)
4164 return errno;
4165 }
4166
4167 /* Copy data to be written over corresponding part of buffer. */
4168
4169 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4170 myaddr, len);
4171
4172 /* Write the entire buffer. */
4173
4174 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4175 {
4176 errno = 0;
4177 ptrace (PTRACE_POKETEXT, pid,
4178 /* Coerce to a uintptr_t first to avoid potential gcc warning
4179 about coercing an 8 byte integer to a 4 byte pointer. */
4180 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4181 (PTRACE_ARG4_TYPE) buffer[i]);
4182 if (errno)
4183 return errno;
4184 }
4185
4186 return 0;
4187}
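/* A worked example of the rounding above (illustrative): with 4-byte
   transfer words, writing LEN = 2 bytes at MEMADDR = 0x1003 gives
   ADDR = 0x1000 and COUNT = ((0x1005 - 0x1000) + 4 - 1) / 4 = 2.
   Both boundary words are peeked first, only the two target bytes in
   the buffer are overwritten, and then both words are poked back. */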
4188
4189/* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4190static int linux_supports_tracefork_flag;
4191
4192static void
4193linux_enable_event_reporting (int pid)
4194{
4195 if (!linux_supports_tracefork_flag)
4196 return;
4197
4198 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4199}
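/* Once PTRACE_O_TRACECLONE is set, a new clone shows up as a SIGTRAP
   stop whose extended event code sits in the high bits of the wait
   status, the same encoding linux_test_for_tracefork checks below
   for PTRACE_EVENT_FORK. A hypothetical decoder (illustrative;
   gdbserver's real handling lives in its wait machinery): */

#if 0 /* Example only. */
static int
example_is_clone_event (int status)
{
  return (WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGTRAP
	  && (status >> 16) == PTRACE_EVENT_CLONE);
}
#endif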
4200
4201/* Helper functions for linux_test_for_tracefork, called via clone (). */
4202
4203static int
4204linux_tracefork_grandchild (void *arg)
4205{
4206 _exit (0);
4207}
4208
4209#define STACK_SIZE 4096
4210
4211static int
4212linux_tracefork_child (void *arg)
4213{
4214 ptrace (PTRACE_TRACEME, 0, 0, 0);
4215 kill (getpid (), SIGSTOP);
4216
4217#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4218
4219 if (fork () == 0)
4220 linux_tracefork_grandchild (NULL);
4221
4222#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4223
4224#ifdef __ia64__
4225 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4226 CLONE_VM | SIGCHLD, NULL);
4227#else
4228 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4229 CLONE_VM | SIGCHLD, NULL);
4230#endif
4231
4232#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4233
4234 _exit (0);
4235}
4236
4237/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4238 sure that we can enable the option, and that it had the desired
4239 effect. */
4240
4241static void
4242linux_test_for_tracefork (void)
4243{
4244 int child_pid, ret, status;
4245 long second_pid;
4246#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4247 char *stack = xmalloc (STACK_SIZE * 4);
4248#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4249
4250 linux_supports_tracefork_flag = 0;
4251
4252#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4253
4254 child_pid = fork ();
4255 if (child_pid == 0)
4256 linux_tracefork_child (NULL);
4257
4258#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4259
4260 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4261#ifdef __ia64__
4262 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4263 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4264#else /* !__ia64__ */
4265 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4266 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4267#endif /* !__ia64__ */
4268
4269#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4270
4271 if (child_pid == -1)
4272 perror_with_name ("clone");
4273
4274 ret = my_waitpid (child_pid, &status, 0);
4275 if (ret == -1)
4276 perror_with_name ("waitpid");
4277 else if (ret != child_pid)
4278 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4279 if (! WIFSTOPPED (status))
4280 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4281
4282 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4283 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4284 if (ret != 0)
4285 {
4286 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4287 if (ret != 0)
4288 {
4289 warning ("linux_test_for_tracefork: failed to kill child");
4290 return;
4291 }
4292
4293 ret = my_waitpid (child_pid, &status, 0);
4294 if (ret != child_pid)
4295 warning ("linux_test_for_tracefork: failed to wait for killed child");
4296 else if (!WIFSIGNALED (status))
4297 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4298 "killed child", status);
4299
4300 return;
4301 }
4302
4303 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4304 if (ret != 0)
4305 warning ("linux_test_for_tracefork: failed to resume child");
4306
4307 ret = my_waitpid (child_pid, &status, 0);
4308
4309 if (ret == child_pid && WIFSTOPPED (status)
4310 && status >> 16 == PTRACE_EVENT_FORK)
4311 {
4312 second_pid = 0;
4313 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4314 if (ret == 0 && second_pid != 0)
4315 {
4316 int second_status;
4317
4318 linux_supports_tracefork_flag = 1;
4319 my_waitpid (second_pid, &second_status, 0);
4320 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4321 if (ret != 0)
4322 warning ("linux_test_for_tracefork: failed to kill second child");
4323 my_waitpid (second_pid, &status, 0);
4324 }
4325 }
4326 else
4327 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4328 "(%d, status 0x%x)", ret, status);
4329
4330 do
4331 {
4332 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4333 if (ret != 0)
4334 warning ("linux_test_for_tracefork: failed to kill child");
4335 my_waitpid (child_pid, &status, 0);
4336 }
4337 while (WIFSTOPPED (status));
4338
4339#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4340 free (stack);
4341#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4342}
4343
4344
4345static void
4346linux_look_up_symbols (void)
4347{
4348#ifdef USE_THREAD_DB
4349 struct process_info *proc = current_process ();
4350
4351 if (proc->private->thread_db != NULL)
4352 return;
4353
4354 /* If the kernel supports tracing forks then it also supports tracing
4355 clones, and then we don't need to use the magic thread event breakpoint
4356 to learn about threads. */
4357 thread_db_init (!linux_supports_tracefork_flag);
4358#endif
4359}
4360
4361static void
4362linux_request_interrupt (void)
4363{
4364 extern unsigned long signal_pid;
4365
4366 if (!ptid_equal (cont_thread, null_ptid)
4367 && !ptid_equal (cont_thread, minus_one_ptid))
4368 {
4369 struct lwp_info *lwp;
4370 int lwpid;
4371
4372 lwp = get_thread_lwp (current_inferior);
4373 lwpid = lwpid_of (lwp);
4374 kill_lwp (lwpid, SIGINT);
4375 }
4376 else
4377 kill_lwp (signal_pid, SIGINT);
4378}
4379
4380/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4381 to debugger memory starting at MYADDR. */
4382
4383static int
4384linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4385{
4386 char filename[PATH_MAX];
4387 int fd, n;
4388 int pid = lwpid_of (get_thread_lwp (current_inferior));
4389
4390 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4391
4392 fd = open (filename, O_RDONLY);
4393 if (fd < 0)
4394 return -1;
4395
4396 if (offset != (CORE_ADDR) 0
4397 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4398 n = -1;
4399 else
4400 n = read (fd, myaddr, len);
4401
4402 close (fd);
4403
4404 return n;
4405}
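/* The bytes read above are a vector of native-word (type, value)
   pairs terminated by an AT_NULL (0) entry. A hypothetical lookup
   over such a buffer (illustrative; assumes the inferior's word size
   matches gdbserver's, which does not hold for a 32-bit inferior
   under a 64-bit gdbserver): */

#if 0 /* Example only. */
static unsigned long
example_auxv_lookup (const unsigned char *auxv, size_t len,
		     unsigned long type)
{
  const unsigned long *p = (const unsigned long *) auxv;
  const unsigned long *end = (const unsigned long *) (auxv + len);

  for (; p + 2 <= end && p[0] != 0 /* AT_NULL */; p += 2)
    if (p[0] == type)
      return p[1];
  return 0;
}
#endif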
4406
4407/* These breakpoint and watchpoint related wrapper functions simply
4408 pass on the function call if the target has registered a
4409 corresponding function. */
4410
4411static int
4412linux_insert_point (char type, CORE_ADDR addr, int len)
4413{
4414 if (the_low_target.insert_point != NULL)
4415 return the_low_target.insert_point (type, addr, len);
4416 else
4417 /* Unsupported (see target.h). */
4418 return 1;
4419}
4420
4421static int
4422linux_remove_point (char type, CORE_ADDR addr, int len)
4423{
4424 if (the_low_target.remove_point != NULL)
4425 return the_low_target.remove_point (type, addr, len);
4426 else
4427 /* Unsupported (see target.h). */
4428 return 1;
4429}
4430
4431static int
4432linux_stopped_by_watchpoint (void)
4433{
4434 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4435
4436 return lwp->stopped_by_watchpoint;
4437}
4438
4439static CORE_ADDR
4440linux_stopped_data_address (void)
4441{
4442 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4443
4444 return lwp->stopped_data_address;
4445}
4446
4447#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4448#if defined(__mcoldfire__)
4449/* These should really be defined in the kernel's ptrace.h header. */
4450#define PT_TEXT_ADDR 49*4
4451#define PT_DATA_ADDR 50*4
4452#define PT_TEXT_END_ADDR 51*4
4453#elif defined(BFIN)
4454#define PT_TEXT_ADDR 220
4455#define PT_TEXT_END_ADDR 224
4456#define PT_DATA_ADDR 228
4457#elif defined(__TMS320C6X__)
4458#define PT_TEXT_ADDR (0x10000*4)
4459#define PT_DATA_ADDR (0x10004*4)
4460#define PT_TEXT_END_ADDR (0x10008*4)
4461#endif
4462
4463/* Under uClinux, programs are loaded at non-zero offsets, which we need
4464 to tell gdb about. */
4465
4466static int
4467linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4468{
4469#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4470 unsigned long text, text_end, data;
4471 int pid = lwpid_of (get_thread_lwp (current_inferior));
4472
4473 errno = 0;
4474
4475 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4476 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4477 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4478
4479 if (errno == 0)
4480 {
4481 /* Both text and data offsets produced at compile-time (and so
4482 used by gdb) are relative to the beginning of the program,
4483 with the data segment immediately following the text segment.
4484 However, the actual runtime layout in memory may put the data
4485 somewhere else, so when we send gdb a data base-address, we
4486 use the real data base address and subtract the compile-time
4487 data base-address from it (which is just the length of the
4488 text segment). BSS immediately follows data in both
4489 cases. */
4490 *text_p = text;
4491 *data_p = data - (text_end - text);
4492
4493 return 1;
4494 }
4495#endif
4496 return 0;
4497}
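/* A worked example of the data adjustment above (illustrative): if
   the kernel reports text = 0x100000, text_end = 0x108000 and
   data = 0x300000, the text segment is 0x8000 bytes long and
   *data_p = 0x300000 - 0x8000 = 0x2f8000, so gdb's compile-time
   offsets (data immediately after text) line up with the runtime
   layout. */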
4498#endif
4499
4500static int
4501linux_qxfer_osdata (const char *annex,
4502 unsigned char *readbuf, unsigned const char *writebuf,
4503 CORE_ADDR offset, int len)
4504{
4505 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4506}
4507
4508/* Convert a native/host siginfo object into/from the siginfo in the
4509 layout of the inferior's architecture. */
4510
4511static void
4512siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4513{
4514 int done = 0;
4515
4516 if (the_low_target.siginfo_fixup != NULL)
4517 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4518
4519 /* If there was no callback, or the callback didn't do anything,
4520 then just do a straight memcpy. */
4521 if (!done)
4522 {
4523 if (direction == 1)
4524 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4525 else
4526 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4527 }
4528}
4529
4530static int
4531linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4532 unsigned const char *writebuf, CORE_ADDR offset, int len)
4533{
4534 int pid;
4535 struct siginfo siginfo;
4536 char inf_siginfo[sizeof (struct siginfo)];
4537
4538 if (current_inferior == NULL)
4539 return -1;
4540
4541 pid = lwpid_of (get_thread_lwp (current_inferior));
4542
4543 if (debug_threads)
4544 fprintf (stderr, "%s siginfo for lwp %d.\n",
4545 readbuf != NULL ? "Reading" : "Writing",
4546 pid);
4547
4548 if (offset >= sizeof (siginfo))
4549 return -1;
4550
4551 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4552 return -1;
4553
4554 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4555 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4556 inferior with a 64-bit GDBSERVER should look the same as debugging it
4557 with a 32-bit GDBSERVER, we need to convert it. */
4558 siginfo_fixup (&siginfo, inf_siginfo, 0);
4559
4560 if (offset + len > sizeof (siginfo))
4561 len = sizeof (siginfo) - offset;
4562
4563 if (readbuf != NULL)
4564 memcpy (readbuf, inf_siginfo + offset, len);
4565 else
4566 {
4567 memcpy (inf_siginfo + offset, writebuf, len);
4568
4569 /* Convert back to ptrace layout before flushing it out. */
4570 siginfo_fixup (&siginfo, inf_siginfo, 1);
4571
4572 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4573 return -1;
4574 }
4575
4576 return len;
4577}
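/* A worked example of the clamping above (illustrative, assuming the
   common 128-byte Linux siginfo): a request for OFFSET = 120,
   LEN = 32 is trimmed to LEN = 128 - 120 = 8, and the return value
   tells the client how many bytes were actually transferred. */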
4578
4579/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4580 it lets us notice when children change state; and it acts as the
4581 handler for the sigsuspend in my_waitpid. */
4582
4583static void
4584sigchld_handler (int signo)
4585{
4586 int old_errno = errno;
4587
4588 if (debug_threads)
4589 {
4590 do
4591 {
4592 /* fprintf is not async-signal-safe, so call write
4593 directly. */
4594 if (write (2, "sigchld_handler\n",
4595 sizeof ("sigchld_handler\n") - 1) < 0)
4596 break; /* just ignore */
4597 } while (0);
4598 }
4599
4600 if (target_is_async_p ())
4601 async_file_mark (); /* trigger a linux_wait */
4602
4603 errno = old_errno;
4604}
4605
4606static int
4607linux_supports_non_stop (void)
4608{
4609 return 1;
4610}
4611
4612static int
4613linux_async (int enable)
4614{
4615 int previous = (linux_event_pipe[0] != -1);
4616
4617 if (debug_threads)
4618 fprintf (stderr, "linux_async (%d), previous=%d\n",
4619 enable, previous);
4620
4621 if (previous != enable)
4622 {
4623 sigset_t mask;
4624 sigemptyset (&mask);
4625 sigaddset (&mask, SIGCHLD);
4626
4627 sigprocmask (SIG_BLOCK, &mask, NULL);
4628
4629 if (enable)
4630 {
4631 if (pipe (linux_event_pipe) == -1)
4632 fatal ("creating event pipe failed.");
4633
4634 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4635 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4636
4637 /* Register the event loop handler. */
4638 add_file_handler (linux_event_pipe[0],
4639 handle_target_event, NULL);
4640
4641 /* Always trigger a linux_wait. */
4642 async_file_mark ();
4643 }
4644 else
4645 {
4646 delete_file_handler (linux_event_pipe[0]);
4647
4648 close (linux_event_pipe[0]);
4649 close (linux_event_pipe[1]);
4650 linux_event_pipe[0] = -1;
4651 linux_event_pipe[1] = -1;
4652 }
4653
4654 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4655 }
4656
4657 return previous;
4658}
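/* The pipe set up above implements the classic self-pipe pattern:
   sigchld_handler calls async_file_mark, which boils down to a
   single write on the pipe's write end, the only async-signal-safe
   way to wake the event loop. A hypothetical equivalent of the mark
   operation (async_file_mark itself is defined elsewhere in
   gdbserver): */

#if 0 /* Example only. */
static void
example_mark_event_pipe (int write_fd)
{
  /* O_NONBLOCK (set above) means this never blocks: a full pipe is
     already "marked", so a failed write is harmless, and the byte's
     value is irrelevant. */
  if (write (write_fd, "+", 1) < 0)
    ;				/* Ignore. */
}
#endif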
4659
4660static int
4661linux_start_non_stop (int nonstop)
4662{
4663 /* Register or unregister from event-loop accordingly. */
4664 linux_async (nonstop);
4665 return 0;
4666}
4667
4668static int
4669linux_supports_multi_process (void)
4670{
4671 return 1;
4672}
4673
4674static int
4675linux_supports_disable_randomization (void)
4676{
4677#ifdef HAVE_PERSONALITY
4678 return 1;
4679#else
4680 return 0;
4681#endif
4682}
4683
4684/* Enumerate spufs IDs for process PID. */
4685static int
4686spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4687{
4688 int pos = 0;
4689 int written = 0;
4690 char path[128];
4691 DIR *dir;
4692 struct dirent *entry;
4693
4694 sprintf (path, "/proc/%ld/fd", pid);
4695 dir = opendir (path);
4696 if (!dir)
4697 return -1;
4698
4699 rewinddir (dir);
4700 while ((entry = readdir (dir)) != NULL)
4701 {
4702 struct stat st;
4703 struct statfs stfs;
4704 int fd;
4705
4706 fd = atoi (entry->d_name);
4707 if (!fd)
4708 continue;
4709
4710 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4711 if (stat (path, &st) != 0)
4712 continue;
4713 if (!S_ISDIR (st.st_mode))
4714 continue;
4715
4716 if (statfs (path, &stfs) != 0)
4717 continue;
4718 if (stfs.f_type != SPUFS_MAGIC)
4719 continue;
4720
4721 if (pos >= offset && pos + 4 <= offset + len)
4722 {
4723 *(unsigned int *)(buf + pos - offset) = fd;
4724 written += 4;
4725 }
4726 pos += 4;
4727 }
4728
4729 closedir (dir);
4730 return written;
4731}
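/* A worked example of the OFFSET/LEN windowing above (illustrative):
   with three matching fds and OFFSET = 4, LEN = 8, the first id
   (pos 0..3) falls before the window and is skipped, the second and
   third (pos 4..11) are stored at buf[0..7], and the function
   returns 8. */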
4732
4733/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4734 object type, using the /proc file system. */
4735static int
4736linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4737 unsigned const char *writebuf,
4738 CORE_ADDR offset, int len)
4739{
4740 long pid = lwpid_of (get_thread_lwp (current_inferior));
4741 char buf[128];
4742 int fd = 0;
4743 int ret = 0;
4744
4745 if (!writebuf && !readbuf)
4746 return -1;
4747
4748 if (!*annex)
4749 {
4750 if (!readbuf)
4751 return -1;
4752 else
4753 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4754 }
4755
4756 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4757 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4758 if (fd <= 0)
4759 return -1;
4760
4761 if (offset != 0
4762 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4763 {
4764 close (fd);
4765 return 0;
4766 }
4767
4768 if (writebuf)
4769 ret = write (fd, writebuf, (size_t) len);
4770 else
4771 ret = read (fd, readbuf, (size_t) len);
4772
4773 close (fd);
4774 return ret;
4775}
4776
4777#if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4778struct target_loadseg
4779{
4780 /* Core address to which the segment is mapped. */
4781 Elf32_Addr addr;
4782 /* VMA recorded in the program header. */
4783 Elf32_Addr p_vaddr;
4784 /* Size of this segment in memory. */
4785 Elf32_Word p_memsz;
4786};
4787
4788# if defined PT_GETDSBT
4789struct target_loadmap
4790{
4791 /* Protocol version number, must be zero. */
4792 Elf32_Word version;
4793 /* Pointer to the DSBT table, its size, and the DSBT index. */
4794 unsigned *dsbt_table;
4795 unsigned dsbt_size, dsbt_index;
4796 /* Number of segments in this map. */
4797 Elf32_Word nsegs;
4798 /* The actual memory map. */
4799 struct target_loadseg segs[/*nsegs*/];
4800};
4801# define LINUX_LOADMAP PT_GETDSBT
4802# define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4803# define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4804# else
4805struct target_loadmap
4806{
4807 /* Protocol version number, must be zero. */
4808 Elf32_Half version;
4809 /* Number of segments in this map. */
4810 Elf32_Half nsegs;
4811 /* The actual memory map. */
4812 struct target_loadseg segs[/*nsegs*/];
4813};
4814# define LINUX_LOADMAP PTRACE_GETFDPIC
4815# define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4816# define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4817# endif
4818
4819static int
4820linux_read_loadmap (const char *annex, CORE_ADDR offset,
4821 unsigned char *myaddr, unsigned int len)
4822{
4823 int pid = lwpid_of (get_thread_lwp (current_inferior));
4824 int addr = -1;
4825 struct target_loadmap *data = NULL;
4826 unsigned int actual_length, copy_length;
4827
4828 if (strcmp (annex, "exec") == 0)
4829 addr = (int) LINUX_LOADMAP_EXEC;
4830 else if (strcmp (annex, "interp") == 0)
4831 addr = (int) LINUX_LOADMAP_INTERP;
4832 else
4833 return -1;
4834
4835 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
4836 return -1;
4837
4838 if (data == NULL)
4839 return -1;
4840
4841 actual_length = sizeof (struct target_loadmap)
4842 + sizeof (struct target_loadseg) * data->nsegs;
4843
4844 if (offset > actual_length)
4845 return -1;
4846
4847 copy_length = actual_length - offset < len ? actual_length - offset : len;
4848 memcpy (myaddr, (char *) data + offset, copy_length);
4849 return copy_length;
4850}
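/* A worked example of the clamping above (illustrative): if the
   loadmap is ACTUAL_LENGTH = 40 bytes and the caller asks for
   OFFSET = 32, LEN = 16, then COPY_LENGTH = 40 - 32 = 8 and only the
   tail of the map is returned. */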
4851#else
4852# define linux_read_loadmap NULL
4853#endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
4854
4855static void
4856linux_process_qsupported (const char *query)
4857{
4858 if (the_low_target.process_qsupported != NULL)
4859 the_low_target.process_qsupported (query);
4860}
4861
4862static int
4863linux_supports_tracepoints (void)
4864{
4865 if (the_low_target.supports_tracepoints == NULL)
4866 return 0;
4867
4868 return (*the_low_target.supports_tracepoints) ();
4869}
4870
4871static CORE_ADDR
4872linux_read_pc (struct regcache *regcache)
4873{
4874 if (the_low_target.get_pc == NULL)
4875 return 0;
4876
4877 return (*the_low_target.get_pc) (regcache);
4878}
4879
4880static void
4881linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4882{
4883 gdb_assert (the_low_target.set_pc != NULL);
4884
4885 (*the_low_target.set_pc) (regcache, pc);
4886}
4887
4888static int
4889linux_thread_stopped (struct thread_info *thread)
4890{
4891 return get_thread_lwp (thread)->stopped;
4892}
4893
4894/* This exposes stop-all-threads functionality to other modules. */
4895
4896static void
4897linux_pause_all (int freeze)
4898{
4899 stop_all_lwps (freeze, NULL);
4900}
4901
4902/* This exposes unstop-all-threads functionality to other gdbserver
4903 modules. */
4904
4905static void
4906linux_unpause_all (int unfreeze)
4907{
4908 unstop_all_lwps (unfreeze, NULL);
4909}
4910
4911static int
4912linux_prepare_to_access_memory (void)
4913{
4914 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4915 running LWP. */
4916 if (non_stop)
4917 linux_pause_all (1);
4918 return 0;
4919}
4920
4921static void
4922linux_done_accessing_memory (void)
4923{
4924 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4925 running LWP. */
4926 if (non_stop)
4927 linux_unpause_all (1);
4928}
4929
4930static int
4931linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
4932 CORE_ADDR collector,
4933 CORE_ADDR lockaddr,
4934 ULONGEST orig_size,
4935 CORE_ADDR *jump_entry,
4936 CORE_ADDR *trampoline,
4937 ULONGEST *trampoline_size,
4938 unsigned char *jjump_pad_insn,
4939 ULONGEST *jjump_pad_insn_size,
4940 CORE_ADDR *adjusted_insn_addr,
4941 CORE_ADDR *adjusted_insn_addr_end,
4942 char *err)
4943{
4944 return (*the_low_target.install_fast_tracepoint_jump_pad)
4945 (tpoint, tpaddr, collector, lockaddr, orig_size,
4946 jump_entry, trampoline, trampoline_size,
4947 jjump_pad_insn, jjump_pad_insn_size,
4948 adjusted_insn_addr, adjusted_insn_addr_end,
4949 err);
4950}
4951
4952static struct emit_ops *
4953linux_emit_ops (void)
4954{
4955 if (the_low_target.emit_ops != NULL)
4956 return (*the_low_target.emit_ops) ();
4957 else
4958 return NULL;
4959}
4960
4961static int
4962linux_get_min_fast_tracepoint_insn_len (void)
4963{
4964 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
4965}
4966
4967static struct target_ops linux_target_ops = {
4968 linux_create_inferior,
4969 linux_attach,
4970 linux_kill,
4971 linux_detach,
4972 linux_mourn,
4973 linux_join,
4974 linux_thread_alive,
4975 linux_resume,
4976 linux_wait,
4977 linux_fetch_registers,
4978 linux_store_registers,
4979 linux_prepare_to_access_memory,
4980 linux_done_accessing_memory,
4981 linux_read_memory,
4982 linux_write_memory,
4983 linux_look_up_symbols,
4984 linux_request_interrupt,
4985 linux_read_auxv,
4986 linux_insert_point,
4987 linux_remove_point,
4988 linux_stopped_by_watchpoint,
4989 linux_stopped_data_address,
4990#if defined(__UCLIBC__) && defined(HAS_NOMMU)
4991 linux_read_offsets,
4992#else
4993 NULL,
4994#endif
4995#ifdef USE_THREAD_DB
4996 thread_db_get_tls_address,
4997#else
4998 NULL,
4999#endif
5000 linux_qxfer_spu,
5001 hostio_last_error_from_errno,
5002 linux_qxfer_osdata,
5003 linux_xfer_siginfo,
5004 linux_supports_non_stop,
5005 linux_async,
5006 linux_start_non_stop,
5007 linux_supports_multi_process,
5008#ifdef USE_THREAD_DB
5009 thread_db_handle_monitor_command,
5010#else
5011 NULL,
5012#endif
5013 linux_common_core_of_thread,
5014 linux_read_loadmap,
5015 linux_process_qsupported,
5016 linux_supports_tracepoints,
5017 linux_read_pc,
5018 linux_write_pc,
5019 linux_thread_stopped,
5020 NULL,
5021 linux_pause_all,
5022 linux_unpause_all,
5023 linux_cancel_breakpoints,
5024 linux_stabilize_threads,
5025 linux_install_fast_tracepoint_jump_pad,
5026 linux_emit_ops,
5027 linux_supports_disable_randomization,
5028 linux_get_min_fast_tracepoint_insn_len,
5029};
5030
5031static void
5032linux_init_signals (void)
5033{
5034 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5035 to find what the cancel signal actually is. */
5036#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5037 signal (__SIGRTMIN+1, SIG_IGN);
5038#endif
5039}
5040
5041void
5042initialize_low (void)
5043{
5044 struct sigaction sigchld_action;
5045 memset (&sigchld_action, 0, sizeof (sigchld_action));
5046 set_target_ops (&linux_target_ops);
5047 set_breakpoint_data (the_low_target.breakpoint,
5048 the_low_target.breakpoint_len);
5049 linux_init_signals ();
5050 linux_test_for_tracefork ();
5051#ifdef HAVE_LINUX_REGSETS
5052 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5053 ;
5054 disabled_regsets = xmalloc (num_regsets);
5055#endif
5056
5057 sigchld_action.sa_handler = sigchld_handler;
5058 sigemptyset (&sigchld_action.sa_mask);
5059 sigchld_action.sa_flags = SA_RESTART;
5060 sigaction (SIGCHLD, &sigchld_action, NULL);
5061}