/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
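
/* Illustration (assuming the usual glibc wait-status encoding):
   W_STOPCODE builds the status word waitpid would report for a stop
   by signal SIG.  For SIGSTOP, which is 19 on Linux/x86:

     W_STOPCODE (SIGSTOP) == (19 << 8) | 0x7f == 0x137f

   and the standard macros recover the pieces: WIFSTOPPED (0x137f) is
   true (low byte is 0x7f), and WSTOPSIG (0x137f) == 19 == SIGSTOP.
   This is used below to synthesize "stopped by SIGSTOP" statuses for
   new clone children.  */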

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd; the caller must free it.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}
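
/* Usage sketch (a hypothetical caller, for illustration): on a biarch
   system one might pick a register layout from the executable:

     char *file = linux_child_pid_to_exec_file (pid);
     int is64 = elf_64_file_p (file);
     free (file);
     if (is64 > 0)
       ... use the 64-bit layout ...
     else if (is64 == 0)
       ... use the 32-bit layout ...
     else
       ... file unreadable, fall back to a default ...

   Note that the -1 "unreadable" result is also non-zero, so callers
   must test the three cases explicitly rather than treating the
   return value as a boolean.  */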

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThreads' signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}
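
/* How the emulation above behaves (descriptive, derived from the
   code): on a kernel without __WALL, a blocking
   my_waitpid (-1, &status, __WALL) alternates between the two child
   flavors, effectively looping:

     waitpid (-1, &status, WNOHANG);             - plain children
     waitpid (-1, &status, WNOHANG | __WCLONE);  - clone children
     sigsuspend (&wake_mask);                    - sleep until a signal

   until one of the waitpid calls reports a child.  Signals are
   blocked for the whole loop, so a SIGCHLD cannot slip in between a
   failed waitpid and the sigsuspend.  */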

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
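
/* Worked example (illustrative): on i386 the breakpoint instruction
   (int3, 0xcc) is one byte and has already executed by the time the
   SIGTRAP is reported, so decr_pc_after_break is 1.  With a
   breakpoint planted at 0x1000 and the inferior resumed with
   PTRACE_CONT, waitpid reports a stop with $eip == 0x1001;
   get_stop_pc above subtracts 1 and returns 0x1000, the address GDB
   knows the breakpoint by.  Under PTRACE_SINGLESTEP, lwp->stepping is
   set and no adjustment is made.  */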

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its PID.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}
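
/* Note (descriptive, based on the code above): linux_attach only
   PTRACE_ATTACHes the thread-group leader here.  Any other threads
   the process already has are attached one LWP at a time by callers
   of linux_attach_lwp, e.g. the thread_db layer as it discovers
   them.  */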

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *) thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info *) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
       was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped is to check the register that is used to select
     which address to watch.  The problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
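
/* Race illustration (descriptive, for the stopped_pids logic above):
   with PTRACE_O_TRACECLONE set, two events race to waitpid when a
   thread clones:

     1. the parent's PTRACE_EVENT_CLONE stop, and
     2. the new child's initial SIGSTOP.

   If (2) arrives first, the child's PID is not yet in all_lwps, so
   linux_wait_for_lwp stashes it in stopped_pids and retries;
   handle_extended_wait later claims it via pull_pid_from_list instead
   of calling waitpid for it again.  */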

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                         lwpid_of (lwp));

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}
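
/* Ordering note (descriptive, derived from the code above): the
   pending list is pushed at the head (p_sig->prev points at the
   previously queued entry), and dequeue_one_deferred_signal walks to
   the end of the prev chain before removing, so signals are reported
   in the order they were deferred.  E.g. after deferring SIGUSR1 and
   then SIGUSR2:

     pending_signals_to_report -> SIGUSR2 -> SIGUSR1 -> NULL

   the first dequeue reports SIGUSR1.  */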

/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}
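
/* Arithmetic behind the random pick above (illustrative): with
   num_events == 3, the expression

     (num_events * (double) rand ()) / (RAND_MAX + 1.0)

   is uniformly distributed over [0, 3), so after truncation to int
   the selector is 0, 1 or 2 with equal probability, and
   select_event_lwp_callback returns the eligible LWP at that
   position.  Dividing by RAND_MAX + 1.0 (not RAND_MAX) keeps the
   result strictly below num_events.  */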

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc.
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callees).  For
   example:

   - starting a new trace run.  A thread still collecting the
   previous run could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even be midway through memcpy'ing
   to the buffer, which would mean that when resumed, it would clobber
   the trace buffer that had been set for a new run.

   - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */
1875
1876 static void
1877 linux_stabilize_threads (void)
1878 {
1879 struct thread_info *save_inferior;
1880 struct lwp_info *lwp_stuck;
1881
1882 lwp_stuck
1883 = (struct lwp_info *) find_inferior (&all_lwps,
1884 stuck_in_jump_pad_callback, NULL);
1885 if (lwp_stuck != NULL)
1886 {
1887 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
1888 lwpid_of (lwp_stuck));
1889 return;
1890 }
1891
1892 save_inferior = current_inferior;
1893
1894 stabilizing_threads = 1;
1895
1896 /* Kick 'em all. */
1897 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
1898
1899 /* Loop until all are stopped out of the jump pads. */
1900 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
1901 {
1902 struct target_waitstatus ourstatus;
1903 struct lwp_info *lwp;
1904 ptid_t ptid;
1905 int wstat;
1906
1907 /* Note that we go through the full wait event loop. While
1908 moving threads out of the jump pad, we need to be able to
1909 step over internal breakpoints and such. */
1910 ptid = linux_wait_1 (minus_one_ptid, &ourstatus, 0);
1911
1912 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
1913 {
1914 lwp = get_thread_lwp (current_inferior);
1915
1916 /* Lock it. */
1917 lwp->suspended++;
1918
1919 if (ourstatus.value.sig != TARGET_SIGNAL_0
1920 || current_inferior->last_resume_kind == resume_stop)
1921 {
1922 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
1923 enqueue_one_deferred_signal (lwp, &wstat);
1924 }
1925 }
1926 }
1927
1928 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
1929
1930 stabilizing_threads = 0;
1931
1932 current_inferior = save_inferior;
1933
1934 lwp_stuck
1935 = (struct lwp_info *) find_inferior (&all_lwps,
1936 stuck_in_jump_pad_callback, NULL);
1937 if (lwp_stuck != NULL)
1938 {
1939 if (debug_threads)
1940 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
1941 lwpid_of (lwp_stuck));
1942 }
1943 }
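
/* In the all-stop path of linux_wait_1 below, stabilize_threads is
   invoked just before a stop is reported, so that by the time GDB
   regains control no thread is left parked inside a jump pad.  */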
1944
1945 /* Wait for an inferior event; store the status in *OURSTATUS and return the event LWP's ptid. */
1946
1947 static ptid_t
1948 linux_wait_1 (ptid_t ptid,
1949 struct target_waitstatus *ourstatus, int target_options)
1950 {
1951 int w;
1952 struct lwp_info *event_child;
1953 int options;
1954 int pid;
1955 int step_over_finished;
1956 int bp_explains_trap;
1957 int maybe_internal_trap;
1958 int report_to_gdb;
1959 int trace_event;
1960
1961 /* Translate generic target options into linux options. */
1962 options = __WALL;
1963 if (target_options & TARGET_WNOHANG)
1964 options |= WNOHANG;
1965
1966 retry:
1967 bp_explains_trap = 0;
1968 trace_event = 0;
1969 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1970
1971 /* If we were only supposed to resume one thread, only wait for
1972 that thread - if it's still alive. If it died, however - which
1973 can happen if we're coming from the thread death case below -
1974 then we need to make sure we restart the other threads. We could
1975 pick a thread at random or restart all; restarting all is less
1976 arbitrary. */
1977 if (!non_stop
1978 && !ptid_equal (cont_thread, null_ptid)
1979 && !ptid_equal (cont_thread, minus_one_ptid))
1980 {
1981 struct thread_info *thread;
1982
1983 thread = (struct thread_info *) find_inferior_id (&all_threads,
1984 cont_thread);
1985
1986 /* No stepping, no signal - unless one is pending already, of course. */
1987 if (thread == NULL)
1988 {
1989 struct thread_resume resume_info;
1990 resume_info.thread = minus_one_ptid;
1991 resume_info.kind = resume_continue;
1992 resume_info.sig = 0;
1993 linux_resume (&resume_info, 1);
1994 }
1995 else
1996 ptid = cont_thread;
1997 }
1998
1999 if (ptid_equal (step_over_bkpt, null_ptid))
2000 pid = linux_wait_for_event (ptid, &w, options);
2001 else
2002 {
2003 if (debug_threads)
2004 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2005 target_pid_to_str (step_over_bkpt));
2006 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2007 }
2008
2009 if (pid == 0) /* only if TARGET_WNOHANG */
2010 return null_ptid;
2011
2012 event_child = get_thread_lwp (current_inferior);
2013
2014 /* If we are waiting for a particular child, and it exited,
2015 linux_wait_for_event will return its exit status. Similarly if
2016 the last child exited. If this is not the last child, however,
2017 do not report it as exited until there is a 'thread exited' response
2018 available in the remote protocol. Instead, just wait for another event.
2019 This should be safe, because if the thread crashed we will already
2020 have reported the termination signal to GDB; that should stop any
2021 in-progress stepping operations, etc.
2022
2023 Report the exit status of the last thread to exit. This matches
2024 LinuxThreads' behavior. */
2025
2026 if (last_thread_of_process_p (current_inferior))
2027 {
2028 if (WIFEXITED (w) || WIFSIGNALED (w))
2029 {
2030 if (WIFEXITED (w))
2031 {
2032 ourstatus->kind = TARGET_WAITKIND_EXITED;
2033 ourstatus->value.integer = WEXITSTATUS (w);
2034
2035 if (debug_threads)
2036 fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
2037 }
2038 else
2039 {
2040 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2041 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2042
2043 if (debug_threads)
2044 fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
2045
2046 }
2047
2048 return ptid_of (event_child);
2049 }
2050 }
2051 else
2052 {
2053 if (!WIFSTOPPED (w))
2054 goto retry;
2055 }
2056
2057 /* If this event was not handled before, and is not a SIGTRAP, we
2058 report it. SIGILL and SIGSEGV are also treated as traps in case
2059 a breakpoint is inserted at the current PC. If this target does
2060 not support internal breakpoints at all, we also report the
2061 SIGTRAP without further processing; it's of no concern to us. */
2062 maybe_internal_trap
2063 = (supports_breakpoints ()
2064 && (WSTOPSIG (w) == SIGTRAP
2065 || ((WSTOPSIG (w) == SIGILL
2066 || WSTOPSIG (w) == SIGSEGV)
2067 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2068
2069 if (maybe_internal_trap)
2070 {
2071 /* Handle anything that requires bookkeeping before deciding to
2072 report the event or continue waiting. */
2073
2074 /* First check if we can explain the SIGTRAP with an internal
2075 breakpoint, or if we should possibly report the event to GDB.
2076 Do this before anything that may remove or insert a
2077 breakpoint. */
2078 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2079
2080 /* We have a SIGTRAP, possibly a step-over dance has just
2081 finished. If so, tweak the state machine accordingly,
2082 reinsert breakpoints and delete any reinsert (software
2083 single-step) breakpoints. */
2084 step_over_finished = finish_step_over (event_child);
2085
2086 /* Now invoke the callbacks of any internal breakpoints there. */
2087 check_breakpoints (event_child->stop_pc);
2088
2089 /* Handle tracepoint data collecting. This may overflow the
2090 trace buffer, and cause a tracing stop, removing
2091 breakpoints. */
2092 trace_event = handle_tracepoints (event_child);
2093
2094 if (bp_explains_trap)
2095 {
2096 /* If we stepped or ran into an internal breakpoint, we've
2097 already handled it. So next time we resume (from this
2098 PC), we should step over it. */
2099 if (debug_threads)
2100 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2101
2102 if (breakpoint_here (event_child->stop_pc))
2103 event_child->need_step_over = 1;
2104 }
2105 }
2106 else
2107 {
2108 /* We have some other signal, possibly a step-over dance was in
2109 progress, and it should be cancelled too. */
2110 step_over_finished = finish_step_over (event_child);
2111 }
2112
2113 /* We have all the data we need. Either report the event to GDB, or
2114 resume threads and keep waiting for more. */
2115
2116 /* If we're collecting a fast tracepoint, finish the collection and
2117 move out of the jump pad before delivering a signal. See
2118 linux_stabilize_threads. */
2119
2120 if (WIFSTOPPED (w)
2121 && WSTOPSIG (w) != SIGTRAP
2122 && supports_fast_tracepoints ()
2123 && in_process_agent_loaded ())
2124 {
2125 if (debug_threads)
2126 fprintf (stderr,
2127 "Got signal %d for LWP %ld. Check if we need "
2128 "to defer or adjust it.\n",
2129 WSTOPSIG (w), lwpid_of (event_child));
2130
2131 /* Allow debugging the jump pad itself. */
2132 if (current_inferior->last_resume_kind != resume_step
2133 && maybe_move_out_of_jump_pad (event_child, &w))
2134 {
2135 enqueue_one_deferred_signal (event_child, &w);
2136
2137 if (debug_threads)
2138 fprintf (stderr,
2139 "Signal %d for LWP %ld deferred (in jump pad)\n",
2140 WSTOPSIG (w), lwpid_of (event_child));
2141
2142 linux_resume_one_lwp (event_child, 0, 0, NULL);
2143 goto retry;
2144 }
2145 }
2146
2147 if (event_child->collecting_fast_tracepoint)
2148 {
2149 if (debug_threads)
2150 fprintf (stderr, "\
2151 LWP %ld was trying to move out of the jump pad (%d). \
2152 Check if we're already there.\n",
2153 lwpid_of (event_child),
2154 event_child->collecting_fast_tracepoint);
2155
2156 trace_event = 1;
2157
2158 event_child->collecting_fast_tracepoint
2159 = linux_fast_tracepoint_collecting (event_child, NULL);
2160
2161 if (event_child->collecting_fast_tracepoint != 1)
2162 {
2163 /* No longer need this breakpoint. */
2164 if (event_child->exit_jump_pad_bkpt != NULL)
2165 {
2166 if (debug_threads)
2167 fprintf (stderr,
2168 "No longer need exit-jump-pad bkpt; removing it; "
2169 "stopping all threads momentarily.\n");
2170
2171 /* Other running threads could hit this breakpoint.
2172 We don't handle moribund locations like GDB does;
2173 instead we always pause all threads when removing
2174 breakpoints, so that any step-over or
2175 decr_pc_after_break adjustment is always taken
2176 care of while the breakpoint is still
2177 inserted. */
2178 stop_all_lwps (1, event_child);
2179 cancel_breakpoints ();
2180
2181 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2182 event_child->exit_jump_pad_bkpt = NULL;
2183
2184 unstop_all_lwps (1, event_child);
2185
2186 gdb_assert (event_child->suspended >= 0);
2187 }
2188 }
2189
2190 if (event_child->collecting_fast_tracepoint == 0)
2191 {
2192 if (debug_threads)
2193 fprintf (stderr,
2194 "fast tracepoint finished "
2195 "collecting successfully.\n");
2196
2197 /* We may have a deferred signal to report. */
2198 if (dequeue_one_deferred_signal (event_child, &w))
2199 {
2200 if (debug_threads)
2201 fprintf (stderr, "dequeued one signal.\n");
2202 }
2203 else
2204 {
2205 if (debug_threads)
2206 fprintf (stderr, "no deferred signals.\n");
2207
2208 if (stabilizing_threads)
2209 {
2210 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2211 ourstatus->value.sig = TARGET_SIGNAL_0;
2212 return ptid_of (event_child);
2213 }
2214 }
2215 }
2216 }
2217
2218 /* Check whether GDB would be interested in this event. */
2219
2220 /* If GDB is not interested in this signal, don't stop other
2221 threads, and don't report it to GDB. Just resume the inferior
2222 right away. We do this for threading-related signals as well as
2223 any that GDB specifically requested we ignore. But never ignore
2224 SIGSTOP if we sent it ourselves, and do not ignore signals when
2225 stepping - they may require special handling to skip the signal
2226 handler. */
2227 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2228 thread library? */
2229 if (WIFSTOPPED (w)
2230 && current_inferior->last_resume_kind != resume_step
2231 && (
2232 #if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
2233 (current_process ()->private->thread_db != NULL
2234 && (WSTOPSIG (w) == __SIGRTMIN
2235 || WSTOPSIG (w) == __SIGRTMIN + 1))
2236 ||
2237 #endif
2238 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2239 && !(WSTOPSIG (w) == SIGSTOP
2240 && current_inferior->last_resume_kind == resume_stop))))
2241 {
2242 siginfo_t info, *info_p;
2243
2244 if (debug_threads)
2245 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2246 WSTOPSIG (w), lwpid_of (event_child));
2247
2248 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2249 info_p = &info;
2250 else
2251 info_p = NULL;
2252 linux_resume_one_lwp (event_child, event_child->stepping,
2253 WSTOPSIG (w), info_p);
2254 goto retry;
2255 }
2256
2257 /* If GDB wanted this thread to single step, we always want to
2258 report the SIGTRAP, and let GDB handle it. Watchpoints should
2259 always be reported. So should signals we can't explain. A
2260 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2261 may not support Z0 breakpoints. If we do, we'll be able to
2262 handle GDB breakpoints on top of internal breakpoints, by handling
2263 the internal breakpoint and still reporting the event to GDB. If
2264 we don't, we're out of luck: GDB won't see the breakpoint hit. */
2265 report_to_gdb = (!maybe_internal_trap
2266 || current_inferior->last_resume_kind == resume_step
2267 || event_child->stopped_by_watchpoint
2268 || (!step_over_finished && !bp_explains_trap && !trace_event)
2269 || gdb_breakpoint_here (event_child->stop_pc));
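
/* Put another way: we report a single-step finish, a watchpoint
   hit, any signal we can't explain, and any trap at an address
   where GDB has set a breakpoint of its own; we swallow traps that
   are fully accounted for by an internal breakpoint, a finished
   step-over, or tracepoint collection, and silently resume.  */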
2270
2271 /* We found no reason GDB would want us to stop. We either hit one
2272 of our own breakpoints, or finished an internal step GDB
2273 shouldn't know about. */
2274 if (!report_to_gdb)
2275 {
2276 if (debug_threads)
2277 {
2278 if (bp_explains_trap)
2279 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2280 if (step_over_finished)
2281 fprintf (stderr, "Step-over finished.\n");
2282 if (trace_event)
2283 fprintf (stderr, "Tracepoint event.\n");
2284 }
2285
2286 /* We're not reporting this breakpoint to GDB, so apply the
2287 decr_pc_after_break adjustment to the inferior's regcache
2288 ourselves. */
2289
2290 if (the_low_target.set_pc != NULL)
2291 {
2292 struct regcache *regcache
2293 = get_thread_regcache (get_lwp_thread (event_child), 1);
2294 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2295 }
2296
2297 /* We may have finished stepping over a breakpoint. If so,
2298 we've stopped and suspended all LWPs momentarily except the
2299 stepping one. This is where we resume them all again. We're
2300 going to keep waiting, so use proceed, which handles stepping
2301 over the next breakpoint. */
2302 if (debug_threads)
2303 fprintf (stderr, "proceeding all threads.\n");
2304
2305 if (step_over_finished)
2306 unsuspend_all_lwps (event_child);
2307
2308 proceed_all_lwps ();
2309 goto retry;
2310 }
2311
2312 if (debug_threads)
2313 {
2314 if (current_inferior->last_resume_kind == resume_step)
2315 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2316 if (event_child->stopped_by_watchpoint)
2317 fprintf (stderr, "Stopped by watchpoint.\n");
2318 if (gdb_breakpoint_here (event_child->stop_pc))
2319 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2320 if (debug_threads)
2321 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2322 }
2323
2324 /* Alright, we're going to report a stop. */
2325
2326 if (!non_stop && !stabilizing_threads)
2327 {
2328 /* In all-stop, stop all threads. */
2329 stop_all_lwps (0, NULL);
2330
2331 /* If we're not waiting for a specific LWP, choose an event LWP
2332 from among those that have had events. Giving equal priority
2333 to all LWPs that have had events helps prevent
2334 starvation. */
2335 if (ptid_equal (ptid, minus_one_ptid))
2336 {
2337 event_child->status_pending_p = 1;
2338 event_child->status_pending = w;
2339
2340 select_event_lwp (&event_child);
2341
2342 event_child->status_pending_p = 0;
2343 w = event_child->status_pending;
2344 }
2345
2346 /* Now that we've selected our final event LWP, cancel any
2347 breakpoints in other LWPs that have hit a GDB breakpoint.
2348 See the comment in cancel_breakpoints_callback to find out
2349 why. */
2350 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2351
2352 /* Stabilize threads (move out of jump pads). */
2353 stabilize_threads ();
2354 }
2355 else
2356 {
2357 /* If we just finished a step-over, then all threads had been
2358 momentarily paused. In all-stop, that's fine, we want
2359 threads stopped by now anyway. In non-stop, we need to
2360 re-resume threads that GDB wanted to be running. */
2361 if (step_over_finished)
2362 unstop_all_lwps (1, event_child);
2363 }
2364
2365 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2366
2367 if (current_inferior->last_resume_kind == resume_stop
2368 && WSTOPSIG (w) == SIGSTOP)
2369 {
2370 /* GDB requested this thread to stop with vCont;t, and it
2371 stopped cleanly, so report the stop as SIG0. The use of
2372 SIGSTOP is an implementation detail. */
2373 ourstatus->value.sig = TARGET_SIGNAL_0;
2374 }
2375 else if (current_inferior->last_resume_kind == resume_stop
2376 && WSTOPSIG (w) != SIGSTOP)
2377 {
2378 /* GDB requested this thread to stop with vCont;t, but it
2379 stopped for some other reason. */
2380 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2381 }
2382 else
2383 {
2384 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2385 }
2386
2387 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2388
2389 if (debug_threads)
2390 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2391 target_pid_to_str (ptid_of (event_child)),
2392 ourstatus->kind,
2393 ourstatus->value.sig);
2394
2395 return ptid_of (event_child);
2396 }
2397
2398 /* Get rid of any pending event in the pipe. */
2399 static void
2400 async_file_flush (void)
2401 {
2402 int ret;
2403 char buf;
2404
2405 do
2406 ret = read (linux_event_pipe[0], &buf, 1);
2407 while (ret >= 0 || (ret == -1 && errno == EINTR));
2408 }
2409
2410 /* Put something in the pipe, so the event loop wakes up. */
2411 static void
2412 async_file_mark (void)
2413 {
2414 int ret;
2415
2416 async_file_flush ();
2417
2418 do
2419 ret = write (linux_event_pipe[1], "+", 1);
2420 while (ret == 0 || (ret == -1 && errno == EINTR));
2421
2422 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2423 be awakened anyway. */
2424 }
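
/* The two helpers above are the classic self-pipe trick: the write
   end is marked when an event may be pending, and the event loop
   waits on the read end.  Both assume linux_event_pipe was made
   non-blocking where it is created, presumably along the lines of:

     fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
     fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

   otherwise the flush loop above would block once the pipe drained.  */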
2425
2426 static ptid_t
2427 linux_wait (ptid_t ptid,
2428 struct target_waitstatus *ourstatus, int target_options)
2429 {
2430 ptid_t event_ptid;
2431
2432 if (debug_threads)
2433 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2434
2435 /* Flush the async file first. */
2436 if (target_is_async_p ())
2437 async_file_flush ();
2438
2439 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2440
2441 /* If at least one stop was reported, there may be more. A single
2442 SIGCHLD can signal more than one child stop. */
2443 if (target_is_async_p ()
2444 && (target_options & TARGET_WNOHANG) != 0
2445 && !ptid_equal (event_ptid, null_ptid))
2446 async_file_mark ();
2447
2448 return event_ptid;
2449 }
2450
2451 /* Send a signal to an LWP. */
2452
2453 static int
2454 kill_lwp (unsigned long lwpid, int signo)
2455 {
2456 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2457 is unavailable (ENOSYS), we are not using nptl threads; use kill. */
2458
2459 #ifdef __NR_tkill
2460 {
2461 static int tkill_failed;
2462
2463 if (!tkill_failed)
2464 {
2465 int ret;
2466
2467 errno = 0;
2468 ret = syscall (__NR_tkill, lwpid, signo);
2469 if (errno != ENOSYS)
2470 return ret;
2471 tkill_failed = 1;
2472 }
2473 }
2474 #endif
2475
2476 return kill (lwpid, signo);
2477 }
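
/* Background: under NPTL, a non-leader thread can only be signalled
   directly with tkill; plain kill addresses the whole thread group.
   Under the older LinuxThreads, every thread was a separate process,
   so kill (lwpid, signo) reaches it fine, and on kernels without
   tkill (ENOSYS) that is the fallback taken above.  */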
2478
2479 void
2480 linux_stop_lwp (struct lwp_info *lwp)
2481 {
2482 send_sigstop (lwp);
2483 }
2484
2485 static void
2486 send_sigstop (struct lwp_info *lwp)
2487 {
2488 int pid;
2489
2490 pid = lwpid_of (lwp);
2491
2492 /* If we already have a pending stop signal for this LWP, don't
2493 send another. */
2494 if (lwp->stop_expected)
2495 {
2496 if (debug_threads)
2497 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2498
2499 return;
2500 }
2501
2502 if (debug_threads)
2503 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2504
2505 lwp->stop_expected = 1;
2506 kill_lwp (pid, SIGSTOP);
2507 }
2508
2509 static int
2510 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2511 {
2512 struct lwp_info *lwp = (struct lwp_info *) entry;
2513
2514 /* Ignore EXCEPT. */
2515 if (lwp == except)
2516 return 0;
2517
2518 if (lwp->stopped)
2519 return 0;
2520
2521 send_sigstop (lwp);
2522 return 0;
2523 }
2524
2525 /* Increment the suspend count of an LWP, and stop it, if not stopped
2526 yet. */
2527 static int
2528 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2529 void *except)
2530 {
2531 struct lwp_info *lwp = (struct lwp_info *) entry;
2532
2533 /* Ignore EXCEPT. */
2534 if (lwp == except)
2535 return 0;
2536
2537 lwp->suspended++;
2538
2539 return send_sigstop_callback (entry, except);
2540 }
2541
2542 static void
2543 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2544 {
2545 /* It's dead, really. */
2546 lwp->dead = 1;
2547
2548 /* Store the exit status for later. */
2549 lwp->status_pending_p = 1;
2550 lwp->status_pending = wstat;
2551
2552 /* Prevent trying to stop it. */
2553 lwp->stopped = 1;
2554
2555 /* No further stops are expected from a dead lwp. */
2556 lwp->stop_expected = 0;
2557 }
2558
2559 static void
2560 wait_for_sigstop (struct inferior_list_entry *entry)
2561 {
2562 struct lwp_info *lwp = (struct lwp_info *) entry;
2563 struct thread_info *saved_inferior;
2564 int wstat;
2565 ptid_t saved_tid;
2566 ptid_t ptid;
2567 int pid;
2568
2569 if (lwp->stopped)
2570 {
2571 if (debug_threads)
2572 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2573 lwpid_of (lwp));
2574 return;
2575 }
2576
2577 saved_inferior = current_inferior;
2578 if (saved_inferior != NULL)
2579 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2580 else
2581 saved_tid = null_ptid; /* avoid bogus unused warning */
2582
2583 ptid = lwp->head.id;
2584
2585 if (debug_threads)
2586 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2587
2588 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2589
2590 /* If we stopped with a non-SIGSTOP signal, save it for later
2591 and record the pending SIGSTOP. If the process exited, just
2592 return. */
2593 if (WIFSTOPPED (wstat))
2594 {
2595 if (debug_threads)
2596 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2597 lwpid_of (lwp), WSTOPSIG (wstat));
2598
2599 if (WSTOPSIG (wstat) != SIGSTOP)
2600 {
2601 if (debug_threads)
2602 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2603 lwpid_of (lwp), wstat);
2604
2605 lwp->status_pending_p = 1;
2606 lwp->status_pending = wstat;
2607 }
2608 }
2609 else
2610 {
2611 if (debug_threads)
2612 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2613
2614 lwp = find_lwp_pid (pid_to_ptid (pid));
2615 if (lwp)
2616 {
2617 /* Leave this status pending for the next time we're able to
2618 report it. In the meantime, we'll report this lwp as
2619 dead to GDB, so GDB doesn't try to read registers and
2620 memory from it. This can only happen if this was the
2621 last thread of the process; otherwise, PID is removed
2622 from the thread tables before linux_wait_for_event
2623 returns. */
2624 mark_lwp_dead (lwp, wstat);
2625 }
2626 }
2627
2628 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2629 current_inferior = saved_inferior;
2630 else
2631 {
2632 if (debug_threads)
2633 fprintf (stderr, "Previously current thread died.\n");
2634
2635 if (non_stop)
2636 {
2637 /* We can't change the current inferior behind GDB's back,
2638 otherwise, a subsequent command may apply to the wrong
2639 process. */
2640 current_inferior = NULL;
2641 }
2642 else
2643 {
2644 /* Set a valid thread as current. */
2645 set_desired_inferior (0);
2646 }
2647 }
2648 }
2649
2650 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2651 move it out, because we need to report the stop event to GDB. For
2652 example, if the user puts a breakpoint in the jump pad, it's
2653 because she wants to debug it. */
2654
2655 static int
2656 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2657 {
2658 struct lwp_info *lwp = (struct lwp_info *) entry;
2659 struct thread_info *thread = get_lwp_thread (lwp);
2660
2661 gdb_assert (lwp->suspended == 0);
2662 gdb_assert (lwp->stopped);
2663
2664 /* Allow debugging the jump pad, gdb_collect, etc. */
2665 return (supports_fast_tracepoints ()
2666 && in_process_agent_loaded ()
2667 && (gdb_breakpoint_here (lwp->stop_pc)
2668 || lwp->stopped_by_watchpoint
2669 || thread->last_resume_kind == resume_step)
2670 && linux_fast_tracepoint_collecting (lwp, NULL));
2671 }
2672
2673 static void
2674 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2675 {
2676 struct lwp_info *lwp = (struct lwp_info *) entry;
2677 struct thread_info *thread = get_lwp_thread (lwp);
2678 int *wstat;
2679
2680 gdb_assert (lwp->suspended == 0);
2681 gdb_assert (lwp->stopped);
2682
2683 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2684
2685 /* Allow debugging the jump pad, gdb_collect, etc. */
2686 if (!gdb_breakpoint_here (lwp->stop_pc)
2687 && !lwp->stopped_by_watchpoint
2688 && thread->last_resume_kind != resume_step
2689 && maybe_move_out_of_jump_pad (lwp, wstat))
2690 {
2691 if (debug_threads)
2692 fprintf (stderr,
2693 "LWP %ld needs stabilizing (in jump pad)\n",
2694 lwpid_of (lwp));
2695
2696 if (wstat)
2697 {
2698 lwp->status_pending_p = 0;
2699 enqueue_one_deferred_signal (lwp, wstat);
2700
2701 if (debug_threads)
2702 fprintf (stderr,
2703 "Signal %d for LWP %ld deferred "
2704 "(in jump pad)\n",
2705 WSTOPSIG (*wstat), lwpid_of (lwp));
2706 }
2707
2708 linux_resume_one_lwp (lwp, 0, 0, NULL);
2709 }
2710 else
2711 lwp->suspended++;
2712 }
2713
2714 static int
2715 lwp_running (struct inferior_list_entry *entry, void *data)
2716 {
2717 struct lwp_info *lwp = (struct lwp_info *) entry;
2718
2719 if (lwp->dead)
2720 return 0;
2721 if (lwp->stopped)
2722 return 0;
2723 return 1;
2724 }
2725
2726 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2727 If SUSPEND, then also increase the suspend count of every LWP,
2728 except EXCEPT. */
2729
2730 static void
2731 stop_all_lwps (int suspend, struct lwp_info *except)
2732 {
2733 stopping_threads = 1;
2734
2735 if (suspend)
2736 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2737 else
2738 find_inferior (&all_lwps, send_sigstop_callback, except);
2739 for_each_inferior (&all_lwps, wait_for_sigstop);
2740 stopping_threads = 0;
2741 }
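
/* A stop_all_lwps (1, except) call is expected to be undone by a
   matching unstop_all_lwps (1, except) (or by unsuspend_all_lwps
   followed by proceed_all_lwps), so that the suspend counts bumped
   here are balanced again; see the exit-jump-pad breakpoint
   handling in linux_wait_1 for an example.  */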
2742
2743 /* Resume execution of LWP.
2744 If STEP is nonzero, single-step it.
2745 If SIGNAL is nonzero, give it that signal. */
2746
2747 static void
2748 linux_resume_one_lwp (struct lwp_info *lwp,
2749 int step, int signal, siginfo_t *info)
2750 {
2751 struct thread_info *saved_inferior;
2752 int fast_tp_collecting;
2753
2754 if (lwp->stopped == 0)
2755 return;
2756
2757 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2758
2759 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2760
2761 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2762 user used the "jump" command, or "set $pc = foo"). */
2763 if (lwp->stop_pc != get_pc (lwp))
2764 {
2765 /* Collecting 'while-stepping' actions doesn't make sense
2766 anymore. */
2767 release_while_stepping_state_list (get_lwp_thread (lwp));
2768 }
2769
2770 /* If we have pending signals or status, and a new signal, enqueue the
2771 signal. Also enqueue the signal if we are waiting to reinsert a
2772 breakpoint; it will be picked up again below. */
2773 if (signal != 0
2774 && (lwp->status_pending_p
2775 || lwp->pending_signals != NULL
2776 || lwp->bp_reinsert != 0
2777 || fast_tp_collecting))
2778 {
2779 struct pending_signals *p_sig;
2780 p_sig = xmalloc (sizeof (*p_sig));
2781 p_sig->prev = lwp->pending_signals;
2782 p_sig->signal = signal;
2783 if (info == NULL)
2784 memset (&p_sig->info, 0, sizeof (siginfo_t));
2785 else
2786 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2787 lwp->pending_signals = p_sig;
2788 }
2789
2790 if (lwp->status_pending_p)
2791 {
2792 if (debug_threads)
2793 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2794 " has pending status\n",
2795 lwpid_of (lwp), step ? "step" : "continue", signal,
2796 lwp->stop_expected ? "expected" : "not expected");
2797 return;
2798 }
2799
2800 saved_inferior = current_inferior;
2801 current_inferior = get_lwp_thread (lwp);
2802
2803 if (debug_threads)
2804 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2805 lwpid_of (lwp), step ? "step" : "continue", signal,
2806 lwp->stop_expected ? "expected" : "not expected");
2807
2808 /* This bit needs some thinking about. If we get a signal that
2809 we must report while a single-step reinsert is still pending,
2810 we often end up resuming the thread. It might be better to
2811 (ew) allow a stack of pending events; then we could be sure that
2812 the reinsert happened right away and not lose any signals.
2813
2814 Making this stack would also shrink the window in which breakpoints are
2815 uninserted (see comment in linux_wait_for_lwp) but not enough for
2816 complete correctness, so it won't solve that problem. It may be
2817 worthwhile just to solve this one, however. */
2818 if (lwp->bp_reinsert != 0)
2819 {
2820 if (debug_threads)
2821 fprintf (stderr, " pending reinsert at 0x%s\n",
2822 paddress (lwp->bp_reinsert));
2823
2824 if (can_hardware_single_step ())
2825 {
2826 if (fast_tp_collecting == 0)
2827 {
2828 if (step == 0)
2829 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2830 if (lwp->suspended)
2831 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
2832 lwp->suspended);
2833 }
2834
2835 step = 1;
2836 }
2837
2838 /* Postpone any pending signal. It was enqueued above. */
2839 signal = 0;
2840 }
2841
2842 if (fast_tp_collecting == 1)
2843 {
2844 if (debug_threads)
2845 fprintf (stderr, "\
2846 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
2847 lwpid_of (lwp));
2848
2849 /* Postpone any pending signal. It was enqueued above. */
2850 signal = 0;
2851 }
2852 else if (fast_tp_collecting == 2)
2853 {
2854 if (debug_threads)
2855 fprintf (stderr, "\
2856 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
2857 lwpid_of (lwp));
2858
2859 if (can_hardware_single_step ())
2860 step = 1;
2861 else
2862 fatal ("moving out of jump pad single-stepping"
2863 " not implemented on this target");
2864
2865 /* Postpone any pending signal. It was enqueued above. */
2866 signal = 0;
2867 }
2868
2869 /* If we have while-stepping actions in this thread, set it stepping.
2870 If we have a signal to deliver, it may or may not be set to
2871 SIG_IGN, we don't know. Assume so, and allow collecting
2872 while-stepping into a signal handler. A possible smart thing to
2873 do would be to set an internal breakpoint at the signal return
2874 address, continue, and carry on catching this while-stepping
2875 action only when that breakpoint is hit. A future
2876 enhancement. */
2877 if (get_lwp_thread (lwp)->while_stepping != NULL
2878 && can_hardware_single_step ())
2879 {
2880 if (debug_threads)
2881 fprintf (stderr,
2882 "lwp %ld has a while-stepping action -> forcing step.\n",
2883 lwpid_of (lwp));
2884 step = 1;
2885 }
2886
2887 if (debug_threads && the_low_target.get_pc != NULL)
2888 {
2889 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
2890 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
2891 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
2892 }
2893
2894 /* If we have pending signals, consume one unless we are trying to
2895 reinsert a breakpoint or we're trying to finish a fast tracepoint
2896 collect. */
2897 if (lwp->pending_signals != NULL
2898 && lwp->bp_reinsert == 0
2899 && fast_tp_collecting == 0)
2900 {
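/* The pending_signals list is linked newest-first through the
   `prev' pointers, so walking to the tail here pops the oldest
   entry: deferred signals are re-delivered in arrival order.  */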
2901 struct pending_signals **p_sig;
2902
2903 p_sig = &lwp->pending_signals;
2904 while ((*p_sig)->prev != NULL)
2905 p_sig = &(*p_sig)->prev;
2906
2907 signal = (*p_sig)->signal;
2908 if ((*p_sig)->info.si_signo != 0)
2909 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
2910
2911 free (*p_sig);
2912 *p_sig = NULL;
2913 }
2914
2915 if (the_low_target.prepare_to_resume != NULL)
2916 the_low_target.prepare_to_resume (lwp);
2917
2918 regcache_invalidate_one ((struct inferior_list_entry *)
2919 get_lwp_thread (lwp));
2920 errno = 0;
2921 lwp->stopped = 0;
2922 lwp->stopped_by_watchpoint = 0;
2923 lwp->stepping = step;
2924 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
2925 /* Coerce to a uintptr_t first to avoid potential gcc warning
2926 of coercing an 8 byte integer to a 4 byte pointer. */
2927 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
2928
2929 current_inferior = saved_inferior;
2930 if (errno)
2931 {
2932 /* ESRCH from ptrace either means that the thread was already
2933 running (an error) or that it is gone (a race condition). If
2934 it's gone, we will get a notification the next time we wait,
2935 so we can ignore the error. We could differentiate these
2936 two, but it's tricky without waiting; the thread still exists
2937 as a zombie, so sending it signal 0 would succeed. So just
2938 ignore ESRCH. */
2939 if (errno == ESRCH)
2940 return;
2941
2942 perror_with_name ("ptrace");
2943 }
2944 }
2945
2946 struct thread_resume_array
2947 {
2948 struct thread_resume *resume;
2949 size_t n;
2950 };
2951
2952 /* This function is called once per thread. We look up the thread
2953 in RESUME_PTR, and mark the thread with a pointer to the appropriate
2954 resume request.
2955
2956 This algorithm is O(threads * resume elements), but the number of
2957 resume elements is small (and will remain small at least until GDB
2958 supports thread suspension). */
2959 static int
2960 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
2961 {
2962 struct lwp_info *lwp;
2963 struct thread_info *thread;
2964 int ndx;
2965 struct thread_resume_array *r;
2966
2967 thread = (struct thread_info *) entry;
2968 lwp = get_thread_lwp (thread);
2969 r = arg;
2970
2971 for (ndx = 0; ndx < r->n; ndx++)
2972 {
2973 ptid_t ptid = r->resume[ndx].thread;
2974 if (ptid_equal (ptid, minus_one_ptid)
2975 || ptid_equal (ptid, entry->id)
2976 || (ptid_is_pid (ptid)
2977 && (ptid_get_pid (ptid) == pid_of (lwp)))
2978 || (ptid_get_lwp (ptid) == -1
2979 && (ptid_get_pid (ptid) == pid_of (lwp))))
2980 {
2981 if (r->resume[ndx].kind == resume_stop
2982 && thread->last_resume_kind == resume_stop)
2983 {
2984 if (debug_threads)
2985 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
2986 thread->last_status.kind == TARGET_WAITKIND_STOPPED
2987 ? "stopped"
2988 : "stopping",
2989 lwpid_of (lwp));
2990
2991 continue;
2992 }
2993
2994 lwp->resume = &r->resume[ndx];
2995 thread->last_resume_kind = lwp->resume->kind;
2996
2997 /* If we had a deferred signal to report, dequeue one now.
2998 This can happen if LWP gets more than one signal while
2999 trying to get out of a jump pad. */
3000 if (lwp->stopped
3001 && !lwp->status_pending_p
3002 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3003 {
3004 lwp->status_pending_p = 1;
3005
3006 if (debug_threads)
3007 fprintf (stderr,
3008 "Dequeueing deferred signal %d for LWP %ld, "
3009 "leaving status pending.\n",
3010 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3011 }
3012
3013 return 0;
3014 }
3015 }
3016
3017 /* No resume action for this thread. */
3018 lwp->resume = NULL;
3019
3020 return 0;
3021 }
3022
3023
3024 /* Set *FLAG_P if this lwp has an interesting status pending. */
3025 static int
3026 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3027 {
3028 struct lwp_info *lwp = (struct lwp_info *) entry;
3029
3030 /* LWPs which will not be resumed are not interesting, because
3031 we might not wait for them next time through linux_wait. */
3032 if (lwp->resume == NULL)
3033 return 0;
3034
3035 if (lwp->status_pending_p)
3036 * (int *) flag_p = 1;
3037
3038 return 0;
3039 }
3040
3041 /* Return 1 if this lwp, which GDB wants running, is stopped at an
3042 internal breakpoint that we need to step over. This assumes that
3043 any required STOP_PC adjustment has already been propagated to the
3044 inferior's regcache. */
3045
3046 static int
3047 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3048 {
3049 struct lwp_info *lwp = (struct lwp_info *) entry;
3050 struct thread_info *thread;
3051 struct thread_info *saved_inferior;
3052 CORE_ADDR pc;
3053
3054 /* LWPs which will not be resumed are not interesting, because we
3055 might not wait for them next time through linux_wait. */
3056
3057 if (!lwp->stopped)
3058 {
3059 if (debug_threads)
3060 fprintf (stderr,
3061 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3062 lwpid_of (lwp));
3063 return 0;
3064 }
3065
3066 thread = get_lwp_thread (lwp);
3067
3068 if (thread->last_resume_kind == resume_stop)
3069 {
3070 if (debug_threads)
3071 fprintf (stderr,
3072 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3073 lwpid_of (lwp));
3074 return 0;
3075 }
3076
3077 gdb_assert (lwp->suspended >= 0);
3078
3079 if (lwp->suspended)
3080 {
3081 if (debug_threads)
3082 fprintf (stderr,
3083 "Need step over [LWP %ld]? Ignoring, suspended\n",
3084 lwpid_of (lwp));
3085 return 0;
3086 }
3087
3088 if (!lwp->need_step_over)
3089 {
3090 if (debug_threads)
3091 fprintf (stderr,
3092 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
return 0;
3093 }
3094
3095 if (lwp->status_pending_p)
3096 {
3097 if (debug_threads)
3098 fprintf (stderr,
3099 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3100 lwpid_of (lwp));
3101 return 0;
3102 }
3103
3104 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3105 or we have. */
3106 pc = get_pc (lwp);
3107
3108 /* If the PC has changed since we stopped, then don't do anything,
3109 and let the breakpoint/tracepoint be hit. This happens if, for
3110 instance, GDB handled the decr_pc_after_break subtraction itself,
3111 GDB is OOL stepping this thread, or the user has issued a "jump"
3112 command, or poked the thread's registers herself. */
3113 if (pc != lwp->stop_pc)
3114 {
3115 if (debug_threads)
3116 fprintf (stderr,
3117 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3118 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3119 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3120
3121 lwp->need_step_over = 0;
3122 return 0;
3123 }
3124
3125 saved_inferior = current_inferior;
3126 current_inferior = thread;
3127
3128 /* We can only step over breakpoints we know about. */
3129 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3130 {
3131 /* Don't step over a breakpoint that GDB expects to hit
3132 though. */
3133 if (gdb_breakpoint_here (pc))
3134 {
3135 if (debug_threads)
3136 fprintf (stderr,
3137 "Need step over [LWP %ld]? yes, but found"
3138 " GDB breakpoint at 0x%s; skipping step over\n",
3139 lwpid_of (lwp), paddress (pc));
3140
3141 current_inferior = saved_inferior;
3142 return 0;
3143 }
3144 else
3145 {
3146 if (debug_threads)
3147 fprintf (stderr,
3148 "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
3149 lwpid_of (lwp), paddress (pc));
3150
3151 /* We've found an lwp that needs stepping over --- return 1 so
3152 that find_inferior stops looking. */
3153 current_inferior = saved_inferior;
3154
3155 /* If the step over is cancelled, this is set again. */
3156 lwp->need_step_over = 0;
3157 return 1;
3158 }
3159 }
3160
3161 current_inferior = saved_inferior;
3162
3163 if (debug_threads)
3164 fprintf (stderr,
3165 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3166 lwpid_of (lwp), paddress (pc));
3167
3168 return 0;
3169 }
3170
3171 /* Start a step-over operation on LWP. When LWP stops at a
3172 breakpoint, to make progress we need to get the breakpoint out
3173 of the way. If we let other threads run while we do that, they may
3174 pass by the breakpoint location and miss hitting it. To avoid
3175 that, a step-over momentarily stops all threads while LWP is
3176 single-stepped with the breakpoint temporarily uninserted from
3177 the inferior. When the single-step finishes, we reinsert the
3178 breakpoint, and let all threads that are supposed to be running
3179 run again.
3180
3181 On targets that don't support hardware single-step, we don't
3182 currently support full software single-stepping. Instead, we only
3183 support stepping over the thread event breakpoint, by asking the
3184 low target where to place a reinsert breakpoint. Since the
3185 breakpoint being stepped over is assumed to be a thread event
3186 breakpoint, the return address of the current function is usually
3187 a good enough place to set the reinsert breakpoint. */
3188
3189 static int
3190 start_step_over (struct lwp_info *lwp)
3191 {
3192 struct thread_info *saved_inferior;
3193 CORE_ADDR pc;
3194 int step;
3195
3196 if (debug_threads)
3197 fprintf (stderr,
3198 "Starting step-over on LWP %ld. Stopping all threads\n",
3199 lwpid_of (lwp));
3200
3201 stop_all_lwps (1, lwp);
3202 gdb_assert (lwp->suspended == 0);
3203
3204 if (debug_threads)
3205 fprintf (stderr, "Done stopping all threads for step-over.\n");
3206
3207 /* Note, we should always reach here with an already adjusted PC,
3208 either by GDB (if we're resuming due to GDB's request), or by our
3209 caller, if we just finished handling an internal breakpoint GDB
3210 shouldn't care about. */
3211 pc = get_pc (lwp);
3212
3213 saved_inferior = current_inferior;
3214 current_inferior = get_lwp_thread (lwp);
3215
3216 lwp->bp_reinsert = pc;
3217 uninsert_breakpoints_at (pc);
3218 uninsert_fast_tracepoint_jumps_at (pc);
3219
3220 if (can_hardware_single_step ())
3221 {
3222 step = 1;
3223 }
3224 else
3225 {
3226 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3227 set_reinsert_breakpoint (raddr);
3228 step = 0;
3229 }
3230
3231 current_inferior = saved_inferior;
3232
3233 linux_resume_one_lwp (lwp, step, 0, NULL);
3234
3235 /* Require next event from this LWP. */
3236 step_over_bkpt = lwp->head.id;
3237 return 1;
3238 }
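
/* In outline, a full step-over is therefore:

     stop_all_lwps (1, lwp);            -- suspend everyone else
     uninsert breakpoint/jump at PC;
     single-step LWP (or run it to a reinsert breakpoint);
     ... the step finishes; finish_step_over reinserts ...
     unsuspend and proceed the other LWPs.

   Setting step_over_bkpt non-null is what makes linux_wait_1 do a
   blocking wait on just this LWP in the meantime.  */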
3239
3240 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3241 start_step_over, if still there, and delete any reinsert
3242 breakpoints we've set, on non hardware single-step targets. */
3243
3244 static int
3245 finish_step_over (struct lwp_info *lwp)
3246 {
3247 if (lwp->bp_reinsert != 0)
3248 {
3249 if (debug_threads)
3250 fprintf (stderr, "Finished step over.\n");
3251
3252 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3253 may be no breakpoint to reinsert there by now. */
3254 reinsert_breakpoints_at (lwp->bp_reinsert);
3255 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3256
3257 lwp->bp_reinsert = 0;
3258
3259 /* Delete any software-single-step reinsert breakpoints. No
3260 longer needed. We don't have to worry about other threads
3261 hitting this trap, and later not being able to explain it,
3262 because we were stepping over a breakpoint, and we hold all
3263 threads but LWP stopped while doing that. */
3264 if (!can_hardware_single_step ())
3265 delete_reinsert_breakpoints ();
3266
3267 step_over_bkpt = null_ptid;
3268 return 1;
3269 }
3270 else
3271 return 0;
3272 }
3273
3274 /* This function is called once per thread. We check the thread's resume
3275 request, which will tell us whether to resume, step, or leave the thread
3276 stopped; and what signal, if any, it should be sent.
3277
3278 For threads which we aren't explicitly told otherwise, we preserve
3279 the stepping flag; this is used for stepping over gdbserver-placed
3280 breakpoints.
3281
3282 If a pending status is set in any thread, we queue any needed
3283 signals, since we won't actually resume. We already have a pending
3284 event to report, so we don't need to preserve any step requests;
3285 they should be re-issued if necessary. */
3286
3287 static int
3288 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3289 {
3290 struct lwp_info *lwp;
3291 struct thread_info *thread;
3292 int step;
3293 int leave_all_stopped = * (int *) arg;
3294 int leave_pending;
3295
3296 thread = (struct thread_info *) entry;
3297 lwp = get_thread_lwp (thread);
3298
3299 if (lwp->resume == NULL)
3300 return 0;
3301
3302 if (lwp->resume->kind == resume_stop)
3303 {
3304 if (debug_threads)
3305 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3306
3307 if (!lwp->stopped)
3308 {
3309 if (debug_threads)
3310 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3311
3312 /* Stop the thread, and wait for the event asynchronously,
3313 through the event loop. */
3314 send_sigstop (lwp);
3315 }
3316 else
3317 {
3318 if (debug_threads)
3319 fprintf (stderr, "already stopped LWP %ld\n",
3320 lwpid_of (lwp));
3321
3322 /* The LWP may have been stopped in an internal event that
3323 was not meant to be notified back to GDB (e.g., gdbserver
3324 breakpoint), so we should be reporting a stop event in
3325 this case too. */
3326
3327 /* If the thread already has a pending SIGSTOP, this is a
3328 no-op. Otherwise, something later will presumably resume
3329 the thread and this will cause it to cancel any pending
3330 operation, due to last_resume_kind == resume_stop. If
3331 the thread already has a pending status to report, we
3332 will still report it the next time we wait - see
3333 status_pending_p_callback. */
3334 send_sigstop (lwp);
3335 }
3336
3337 /* For stop requests, we're done. */
3338 lwp->resume = NULL;
3339 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3340 return 0;
3341 }
3342
3343 /* If this thread which is about to be resumed has a pending status,
3344 then don't resume any threads - we can just report the pending
3345 status. Make sure to queue any signals that would otherwise be
3346 sent. In all-stop mode, we base this decision on whether *any*
3347 thread has a pending status. If there's a thread that needs the
3348 step-over-breakpoint dance, then don't resume any other thread
3349 but that particular one. */
3350 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3351
3352 if (!leave_pending)
3353 {
3354 if (debug_threads)
3355 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3356
3357 step = (lwp->resume->kind == resume_step);
3358 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3359 }
3360 else
3361 {
3362 if (debug_threads)
3363 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3364
3365 /* If we have a new signal, enqueue the signal. */
3366 if (lwp->resume->sig != 0)
3367 {
3368 struct pending_signals *p_sig;
3369 p_sig = xmalloc (sizeof (*p_sig));
3370 p_sig->prev = lwp->pending_signals;
3371 p_sig->signal = lwp->resume->sig;
3372 memset (&p_sig->info, 0, sizeof (siginfo_t));
3373
3374 /* If this is the same signal we were previously stopped by,
3375 make sure to queue its siginfo. We can ignore the return
3376 value of ptrace; if it fails, we'll skip
3377 PTRACE_SETSIGINFO. */
3378 if (WIFSTOPPED (lwp->last_status)
3379 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3380 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3381
3382 lwp->pending_signals = p_sig;
3383 }
3384 }
3385
3386 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3387 lwp->resume = NULL;
3388 return 0;
3389 }
3390
3391 static void
3392 linux_resume (struct thread_resume *resume_info, size_t n)
3393 {
3394 struct thread_resume_array array = { resume_info, n };
3395 struct lwp_info *need_step_over = NULL;
3396 int any_pending;
3397 int leave_all_stopped;
3398
3399 find_inferior (&all_threads, linux_set_resume_request, &array);
3400
3401 /* If there is a thread which would otherwise be resumed, which has
3402 a pending status, then don't resume any threads - we can just
3403 report the pending status. Make sure to queue any signals that
3404 would otherwise be sent. In non-stop mode, we'll apply this
3405 logic to each thread individually. We consume all pending events
3406 before considering starting a step-over (in all-stop). */
3407 any_pending = 0;
3408 if (!non_stop)
3409 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3410
3411 /* If there is a thread which would otherwise be resumed, which is
3412 stopped at a breakpoint that needs stepping over, then don't
3413 resume any threads - have it step over the breakpoint with all
3414 other threads stopped, then resume all threads again. Make sure
3415 to queue any signals that would otherwise be delivered or
3416 queued. */
3417 if (!any_pending && supports_breakpoints ())
3418 need_step_over
3419 = (struct lwp_info *) find_inferior (&all_lwps,
3420 need_step_over_p, NULL);
3421
3422 leave_all_stopped = (need_step_over != NULL || any_pending);
3423
3424 if (debug_threads)
3425 {
3426 if (need_step_over != NULL)
3427 fprintf (stderr, "Not resuming all, need step over\n");
3428 else if (any_pending)
3429 fprintf (stderr,
3430 "Not resuming, all-stop and found "
3431 "an LWP with pending status\n");
3432 else
3433 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3434 }
3435
3436 /* Even if we're leaving threads stopped, queue all signals we'd
3437 otherwise deliver. */
3438 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3439
3440 if (need_step_over)
3441 start_step_over (need_step_over);
3442 }
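
/* For illustration (hypothetical values, field order as in struct
   thread_resume): a GDB "vCont;s:p4a0.4a1;c" would arrive here
   roughly as

     struct thread_resume r[2];
     r[0].thread = ptid_build (0x4a0, 0x4a1, 0);
     r[0].kind = resume_step;     r[0].sig = 0;
     r[1].thread = minus_one_ptid;
     r[1].kind = resume_continue; r[1].sig = 0;
     linux_resume (r, 2);

   linux_set_resume_request takes the first matching array entry for
   each thread, so the specific action precedes the default one.  */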
3443
3444 /* This function is called once per thread. We check the thread's
3445 last resume request, which will tell us whether to resume, step, or
3446 leave the thread stopped. Any signal the client requested to be
3447 delivered has already been enqueued at this point.
3448
3449 If any thread that GDB wants running is stopped at an internal
3450 breakpoint that needs stepping over, we start a step-over operation
3451 on that particular thread, and leave all others stopped. */
3452
3453 static int
3454 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3455 {
3456 struct lwp_info *lwp = (struct lwp_info *) entry;
3457 struct thread_info *thread;
3458 int step;
3459
3460 if (lwp == except)
3461 return 0;
3462
3463 if (debug_threads)
3464 fprintf (stderr,
3465 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3466
3467 if (!lwp->stopped)
3468 {
3469 if (debug_threads)
3470 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3471 return 0;
3472 }
3473
3474 thread = get_lwp_thread (lwp);
3475
3476 if (thread->last_resume_kind == resume_stop
3477 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3478 {
3479 if (debug_threads)
3480 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3481 lwpid_of (lwp));
3482 return 0;
3483 }
3484
3485 if (lwp->status_pending_p)
3486 {
3487 if (debug_threads)
3488 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3489 lwpid_of (lwp));
3490 return 0;
3491 }
3492
3493 gdb_assert (lwp->suspended >= 0);
3494
3495 if (lwp->suspended)
3496 {
3497 if (debug_threads)
3498 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3499 return 0;
3500 }
3501
3502 if (thread->last_resume_kind == resume_stop)
3503 {
3504 /* We haven't reported this LWP as stopped yet (otherwise, the
3505 last_status.kind check above would catch it, and we wouldn't
3506 reach here). This LWP may have been momentarily paused by a
3507 stop_all_lwps call while handling, for example, another LWP's
3508 step-over. In that case, the pending expected SIGSTOP signal
3509 that was queued at vCont;t handling time will have already
3510 been consumed by wait_for_sigstop, and so we need to requeue
3511 another one here. Note that if the LWP already has a SIGSTOP
3512 pending, this is a no-op. */
3513
3514 if (debug_threads)
3515 fprintf (stderr,
3516 "Client wants LWP %ld to stop. "
3517 "Making sure it has a SIGSTOP pending\n",
3518 lwpid_of (lwp));
3519
3520 send_sigstop (lwp);
3521 }
3522
3523 step = thread->last_resume_kind == resume_step;
3524 linux_resume_one_lwp (lwp, step, 0, NULL);
3525 return 0;
3526 }
3527
3528 static int
3529 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3530 {
3531 struct lwp_info *lwp = (struct lwp_info *) entry;
3532
3533 if (lwp == except)
3534 return 0;
3535
3536 lwp->suspended--;
3537 gdb_assert (lwp->suspended >= 0);
3538
3539 return proceed_one_lwp (entry, except);
3540 }
3541
3542 /* When we finish a step-over, set threads running again. If there's
3543 another thread that may need a step-over, now's the time to start
3544 it. Eventually, we'll move all threads past their breakpoints. */
3545
3546 static void
3547 proceed_all_lwps (void)
3548 {
3549 struct lwp_info *need_step_over;
3550
3551 /* If there is a thread which would otherwise be resumed, which is
3552 stopped at a breakpoint that needs stepping over, then don't
3553 resume any threads - have it step over the breakpoint with all
3554 other threads stopped, then resume all threads again. */
3555
3556 if (supports_breakpoints ())
3557 {
3558 need_step_over
3559 = (struct lwp_info *) find_inferior (&all_lwps,
3560 need_step_over_p, NULL);
3561
3562 if (need_step_over != NULL)
3563 {
3564 if (debug_threads)
3565 fprintf (stderr, "proceed_all_lwps: found "
3566 "thread %ld needing a step-over\n",
3567 lwpid_of (need_step_over));
3568
3569 start_step_over (need_step_over);
3570 return;
3571 }
3572 }
3573
3574 if (debug_threads)
3575 fprintf (stderr, "Proceeding, no step-over needed\n");
3576
3577 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3578 }
3579
3580 /* Stopped LWPs that the client wanted to be running, and that don't
3581 have pending statuses, are set to run again, except for EXCEPT, if
3582 not NULL. This undoes a stop_all_lwps call. */
3583
3584 static void
3585 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3586 {
3587 if (debug_threads)
3588 {
3589 if (except)
3590 fprintf (stderr,
3591 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3592 else
3593 fprintf (stderr,
3594 "unstopping all lwps\n");
3595 }
3596
3597 if (unsuspend)
3598 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3599 else
3600 find_inferior (&all_lwps, proceed_one_lwp, except);
3601 }
3602
3603 #ifdef HAVE_LINUX_USRREGS
3604
3605 int
3606 register_addr (int regnum)
3607 {
3608 int addr;
3609
3610 if (regnum < 0 || regnum >= the_low_target.num_regs)
3611 error ("Invalid register number %d.", regnum);
3612
3613 addr = the_low_target.regmap[regnum];
3614
3615 return addr;
3616 }
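
/* the_low_target.regmap is indexed by gdbserver register number and
   holds the corresponding PTRACE_PEEKUSER/POKEUSER offsets, with -1
   marking registers that can't be transferred this way (see the
   regaddr == -1 checks below).  A port's table looks something like
   (hypothetical layout):

     static int my_regmap[] = { 0, 4, 8, 12, -1, 16, ... };
*/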
3617
3618 /* Fetch one register. */
3619 static void
3620 fetch_register (struct regcache *regcache, int regno)
3621 {
3622 CORE_ADDR regaddr;
3623 int i, size;
3624 char *buf;
3625 int pid;
3626
3627 if (regno >= the_low_target.num_regs)
3628 return;
3629 if ((*the_low_target.cannot_fetch_register) (regno))
3630 return;
3631
3632 regaddr = register_addr (regno);
3633 if (regaddr == -1)
3634 return;
3635
3636 pid = lwpid_of (get_thread_lwp (current_inferior));
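
/* Round the register size up to a whole number of ptrace transfer
   words; e.g. a 10-byte register with an 8-byte PTRACE_XFER_TYPE is
   transferred as 16 bytes.  The (x + n - 1) & -n idiom requires n
   to be a power of two, which sizeof (PTRACE_XFER_TYPE) is.  */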
3637 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3638 & - sizeof (PTRACE_XFER_TYPE));
3639 buf = alloca (size);
3640 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3641 {
3642 errno = 0;
3643 *(PTRACE_XFER_TYPE *) (buf + i) =
3644 ptrace (PTRACE_PEEKUSER, pid,
3645 /* Coerce to a uintptr_t first to avoid potential gcc warning
3646 of coercing an 8 byte integer to a 4 byte pointer. */
3647 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
3648 regaddr += sizeof (PTRACE_XFER_TYPE);
3649 if (errno != 0)
3650 error ("reading register %d: %s", regno, strerror (errno));
3651 }
3652
3653 if (the_low_target.supply_ptrace_register)
3654 the_low_target.supply_ptrace_register (regcache, regno, buf);
3655 else
3656 supply_register (regcache, regno, buf);
3657 }
3658
3659 /* Fetch all registers, or just one, from the child process. */
3660 static void
3661 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3662 {
3663 if (regno == -1)
3664 for (regno = 0; regno < the_low_target.num_regs; regno++)
3665 fetch_register (regcache, regno);
3666 else
3667 fetch_register (regcache, regno);
3668 }
3669
3670 /* Store our register values back into the inferior.
3671 If REGNO is -1, do this for all registers.
3672 Otherwise, REGNO specifies which register (so we can save time). */
3673 static void
3674 usr_store_inferior_registers (struct regcache *regcache, int regno)
3675 {
3676 CORE_ADDR regaddr;
3677 int i, size;
3678 char *buf;
3679 int pid;
3680
3681 if (regno >= 0)
3682 {
3683 if (regno >= the_low_target.num_regs)
3684 return;
3685
3686 if ((*the_low_target.cannot_store_register) (regno) == 1)
3687 return;
3688
3689 regaddr = register_addr (regno);
3690 if (regaddr == -1)
3691 return;
3692 errno = 0;
3693 size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
3694 & - sizeof (PTRACE_XFER_TYPE);
3695 buf = alloca (size);
3696 memset (buf, 0, size);
3697
3698 if (the_low_target.collect_ptrace_register)
3699 the_low_target.collect_ptrace_register (regcache, regno, buf);
3700 else
3701 collect_register (regcache, regno, buf);
3702
3703 pid = lwpid_of (get_thread_lwp (current_inferior));
3704 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
3705 {
3706 errno = 0;
3707 ptrace (PTRACE_POKEUSER, pid,
3708 /* Coerce to a uintptr_t first to avoid potential gcc warning
3709 about coercing an 8 byte integer to a 4 byte pointer. */
3710 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
3711 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
3712 if (errno != 0)
3713 {
3714 /* At this point, ESRCH should mean the process is
3715 already gone, in which case we simply ignore attempts
3716 to change its registers. See also the related
3717 comment in linux_resume_one_lwp. */
3718 if (errno == ESRCH)
3719 return;
3720
3721 if ((*the_low_target.cannot_store_register) (regno) == 0)
3722 error ("writing register %d: %s", regno, strerror (errno));
3723 }
3724 regaddr += sizeof (PTRACE_XFER_TYPE);
3725 }
3726 }
3727 else
3728 for (regno = 0; regno < the_low_target.num_regs; regno++)
3729 usr_store_inferior_registers (regcache, regno);
3730 }
3731 #endif /* HAVE_LINUX_USRREGS */
3732
3733
3734
3735 #ifdef HAVE_LINUX_REGSETS
3736
3737 static int
3738 regsets_fetch_inferior_registers (struct regcache *regcache)
3739 {
3740 struct regset_info *regset;
3741 int saw_general_regs = 0;
3742 int pid;
3743 struct iovec iov;
3744
3745 regset = target_regsets;
3746
3747 pid = lwpid_of (get_thread_lwp (current_inferior));
3748 while (regset->size >= 0)
3749 {
3750 void *buf, *data;
3751 int nt_type, res;
3752
3753 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3754 {
3755 regset ++;
3756 continue;
3757 }
3758
3759 buf = xmalloc (regset->size);
3760
3761 nt_type = regset->nt_type;
3762 if (nt_type)
3763 {
3764 iov.iov_base = buf;
3765 iov.iov_len = regset->size;
3766 data = (void *) &iov;
3767 }
3768 else
3769 data = buf;
3770
3771 #ifndef __sparc__
3772 res = ptrace (regset->get_request, pid, nt_type, data);
3773 #else
3774 res = ptrace (regset->get_request, pid, data, nt_type);
3775 #endif
3776 if (res < 0)
3777 {
3778 if (errno == EIO)
3779 {
3780 /* If we get EIO on a regset, do not try it again for
3781 this process. */
3782 disabled_regsets[regset - target_regsets] = 1;
3783 free (buf);
3784 continue;
3785 }
3786 else
3787 {
3788 char s[256];
3789 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3790 pid);
3791 perror (s);
3792 }
3793 }
3794 else if (regset->type == GENERAL_REGS)
3795 saw_general_regs = 1;
3796 regset->store_function (regcache, buf);
3797 regset ++;
3798 free (buf);
3799 }
3800 if (saw_general_regs)
3801 return 0;
3802 else
3803 return 1;
3804 }
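
/* Editorial sketch (guarded out of the build, not part of gdbserver):
   when a regset has a non-zero nt_type, the request used above is the
   PTRACE_GETREGSET-style interface, which takes an NT_* note type as
   the "address" argument and a struct iovec describing the buffer; on
   success the kernel updates iov_len to the number of bytes it
   actually wrote.  Assuming the headers provide PTRACE_GETREGSET and
   NT_PRSTATUS, and with a hypothetical helper name, a minimal call
   looks like this:  */
#if 0
static int
example_fetch_gregs (int pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;
  /* NT_PRSTATUS selects the general-purpose register set.  */
  if (ptrace (PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) < 0)
    return -1;
  return (int) iov.iov_len;	/* Bytes the kernel filled in.  */
}
#endif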
3805
3806 static int
3807 regsets_store_inferior_registers (struct regcache *regcache)
3808 {
3809 struct regset_info *regset;
3810 int saw_general_regs = 0;
3811 int pid;
3812 struct iovec iov;
3813
3814 regset = target_regsets;
3815
3816 pid = lwpid_of (get_thread_lwp (current_inferior));
3817 while (regset->size >= 0)
3818 {
3819 void *buf, *data;
3820 int nt_type, res;
3821
3822 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3823 {
3824 regset ++;
3825 continue;
3826 }
3827
3828 buf = xmalloc (regset->size);
3829
3830 /* First fill the buffer with the current register set contents,
3831 in case there are any items in the kernel's regset that are
3832 not in gdbserver's regcache. */
3833
3834 nt_type = regset->nt_type;
3835 if (nt_type)
3836 {
3837 iov.iov_base = buf;
3838 iov.iov_len = regset->size;
3839 data = (void *) &iov;
3840 }
3841 else
3842 data = buf;
3843
3844 #ifndef __sparc__
3845 res = ptrace (regset->get_request, pid, nt_type, data);
3846 #else
3847       res = ptrace (regset->get_request, pid, data, nt_type);
3848 #endif
3849
3850 if (res == 0)
3851 {
3852 /* Then overlay our cached registers on that. */
3853 regset->fill_function (regcache, buf);
3854
3855 /* Only now do we write the register set. */
3856 #ifndef __sparc__
3857 res = ptrace (regset->set_request, pid, nt_type, data);
3858 #else
3859 res = ptrace (regset->set_request, pid, data, nt_type);
3860 #endif
3861 }
3862
3863 if (res < 0)
3864 {
3865 if (errno == EIO)
3866 {
3867 /* If we get EIO on a regset, do not try it again for
3868 this process. */
3869 disabled_regsets[regset - target_regsets] = 1;
3870 free (buf);
3871 continue;
3872 }
3873 else if (errno == ESRCH)
3874 {
3875 /* At this point, ESRCH should mean the process is
3876 already gone, in which case we simply ignore attempts
3877 to change its registers. See also the related
3878 comment in linux_resume_one_lwp. */
3879 free (buf);
3880 return 0;
3881 }
3882 else
3883 {
3884 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3885 }
3886 }
3887 else if (regset->type == GENERAL_REGS)
3888 saw_general_regs = 1;
3889 regset ++;
3890 free (buf);
3891 }
3892 if (saw_general_regs)
3893 return 0;
3894 else
3895 return 1;
3897 }
3898
3899 #endif /* HAVE_LINUX_REGSETS */
3900
3901
3902 void
3903 linux_fetch_registers (struct regcache *regcache, int regno)
3904 {
3905 #ifdef HAVE_LINUX_REGSETS
3906 if (regsets_fetch_inferior_registers (regcache) == 0)
3907 return;
3908 #endif
3909 #ifdef HAVE_LINUX_USRREGS
3910 usr_fetch_inferior_registers (regcache, regno);
3911 #endif
3912 }
3913
3914 void
3915 linux_store_registers (struct regcache *regcache, int regno)
3916 {
3917 #ifdef HAVE_LINUX_REGSETS
3918 if (regsets_store_inferior_registers (regcache) == 0)
3919 return;
3920 #endif
3921 #ifdef HAVE_LINUX_USRREGS
3922 usr_store_inferior_registers (regcache, regno);
3923 #endif
3924 }
3925
3926
3927 /* Copy LEN bytes from inferior's memory starting at MEMADDR
3928    to debugger memory starting at MYADDR.  Returns 0 on success, or the value of errno on failure.  */
3929
3930 static int
3931 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
3932 {
3933 register int i;
3934 /* Round starting address down to longword boundary. */
3935 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
3936 /* Round ending address up; get number of longwords that makes. */
3937 register int count
3938 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
3939 / sizeof (PTRACE_XFER_TYPE);
3940 /* Allocate buffer of that many longwords. */
3941 register PTRACE_XFER_TYPE *buffer
3942 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
3943 int fd;
3944 char filename[64];
3945 int pid = lwpid_of (get_thread_lwp (current_inferior));
3946
3947 /* Try using /proc. Don't bother for one word. */
3948 if (len >= 3 * sizeof (long))
3949 {
3950 /* We could keep this file open and cache it - possibly one per
3951 thread. That requires some juggling, but is even faster. */
3952 sprintf (filename, "/proc/%d/mem", pid);
3953 fd = open (filename, O_RDONLY | O_LARGEFILE);
3954 if (fd == -1)
3955 goto no_proc;
3956
3957 /* If pread64 is available, use it. It's faster if the kernel
3958 supports it (only one syscall), and it's 64-bit safe even on
3959 32-bit platforms (for instance, SPARC debugging a SPARC64
3960 application). */
3961 #ifdef HAVE_PREAD64
3962 if (pread64 (fd, myaddr, len, memaddr) != len)
3963 #else
3964 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
3965 #endif
3966 {
3967 close (fd);
3968 goto no_proc;
3969 }
3970
3971 close (fd);
3972 return 0;
3973 }
3974
3975 no_proc:
3976 /* Read all the longwords */
3977 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
3978 {
3979 errno = 0;
3980 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
3981 about coercing an 8 byte integer to a 4 byte pointer. */
3982 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
3983 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
3984 if (errno)
3985 return errno;
3986 }
3987
3988 /* Copy appropriate bytes out of the buffer. */
3989 memcpy (myaddr,
3990 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
3991 len);
3992
3993 return 0;
3994 }
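
/* Editorial note: the 3 * sizeof (long) threshold above reflects the
   relative syscall cost of the two paths.  The /proc route costs a
   fixed handful of syscalls (open, pread64 or lseek+read, close)
   regardless of length, while the PTRACE_PEEKTEXT fallback costs one
   syscall per transferred word, so /proc only pays off once several
   words are being moved.  E.g. on a 64-bit host, a 32-byte read is
   four peeks (four syscalls) versus three syscalls via pread64 on
   /proc/PID/mem.  */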
3995
3996 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
3997 memory at MEMADDR. On failure (cannot write to the inferior)
3998 returns the value of errno. */
3999
4000 static int
4001 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4002 {
4003 register int i;
4004 /* Round starting address down to longword boundary. */
4005 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4006 /* Round ending address up; get number of longwords that makes. */
4007 register int count
4008 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
4009 /* Allocate buffer of that many longwords. */
4010 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4011 int pid = lwpid_of (get_thread_lwp (current_inferior));
4012
4013 if (debug_threads)
4014 {
4015 /* Dump up to four bytes. */
4016 unsigned int val = * (unsigned int *) myaddr;
4017 if (len == 1)
4018 val = val & 0xff;
4019 else if (len == 2)
4020 val = val & 0xffff;
4021 else if (len == 3)
4022 val = val & 0xffffff;
4023 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4024 val, (long)memaddr);
4025 }
4026
4027 /* Fill start and end extra bytes of buffer with existing memory data. */
4028
4029 errno = 0;
4030 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4031 about coercing an 8 byte integer to a 4 byte pointer. */
4032 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4033 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4034 if (errno)
4035 return errno;
4036
4037 if (count > 1)
4038 {
4039 errno = 0;
4040 buffer[count - 1]
4041 = ptrace (PTRACE_PEEKTEXT, pid,
4042 /* Coerce to a uintptr_t first to avoid potential gcc warning
4043 about coercing an 8 byte integer to a 4 byte pointer. */
4044 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4045 * sizeof (PTRACE_XFER_TYPE)),
4046 0);
4047 if (errno)
4048 return errno;
4049 }
4050
4051 /* Copy data to be written over corresponding part of buffer. */
4052
4053 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
4054
4055 /* Write the entire buffer. */
4056
4057 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4058 {
4059 errno = 0;
4060 ptrace (PTRACE_POKETEXT, pid,
4061 /* Coerce to a uintptr_t first to avoid potential gcc warning
4062 about coercing an 8 byte integer to a 4 byte pointer. */
4063 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4064 (PTRACE_ARG4_TYPE) buffer[i]);
4065 if (errno)
4066 return errno;
4067 }
4068
4069 return 0;
4070 }
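
/* Editorial worked example for the partial-word handling above:
   writing LEN == 2 bytes at MEMADDR == 0x1003 with 4-byte transfer
   words gives addr == 0x1000 and count == 2.  buffer[0] is seeded
   from 0x1000 and buffer[count - 1] from 0x1004, so the bytes at
   0x1000..0x1002 and 0x1005..0x1007, which GDB is not writing, are
   poked back unchanged; the two new bytes are memcpy'd into the
   buffer at offset memaddr & 3 == 3 before the poke loop runs.  */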
4071
4072 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4073 static int linux_supports_tracefork_flag;
4074
4075 static void
4076 linux_enable_event_reporting (int pid)
4077 {
4078 if (!linux_supports_tracefork_flag)
4079 return;
4080
4081 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4082 }
4083
4084 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4085
4086 static int
4087 linux_tracefork_grandchild (void *arg)
4088 {
4089 _exit (0);
4090 }
4091
4092 #define STACK_SIZE 4096
4093
4094 static int
4095 linux_tracefork_child (void *arg)
4096 {
4097 ptrace (PTRACE_TRACEME, 0, 0, 0);
4098 kill (getpid (), SIGSTOP);
4099
4100 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4101
4102 if (fork () == 0)
4103 linux_tracefork_grandchild (NULL);
4104
4105 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4106
4107 #ifdef __ia64__
4108 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4109 CLONE_VM | SIGCHLD, NULL);
4110 #else
4111 clone (linux_tracefork_grandchild, arg + STACK_SIZE,
4112 CLONE_VM | SIGCHLD, NULL);
4113 #endif
4114
4115 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4116
4117 _exit (0);
4118 }
4119
4120 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4121 sure that we can enable the option, and that it had the desired
4122 effect. */
4123
4124 static void
4125 linux_test_for_tracefork (void)
4126 {
4127 int child_pid, ret, status;
4128 long second_pid;
4129 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4130 char *stack = xmalloc (STACK_SIZE * 4);
4131 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4132
4133 linux_supports_tracefork_flag = 0;
4134
4135 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4136
4137 child_pid = fork ();
4138 if (child_pid == 0)
4139 linux_tracefork_child (NULL);
4140
4141 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4142
4143 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4144 #ifdef __ia64__
4145 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4146 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4147 #else /* !__ia64__ */
4148 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4149 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4150 #endif /* !__ia64__ */
4151
4152 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4153
4154 if (child_pid == -1)
4155 perror_with_name ("clone");
4156
4157 ret = my_waitpid (child_pid, &status, 0);
4158 if (ret == -1)
4159 perror_with_name ("waitpid");
4160 else if (ret != child_pid)
4161 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4162 if (! WIFSTOPPED (status))
4163 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4164
4165 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4166 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4167 if (ret != 0)
4168 {
4169 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4170 if (ret != 0)
4171 {
4172 warning ("linux_test_for_tracefork: failed to kill child");
4173 return;
4174 }
4175
4176 ret = my_waitpid (child_pid, &status, 0);
4177 if (ret != child_pid)
4178 warning ("linux_test_for_tracefork: failed to wait for killed child");
4179 else if (!WIFSIGNALED (status))
4180 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4181 "killed child", status);
4182
4183 return;
4184 }
4185
4186 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4187 if (ret != 0)
4188 warning ("linux_test_for_tracefork: failed to resume child");
4189
4190 ret = my_waitpid (child_pid, &status, 0);
4191
4192 if (ret == child_pid && WIFSTOPPED (status)
4193 && status >> 16 == PTRACE_EVENT_FORK)
4194 {
4195 second_pid = 0;
4196 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4197 if (ret == 0 && second_pid != 0)
4198 {
4199 int second_status;
4200
4201 linux_supports_tracefork_flag = 1;
4202 my_waitpid (second_pid, &second_status, 0);
4203 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4204 if (ret != 0)
4205 warning ("linux_test_for_tracefork: failed to kill second child");
4206 my_waitpid (second_pid, &status, 0);
4207 }
4208 }
4209 else
4210 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4211 "(%d, status 0x%x)", ret, status);
4212
4213 do
4214 {
4215 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4216 if (ret != 0)
4217 warning ("linux_test_for_tracefork: failed to kill child");
4218 my_waitpid (child_pid, &status, 0);
4219 }
4220 while (WIFSTOPPED (status));
4221
4222 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4223 free (stack);
4224 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4225 }
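
/* Editorial note on the status test above: when an event enabled by
   PTRACE_SETOPTIONS fires, the kernel reports a stop whose low 16
   bits look like an ordinary SIGTRAP stop and whose high 16 bits
   carry the event code.  For a fork event,

       status == (PTRACE_EVENT_FORK << 16) | (SIGTRAP << 8) | 0x7f

   which matches the W_STOPCODE encoding defined near the top of this
   file, so "WIFSTOPPED (status) && status >> 16 == PTRACE_EVENT_FORK"
   recognizes it.  */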
4226
4227
4228 static void
4229 linux_look_up_symbols (void)
4230 {
4231 #ifdef USE_THREAD_DB
4232 struct process_info *proc = current_process ();
4233
4234 if (proc->private->thread_db != NULL)
4235 return;
4236
4237 /* If the kernel supports tracing forks then it also supports tracing
4238 clones, and then we don't need to use the magic thread event breakpoint
4239 to learn about threads. */
4240 thread_db_init (!linux_supports_tracefork_flag);
4241 #endif
4242 }
4243
4244 static void
4245 linux_request_interrupt (void)
4246 {
4247 extern unsigned long signal_pid;
4248
4249 if (!ptid_equal (cont_thread, null_ptid)
4250 && !ptid_equal (cont_thread, minus_one_ptid))
4251 {
4252 struct lwp_info *lwp;
4253 int lwpid;
4254
4255 lwp = get_thread_lwp (current_inferior);
4256 lwpid = lwpid_of (lwp);
4257 kill_lwp (lwpid, SIGINT);
4258 }
4259 else
4260 kill_lwp (signal_pid, SIGINT);
4261 }
4262
4263 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4264 to debugger memory starting at MYADDR. */
4265
4266 static int
4267 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4268 {
4269 char filename[PATH_MAX];
4270 int fd, n;
4271 int pid = lwpid_of (get_thread_lwp (current_inferior));
4272
4273 snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4274
4275 fd = open (filename, O_RDONLY);
4276 if (fd < 0)
4277 return -1;
4278
4279 if (offset != (CORE_ADDR) 0
4280 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4281 n = -1;
4282 else
4283 n = read (fd, myaddr, len);
4284
4285 close (fd);
4286
4287 return n;
4288 }
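
/* Editorial sketch (guarded out of the build, not part of gdbserver):
   the bytes returned above are the inferior's raw auxiliary vector,
   an array of type/value pairs in the inferior's word size ending
   with an AT_NULL entry.  Assuming a 64-bit host and inferior, and
   using the Elf64_auxv_t type from <elf.h>, a hypothetical caller
   could scan it like this:  */
#if 0
static unsigned long
example_find_auxv_entry (const unsigned char *auxv, size_t len,
			 unsigned long type)
{
  const Elf64_auxv_t *av;

  /* Walk entry by entry, stopping at the AT_NULL terminator or the
     end of the buffer, whichever comes first.  */
  for (av = (const Elf64_auxv_t *) auxv;
       (const unsigned char *) (av + 1) <= auxv + len
	 && av->a_type != AT_NULL;
       av++)
    if (av->a_type == type)
      return av->a_un.a_val;
  return 0;
}
#endif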
4289
4290 /* These breakpoint and watchpoint related wrapper functions simply
4291 pass on the function call if the target has registered a
4292 corresponding function. */
4293
4294 static int
4295 linux_insert_point (char type, CORE_ADDR addr, int len)
4296 {
4297 if (the_low_target.insert_point != NULL)
4298 return the_low_target.insert_point (type, addr, len);
4299 else
4300 /* Unsupported (see target.h). */
4301 return 1;
4302 }
4303
4304 static int
4305 linux_remove_point (char type, CORE_ADDR addr, int len)
4306 {
4307 if (the_low_target.remove_point != NULL)
4308 return the_low_target.remove_point (type, addr, len);
4309 else
4310 /* Unsupported (see target.h). */
4311 return 1;
4312 }
4313
4314 static int
4315 linux_stopped_by_watchpoint (void)
4316 {
4317 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4318
4319 return lwp->stopped_by_watchpoint;
4320 }
4321
4322 static CORE_ADDR
4323 linux_stopped_data_address (void)
4324 {
4325 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4326
4327 return lwp->stopped_data_address;
4328 }
4329
4330 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4331 #if defined(__mcoldfire__)
4332 /* These should really be defined in the kernel's ptrace.h header. */
4333 #define PT_TEXT_ADDR 49*4
4334 #define PT_DATA_ADDR 50*4
4335 #define PT_TEXT_END_ADDR 51*4
4336 #endif
4337
4338 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4339 to tell gdb about. */
4340
4341 static int
4342 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4343 {
4344 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4345 unsigned long text, text_end, data;
4346 int pid = lwpid_of (get_thread_lwp (current_inferior));
4347
4348 errno = 0;
4349
4350 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4351 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4352 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4353
4354 if (errno == 0)
4355 {
4356 /* Both text and data offsets produced at compile-time (and so
4357 used by gdb) are relative to the beginning of the program,
4358 with the data segment immediately following the text segment.
4359 However, the actual runtime layout in memory may put the data
4360 somewhere else, so when we send gdb a data base-address, we
4361 use the real data base address and subtract the compile-time
4362 data base-address from it (which is just the length of the
4363 text segment). BSS immediately follows data in both
4364 cases. */
4365 *text_p = text;
4366 *data_p = data - (text_end - text);
4367
4368 return 1;
4369 }
4370 #endif
4371 return 0;
4372 }
4373 #endif
4374
4375 static int
4376 compare_ints (const void *xa, const void *xb)
4377 {
4378 int a = *(const int *)xa;
4379 int b = *(const int *)xb;
4380
4381 return a - b;
4382 }
4383
4384 static int *
4385 unique (int *b, int *e)
4386 {
4387 int *d = b;
4388 while (++b != e)
4389 if (*d != *b)
4390 *++d = *b;
4391 return ++d;
4392 }
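
/* Editorial note: compare_ints and unique implement the usual
   sort-then-deduplicate idiom for list_threads below.  unique expects
   a sorted, non-empty [b, e) range, compacts it in place, and returns
   one past the last distinct element.  For example, qsort turns
   {3, 1, 3, 2} into {1, 2, 3, 3}, and unique then leaves {1, 2, 3}
   with the returned pointer just past the 3.  */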
4393
4394 /* Given PID, iterates over all threads in that process.
4395
4396 Information about each thread, in a format suitable for qXfer:osdata:thread
4397 is printed to BUFFER, if it's not NULL. BUFFER is assumed to be already
4398 initialized, and the caller is responsible for finishing and appending '\0'
4399 to it.
4400
4401 The list of cores that threads are running on is assigned to *CORES, if it
4402 is not NULL. If no cores are found, *CORES will be set to NULL. Caller
4403 should free *CORES. */
4404
4405 static void
4406 list_threads (int pid, struct buffer *buffer, char **cores)
4407 {
4408 int count = 0;
4409 int allocated = 10;
4410 int *core_numbers = xmalloc (sizeof (int) * allocated);
4411 char pathname[128];
4412 DIR *dir;
4413 struct dirent *dp;
4414 struct stat statbuf;
4415
4416 sprintf (pathname, "/proc/%d/task", pid);
4417 if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
4418 {
4419 dir = opendir (pathname);
4420 if (!dir)
4421 {
4422 free (core_numbers);
4423 return;
4424 }
4425
4426 while ((dp = readdir (dir)) != NULL)
4427 {
4428 unsigned long lwp = strtoul (dp->d_name, NULL, 10);
4429
4430 if (lwp != 0)
4431 {
4432 unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
4433
4434 if (core != -1)
4435 {
4436 char s[sizeof ("4294967295")];
4437 sprintf (s, "%u", core);
4438
4439 if (count == allocated)
4440 {
4441 allocated *= 2;
4442 core_numbers = realloc (core_numbers,
4443 sizeof (int) * allocated);
4444 }
4445 core_numbers[count++] = core;
4446 if (buffer)
4447 buffer_xml_printf (buffer,
4448 "<item>"
4449 "<column name=\"pid\">%d</column>"
4450 "<column name=\"tid\">%s</column>"
4451 "<column name=\"core\">%s</column>"
4452 "</item>", pid, dp->d_name, s);
4453 }
4454 else
4455 {
4456 if (buffer)
4457 buffer_xml_printf (buffer,
4458 "<item>"
4459 "<column name=\"pid\">%d</column>"
4460 "<column name=\"tid\">%s</column>"
4461 "</item>", pid, dp->d_name);
4462 }
4463 }
4464 }
4465 }
4466
4467 if (cores)
4468 {
4469 *cores = NULL;
4470 if (count > 0)
4471 {
4472 struct buffer buffer2;
4473 int *b;
4474 int *e;
4475 qsort (core_numbers, count, sizeof (int), compare_ints);
4476
4477 /* Remove duplicates. */
4478 b = core_numbers;
4479 e = unique (b, core_numbers + count);
4480
4481 buffer_init (&buffer2);
4482
4483 for (b = core_numbers; b != e; ++b)
4484 {
4485 char number[sizeof ("4294967295")];
4486 sprintf (number, "%u", *b);
4487 buffer_xml_printf (&buffer2, "%s%s",
4488 (b == core_numbers) ? "" : ",", number);
4489 }
4490 buffer_grow_str0 (&buffer2, "");
4491
4492 *cores = buffer_finish (&buffer2);
4493 }
4494 }
4495 free (core_numbers);
4496 }
4497
4498 static void
4499 show_process (int pid, const char *username, struct buffer *buffer)
4500 {
4501 char pathname[128];
4502 FILE *f;
4503 char cmd[MAXPATHLEN + 1];
4504
4505 sprintf (pathname, "/proc/%d/cmdline", pid);
4506
4507 if ((f = fopen (pathname, "r")) != NULL)
4508 {
4509 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4510 if (len > 0)
4511 {
4512 char *cores = 0;
4513 int i;
4514 for (i = 0; i < len; i++)
4515 if (cmd[i] == '\0')
4516 cmd[i] = ' ';
4517 cmd[len] = '\0';
4518
4519 buffer_xml_printf (buffer,
4520 "<item>"
4521 "<column name=\"pid\">%d</column>"
4522 "<column name=\"user\">%s</column>"
4523 "<column name=\"command\">%s</column>",
4524 pid,
4525 username,
4526 cmd);
4527
4528 /* This only collects core numbers, and does not print threads. */
4529 list_threads (pid, NULL, &cores);
4530
4531 if (cores)
4532 {
4533 buffer_xml_printf (buffer,
4534 "<column name=\"cores\">%s</column>", cores);
4535 free (cores);
4536 }
4537
4538 buffer_xml_printf (buffer, "</item>");
4539 }
4540 fclose (f);
4541 }
4542 }
4543
4544 static int
4545 linux_qxfer_osdata (const char *annex,
4546 unsigned char *readbuf, unsigned const char *writebuf,
4547 CORE_ADDR offset, int len)
4548 {
4549 /* We make the process list snapshot when the object starts to be
4550 read. */
4551 static const char *buf;
4552 static long len_avail = -1;
4553 static struct buffer buffer;
4554 int processes = 0;
4555 int threads = 0;
4556
4557 DIR *dirp;
4558
4559 if (strcmp (annex, "processes") == 0)
4560 processes = 1;
4561 else if (strcmp (annex, "threads") == 0)
4562 threads = 1;
4563 else
4564 return 0;
4565
4566 if (!readbuf || writebuf)
4567 return 0;
4568
4569 if (offset == 0)
4570 {
4571 if (len_avail != -1 && len_avail != 0)
4572 buffer_free (&buffer);
4573 len_avail = 0;
4574 buf = NULL;
4575 buffer_init (&buffer);
4576 if (processes)
4577 buffer_grow_str (&buffer, "<osdata type=\"processes\">");
4578 else if (threads)
4579 buffer_grow_str (&buffer, "<osdata type=\"threads\">");
4580
4581 dirp = opendir ("/proc");
4582 if (dirp)
4583 {
4584 struct dirent *dp;
4585 while ((dp = readdir (dirp)) != NULL)
4586 {
4587 struct stat statbuf;
4588 char procentry[sizeof ("/proc/4294967295")];
4589
4590 if (!isdigit (dp->d_name[0])
4591 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4592 continue;
4593
4594 sprintf (procentry, "/proc/%s", dp->d_name);
4595 if (stat (procentry, &statbuf) == 0
4596 && S_ISDIR (statbuf.st_mode))
4597 {
4598 int pid = (int) strtoul (dp->d_name, NULL, 10);
4599
4600 if (processes)
4601 {
4602 struct passwd *entry = getpwuid (statbuf.st_uid);
4603 show_process (pid, entry ? entry->pw_name : "?", &buffer);
4604 }
4605 else if (threads)
4606 {
4607 list_threads (pid, &buffer, NULL);
4608 }
4609 }
4610 }
4611
4612 closedir (dirp);
4613 }
4614 buffer_grow_str0 (&buffer, "</osdata>\n");
4615 buf = buffer_finish (&buffer);
4616 len_avail = strlen (buf);
4617 }
4618
4619 if (offset >= len_avail)
4620 {
4621 /* Done. Get rid of the data. */
4622 buffer_free (&buffer);
4623 buf = NULL;
4624 len_avail = 0;
4625 return 0;
4626 }
4627
4628 if (len > len_avail - offset)
4629 len = len_avail - offset;
4630 memcpy (readbuf, buf + offset, len);
4631
4632 return len;
4633 }
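
/* Editorial note: the offset handling above implements the usual
   qXfer chunked-read protocol.  GDB reads the object at increasing
   offsets; the snapshot is (re)built whenever offset 0 is requested,
   each call returns at most len_avail - offset bytes, and a request
   at or past the end frees the snapshot and returns 0 to signal EOF.
   For instance, a 70-byte document fetched in 64-byte chunks is
   served as 64 bytes at offset 0, 6 bytes at offset 64, and 0 bytes
   at offset 70.  */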
4634
4635 /* Convert a native/host siginfo object into/from the siginfo in the
4636    layout of the inferior's architecture.  */
4637
4638 static void
4639 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4640 {
4641 int done = 0;
4642
4643 if (the_low_target.siginfo_fixup != NULL)
4644 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4645
4646 /* If there was no callback, or the callback didn't do anything,
4647 then just do a straight memcpy. */
4648 if (!done)
4649 {
4650 if (direction == 1)
4651 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4652 else
4653 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4654 }
4655 }
4656
4657 static int
4658 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4659 unsigned const char *writebuf, CORE_ADDR offset, int len)
4660 {
4661 int pid;
4662 struct siginfo siginfo;
4663 char inf_siginfo[sizeof (struct siginfo)];
4664
4665 if (current_inferior == NULL)
4666 return -1;
4667
4668 pid = lwpid_of (get_thread_lwp (current_inferior));
4669
4670 if (debug_threads)
4671 fprintf (stderr, "%s siginfo for lwp %d.\n",
4672 readbuf != NULL ? "Reading" : "Writing",
4673 pid);
4674
4675 if (offset > sizeof (siginfo))
4676 return -1;
4677
4678 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4679 return -1;
4680
4681 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4682 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4683 inferior with a 64-bit GDBSERVER should look the same as debugging it
4684 with a 32-bit GDBSERVER, we need to convert it. */
4685 siginfo_fixup (&siginfo, inf_siginfo, 0);
4686
4687 if (offset + len > sizeof (siginfo))
4688 len = sizeof (siginfo) - offset;
4689
4690 if (readbuf != NULL)
4691 memcpy (readbuf, inf_siginfo + offset, len);
4692 else
4693 {
4694 memcpy (inf_siginfo + offset, writebuf, len);
4695
4696 /* Convert back to ptrace layout before flushing it out. */
4697 siginfo_fixup (&siginfo, inf_siginfo, 1);
4698
4699 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4700 return -1;
4701 }
4702
4703 return len;
4704 }
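
/* Editorial worked example for the clamping above: if
   sizeof (siginfo) is 128 (its usual size on Linux), a request for 64
   bytes at offset 96 is truncated to len == 128 - 96 == 32, so the
   transfer never runs past the end of the structure and the returned
   length tells GDB where the object ends.  */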
4705
4706 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4707    it lets us notice when children change state, and it acts as the
4708    handler for the sigsuspend in my_waitpid.  */
4709
4710 static void
4711 sigchld_handler (int signo)
4712 {
4713 int old_errno = errno;
4714
4715 if (debug_threads)
4716 {
4717 do
4718 {
4719 /* fprintf is not async-signal-safe, so call write
4720 directly. */
4721 if (write (2, "sigchld_handler\n",
4722 sizeof ("sigchld_handler\n") - 1) < 0)
4723 break; /* just ignore */
4724 } while (0);
4725 }
4726
4727 if (target_is_async_p ())
4728 async_file_mark (); /* trigger a linux_wait */
4729
4730 errno = old_errno;
4731 }
4732
4733 static int
4734 linux_supports_non_stop (void)
4735 {
4736 return 1;
4737 }
4738
4739 static int
4740 linux_async (int enable)
4741 {
4742 int previous = (linux_event_pipe[0] != -1);
4743
4744 if (debug_threads)
4745 fprintf (stderr, "linux_async (%d), previous=%d\n",
4746 enable, previous);
4747
4748 if (previous != enable)
4749 {
4750 sigset_t mask;
4751 sigemptyset (&mask);
4752 sigaddset (&mask, SIGCHLD);
4753
4754 sigprocmask (SIG_BLOCK, &mask, NULL);
4755
4756 if (enable)
4757 {
4758 if (pipe (linux_event_pipe) == -1)
4759 fatal ("creating event pipe failed.");
4760
4761 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4762 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4763
4764 /* Register the event loop handler. */
4765 add_file_handler (linux_event_pipe[0],
4766 handle_target_event, NULL);
4767
4768 /* Always trigger a linux_wait. */
4769 async_file_mark ();
4770 }
4771 else
4772 {
4773 delete_file_handler (linux_event_pipe[0]);
4774
4775 close (linux_event_pipe[0]);
4776 close (linux_event_pipe[1]);
4777 linux_event_pipe[0] = -1;
4778 linux_event_pipe[1] = -1;
4779 }
4780
4781 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4782 }
4783
4784 return previous;
4785 }
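
/* Editorial note: linux_event_pipe is an instance of the classic
   self-pipe trick.  A signal handler cannot safely re-enter the event
   loop, but write() is async-signal-safe, so async_file_mark writes a
   byte into the pipe from sigchld_handler and the event loop, which
   selects on the read end, wakes up and runs handle_target_event in
   normal context.  Both ends are set O_NONBLOCK above so that a full
   pipe can never block the signal handler.  */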
4786
4787 static int
4788 linux_start_non_stop (int nonstop)
4789 {
4790 /* Register or unregister from event-loop accordingly. */
4791 linux_async (nonstop);
4792 return 0;
4793 }
4794
4795 static int
4796 linux_supports_multi_process (void)
4797 {
4798 return 1;
4799 }
4800
4801
4802 /* Enumerate spufs IDs for process PID. */
4803 static int
4804 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4805 {
4806 int pos = 0;
4807 int written = 0;
4808 char path[128];
4809 DIR *dir;
4810 struct dirent *entry;
4811
4812 sprintf (path, "/proc/%ld/fd", pid);
4813 dir = opendir (path);
4814 if (!dir)
4815 return -1;
4816
4817 rewinddir (dir);
4818 while ((entry = readdir (dir)) != NULL)
4819 {
4820 struct stat st;
4821 struct statfs stfs;
4822 int fd;
4823
4824 fd = atoi (entry->d_name);
4825 if (!fd)
4826 continue;
4827
4828 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4829 if (stat (path, &st) != 0)
4830 continue;
4831 if (!S_ISDIR (st.st_mode))
4832 continue;
4833
4834 if (statfs (path, &stfs) != 0)
4835 continue;
4836 if (stfs.f_type != SPUFS_MAGIC)
4837 continue;
4838
4839 if (pos >= offset && pos + 4 <= offset + len)
4840 {
4841 *(unsigned int *)(buf + pos - offset) = fd;
4842 written += 4;
4843 }
4844 pos += 4;
4845 }
4846
4847 closedir (dir);
4848 return written;
4849 }
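
/* Editorial worked example for the windowing above: each spufs
   context contributes 4 bytes at position pos, and only IDs whose 4
   bytes lie wholly inside [offset, offset + len) are copied out.
   With offset == 4 and len == 8, the IDs occupying bytes [4, 8) and
   [8, 12), i.e. the second and third contexts found, are returned
   and written == 8, even if more contexts exist.  */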
4850
4851 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4852 object type, using the /proc file system. */
4853 static int
4854 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4855 unsigned const char *writebuf,
4856 CORE_ADDR offset, int len)
4857 {
4858 long pid = lwpid_of (get_thread_lwp (current_inferior));
4859 char buf[128];
4860 int fd = 0;
4861 int ret = 0;
4862
4863 if (!writebuf && !readbuf)
4864 return -1;
4865
4866 if (!*annex)
4867 {
4868 if (!readbuf)
4869 return -1;
4870 else
4871 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4872 }
4873
4874 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4875   fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4876 if (fd <= 0)
4877 return -1;
4878
4879 if (offset != 0
4880 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4881 {
4882 close (fd);
4883 return 0;
4884 }
4885
4886 if (writebuf)
4887 ret = write (fd, writebuf, (size_t) len);
4888 else
4889 ret = read (fd, readbuf, (size_t) len);
4890
4891 close (fd);
4892 return ret;
4893 }
4894
4895 static int
4896 linux_core_of_thread (ptid_t ptid)
4897 {
4898 char filename[sizeof ("/proc//task//stat")
4899 + 2 * 20 /* decimal digits for 2 numbers, max 2^64 bit each */
4900 + 1];
4901 FILE *f;
4902 char *content = NULL;
4903 char *p;
4904 char *ts = 0;
4905 int content_read = 0;
4906 int i;
4907 int core;
4908
4909 sprintf (filename, "/proc/%d/task/%ld/stat",
4910 ptid_get_pid (ptid), ptid_get_lwp (ptid));
4911 f = fopen (filename, "r");
4912 if (!f)
4913 return -1;
4914
4915 for (;;)
4916 {
4917 int n;
4918 content = realloc (content, content_read + 1024);
4919 n = fread (content + content_read, 1, 1024, f);
4920 content_read += n;
4921 if (n < 1024)
4922 {
4923 content[content_read] = '\0';
4924 break;
4925 }
4926 }
4927
4928 p = strchr (content, '(');
4929
4930 /* Skip ")". */
4931 if (p != NULL)
4932 p = strchr (p, ')');
4933 if (p != NULL)
4934 p++;
4935
4936 /* If the first field after program name has index 0, then core number is
4937 the field with index 36. There's no constant for that anywhere. */
4938 if (p != NULL)
4939 p = strtok_r (p, " ", &ts);
4940 for (i = 0; p != NULL && i != 36; ++i)
4941 p = strtok_r (NULL, " ", &ts);
4942
4943   if (p == NULL || sscanf (p, "%d", &core) != 1)
4944 core = -1;
4945
4946 free (content);
4947 fclose (f);
4948
4949 return core;
4950 }
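
/* Editorial note: the parsing above follows the format of
   /proc/PID/task/TID/stat, which looks like

       1234 (a b) R 1 1234 1234 0 -1 ... 3

   The comm field is parenthesized and may contain spaces, so the code
   first seeks the ')' that closes it and only then splits the rest on
   spaces; counting the field after the ')' as index 0, the CPU the
   thread last ran on is field index 36 (field 39 of the file,
   1-based).  */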
4951
4952 static void
4953 linux_process_qsupported (const char *query)
4954 {
4955 if (the_low_target.process_qsupported != NULL)
4956 the_low_target.process_qsupported (query);
4957 }
4958
4959 static int
4960 linux_supports_tracepoints (void)
4961 {
4962   if (the_low_target.supports_tracepoints == NULL)
4963 return 0;
4964
4965 return (*the_low_target.supports_tracepoints) ();
4966 }
4967
4968 static CORE_ADDR
4969 linux_read_pc (struct regcache *regcache)
4970 {
4971 if (the_low_target.get_pc == NULL)
4972 return 0;
4973
4974 return (*the_low_target.get_pc) (regcache);
4975 }
4976
4977 static void
4978 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4979 {
4980 gdb_assert (the_low_target.set_pc != NULL);
4981
4982 (*the_low_target.set_pc) (regcache, pc);
4983 }
4984
4985 static int
4986 linux_thread_stopped (struct thread_info *thread)
4987 {
4988 return get_thread_lwp (thread)->stopped;
4989 }
4990
4991 /* This exposes stop-all-threads functionality to other modules. */
4992
4993 static void
4994 linux_pause_all (int freeze)
4995 {
4996 stop_all_lwps (freeze, NULL);
4997 }
4998
4999 /* This exposes unstop-all-threads functionality to other gdbserver
5000 modules. */
5001
5002 static void
5003 linux_unpause_all (int unfreeze)
5004 {
5005 unstop_all_lwps (unfreeze, NULL);
5006 }
5007
5008 static int
5009 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5010 CORE_ADDR collector,
5011 CORE_ADDR lockaddr,
5012 ULONGEST orig_size,
5013 CORE_ADDR *jump_entry,
5014 unsigned char *jjump_pad_insn,
5015 ULONGEST *jjump_pad_insn_size,
5016 CORE_ADDR *adjusted_insn_addr,
5017 CORE_ADDR *adjusted_insn_addr_end)
5018 {
5019 return (*the_low_target.install_fast_tracepoint_jump_pad)
5020 (tpoint, tpaddr, collector, lockaddr, orig_size,
5021 jump_entry, jjump_pad_insn, jjump_pad_insn_size,
5022 adjusted_insn_addr, adjusted_insn_addr_end);
5023 }
5024
5025 static struct emit_ops *
5026 linux_emit_ops (void)
5027 {
5028 if (the_low_target.emit_ops != NULL)
5029 return (*the_low_target.emit_ops) ();
5030 else
5031 return NULL;
5032 }
5033
5034 static struct target_ops linux_target_ops = {
5035 linux_create_inferior,
5036 linux_attach,
5037 linux_kill,
5038 linux_detach,
5039 linux_mourn,
5040 linux_join,
5041 linux_thread_alive,
5042 linux_resume,
5043 linux_wait,
5044 linux_fetch_registers,
5045 linux_store_registers,
5046 linux_read_memory,
5047 linux_write_memory,
5048 linux_look_up_symbols,
5049 linux_request_interrupt,
5050 linux_read_auxv,
5051 linux_insert_point,
5052 linux_remove_point,
5053 linux_stopped_by_watchpoint,
5054 linux_stopped_data_address,
5055 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5056 linux_read_offsets,
5057 #else
5058 NULL,
5059 #endif
5060 #ifdef USE_THREAD_DB
5061 thread_db_get_tls_address,
5062 #else
5063 NULL,
5064 #endif
5065 linux_qxfer_spu,
5066 hostio_last_error_from_errno,
5067 linux_qxfer_osdata,
5068 linux_xfer_siginfo,
5069 linux_supports_non_stop,
5070 linux_async,
5071 linux_start_non_stop,
5072 linux_supports_multi_process,
5073 #ifdef USE_THREAD_DB
5074 thread_db_handle_monitor_command,
5075 #else
5076 NULL,
5077 #endif
5078 linux_core_of_thread,
5079 linux_process_qsupported,
5080 linux_supports_tracepoints,
5081 linux_read_pc,
5082 linux_write_pc,
5083 linux_thread_stopped,
5084 NULL,
5085 linux_pause_all,
5086 linux_unpause_all,
5087 linux_cancel_breakpoints,
5088 linux_stabilize_threads,
5089 linux_install_fast_tracepoint_jump_pad,
5090 linux_emit_ops
5091 };
5092
5093 static void
5094 linux_init_signals (void)
5095 {
5096 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5097 to find what the cancel signal actually is. */
5098 #ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does. */
5099 signal (__SIGRTMIN+1, SIG_IGN);
5100 #endif
5101 }
5102
5103 void
5104 initialize_low (void)
5105 {
5106 struct sigaction sigchld_action;
5107 memset (&sigchld_action, 0, sizeof (sigchld_action));
5108 set_target_ops (&linux_target_ops);
5109 set_breakpoint_data (the_low_target.breakpoint,
5110 the_low_target.breakpoint_len);
5111 linux_init_signals ();
5112 linux_test_for_tracefork ();
5113 #ifdef HAVE_LINUX_REGSETS
5114 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5115 ;
5116 disabled_regsets = xmalloc (num_regsets);
5117 #endif
5118
5119 sigchld_action.sa_handler = sigchld_handler;
5120 sigemptyset (&sigchld_action.sa_mask);
5121 sigchld_action.sa_flags = SA_RESTART;
5122 sigaction (SIGCHLD, &sigchld_action, NULL);
5123 }