Stop threads when attaching to a PID that is the tgid.
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include "linux-low.h"
22 #include "linux-osdata.h"
23
24 #include <sys/wait.h>
25 #include <stdio.h>
26 #include <sys/param.h>
27 #include <sys/ptrace.h>
28 #include "linux-ptrace.h"
29 #include "linux-procfs.h"
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <string.h>
34 #include <stdlib.h>
35 #include <unistd.h>
36 #include <errno.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #ifndef ELFMAG0
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51 #include <elf.h>
52 #endif
53
54 #ifndef SPUFS_MAGIC
55 #define SPUFS_MAGIC 0x23c9b64e
56 #endif
57
58 #ifndef O_LARGEFILE
59 #define O_LARGEFILE 0
60 #endif
61
62 #ifndef W_STOPCODE
63 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
64 #endif
65
66 /* This is the kernel's hard limit. Not to be confused with
67 SIGRTMIN. */
68 #ifndef __SIGRTMIN
69 #define __SIGRTMIN 32
70 #endif
71
72 #ifdef __UCLIBC__
73 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
74 #define HAS_NOMMU
75 #endif
76 #endif
77
/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

/* Forward declarations for the static helpers defined later in this
   file.  */
static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);
129 /* True if the low target can hardware single-step. Such targets
130 don't need a BREAKPOINT_REINSERT_ADDR callback. */
131
132 static int
133 can_hardware_single_step (void)
134 {
135 return (the_low_target.breakpoint_reinsert_addr == NULL);
136 }
137
138 /* True if the low target supports memory breakpoints. If so, we'll
139 have a GET_PC implementation. */
140
141 static int
142 supports_breakpoints (void)
143 {
144 return (the_low_target.get_pc != NULL);
145 }
146
147 /* Returns true if this target can support fast tracepoints. This
148 does not mean that the in-process agent has been loaded in the
149 inferior. */
150
151 static int
152 supports_fast_tracepoints (void)
153 {
154 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
155 }
156
/* A signal recorded for an LWP; PREV links entries into a list.
   NOTE(review): presumably these are queued for re-delivery when the
   LWP resumes — the consuming code is outside this chunk; confirm.  */

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* Types used for the 3rd/4th ptrace arguments and for the unit of a
   PTRACE_PEEK/POKE transfer on this host.  */
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
/* One flag byte per regset; the array is zeroed whenever a new
   inferior's architecture is set up (see linux_wait_for_lwp).
   NOTE(review): presumably nonzero marks regsets that should not be
   used — the setters are outside this chunk.  */
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);
182
/* Accepts an integer PID; Returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.

   On success this is the target of the /proc/PID/exe symlink; if the
   link cannot be read, the "/proc/PID/exe" path itself is returned.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;
  ssize_t len;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  snprintf (name1, MAXPATHLEN, "/proc/%d/exe", pid);

  /* readlink does not NUL-terminate the buffer, so only ask for
     MAXPATHLEN - 1 bytes and terminate explicitly; previously a
     maximally-long link target left NAME2 unterminated.  */
  len = readlink (name1, name2, MAXPATHLEN - 1);
  if (len > 0)
    {
      name2[len] = '\0';
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}
208
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  /* First the four magic bytes, then the file class.  ELFMAG/SELFMAG
     cover exactly e_ident[EI_MAG0..EI_MAG3].  */
  if (memcmp (header->e_ident, ELFMAG, SELFMAG) != 0)
    return 0;

  return header->e_ident[EI_CLASS] == ELFCLASS64;
}
220
/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  ssize_t nread;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  /* A short read (or read error) means it can't be a 64-bit ELF
     file; the descriptor is no longer needed either way.  */
  nread = read (fd, &header, sizeof (header));
  close (fd);

  if (nread != (ssize_t) sizeof (header))
    return 0;

  return elf_64_header_p (&header);
}
244
/* Remove LWP from all bookkeeping: drop its thread, unlink it from
   the ALL_LWPS list, and release its storage.  */

static void
delete_lwp (struct lwp_info *lwp)
{
  /* Remove the thread first, while LWP is still valid.  */
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}
253
/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  (The
     actual arch_setup call is deferred until the inferior first
     stops; see the NEW_INFERIOR handling in linux_wait_for_lwp.)  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  /* Zero-initialized Linux-specific private data.  */
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Let the low target allocate its per-process data, if any.  */
  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}
274
/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      /* __WALL is emulated by alternately polling the non-clone and
	 __WCLONE wait sets with WNOHANG, sleeping in sigsuspend
	 between full rounds.  */
      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

	  /* Switch to the other wait set for the next poll.  */
	  flags ^= __WCLONE;
	}

      /* Restore the caller's signal mask.  */
      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  /* Hand the waitpid errno back to the caller, undisturbed by the
     debug output above.  */
  errno = out_errno;
  return ret;
}
349
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  /* The PTRACE_EVENT_* code lives in bits 16 and up of the wait
     status.  */
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      /* Fetch the LWP id of the new clone child.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      /* Register the new LWP under its parent's pid.  */
      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      /* Record the pending non-SIGSTOP signal so it is
		 reported once the all-stop completes.  */
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}
428
/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  /* Targets without a get_pc callback don't support memory
     breakpoints (see supports_breakpoints); report PC 0.  */
  if (the_low_target.get_pc == NULL)
    return 0;

  /* The regcache is looked up through the global current inferior, so
     temporarily switch it to LWP's thread and restore it below.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}
454
/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  /* Rewind the PC only for a breakpoint SIGTRAP: not when stepping,
     not for a watchpoint hit, and not for an extended ptrace event
     (whose code sits in bits 16 and up of LAST_STATUS).  */
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}
498
499 static void *
500 add_lwp (ptid_t ptid)
501 {
502 struct lwp_info *lwp;
503
504 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
505 memset (lwp, 0, sizeof (*lwp));
506
507 lwp->head.id = ptid;
508
509 if (the_low_target.new_thread != NULL)
510 lwp->arch_private = the_low_target.new_thread ();
511
512 add_inferior_to_list (&all_lwps, &lwp->head);
513
514 return lwp;
515 }
516
/* Start an inferior process and returns its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  /* MMU-less systems have no fork; vfork shares the address space
     until the exec below.  */
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      /* Child: request tracing before exec so the parent controls us
	 from the first instruction.  */
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      /* Put the inferior in its own process group.  */
      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
	/* Fall back to a PATH search.  */
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      /* 0177 — presumably mimics the shell's "not executable"
	 status; exec failed, so bail out of the child.  */
      _exit (0177);
    }

  linux_add_process (pid, 0);

  /* The initial thread uses PID for both the pid and lwp fields.  */
  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  /* Ptrace options still need setting once the child first stops
     (handled by the code that consumes this flag).  */
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}
564
/* Attach to an inferior process.  LWPID is the LWP to attach to;
   INITIAL is nonzero when this is the first (process-level) attach,
   in which case failure is a hard error rather than a warning.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}
      else
	/* If we fail to attach to a process, report an error.  */
	error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
	       strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}
647
/* Public entry point: attach to an additional LWP of a process we
   are already attached to.  Failure to attach is only a warning in
   this (non-initial) case.  */

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}
653
654 /* Attach to PID. If PID is the tgid, attach to it and all
655 of its threads. */
656
657 int
658 linux_attach (unsigned long pid)
659 {
660 /* Attach to PID. We will check for other threads
661 soon. */
662 linux_attach_lwp_1 (pid, 1);
663 linux_add_process (pid, 1);
664
665 if (!non_stop)
666 {
667 struct thread_info *thread;
668
669 /* Don't ignore the initial SIGSTOP if we just attached to this
670 process. It will be collected by wait shortly. */
671 thread = find_thread_ptid (ptid_build (pid, pid, 0));
672 thread->last_resume_kind = resume_stop;
673 }
674
675 if (linux_proc_get_tgid (pid) == pid)
676 {
677 DIR *dir;
678 char pathname[128];
679
680 sprintf (pathname, "/proc/%ld/task", pid);
681
682 dir = opendir (pathname);
683
684 if (!dir)
685 {
686 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
687 fflush (stderr);
688 }
689 else
690 {
691 /* At this point we attached to the tgid. Scan the task for
692 existing threads. */
693 unsigned long lwp;
694 int new_threads_found;
695 int iterations = 0;
696 struct dirent *dp;
697
698 while (iterations < 2)
699 {
700 new_threads_found = 0;
701 /* Add all the other threads. While we go through the
702 threads, new threads may be spawned. Cycle through
703 the list of threads until we have done two iterations without
704 finding new threads. */
705 while ((dp = readdir (dir)) != NULL)
706 {
707 /* Fetch one lwp. */
708 lwp = strtoul (dp->d_name, NULL, 10);
709
710 /* Is this a new thread? */
711 if (lwp
712 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
713 {
714 linux_attach_lwp_1 (lwp, 0);
715 new_threads_found++;
716
717 if (debug_threads)
718 fprintf (stderr, "\
719 Found and attached to new lwp %ld\n", lwp);
720 }
721 }
722
723 if (!new_threads_found)
724 iterations++;
725 else
726 iterations = 0;
727
728 rewinddir (dir);
729 }
730 closedir (dir);
731 }
732 }
733
734 return 0;
735 }
736
/* Callback state for second_thread_of_pid_p: counts threads
   belonging to process PID.  */

struct counter
{
  int pid;	/* Process whose threads are being counted.  */
  int count;	/* Matching threads seen so far.  */
};
742
743 static int
744 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
745 {
746 struct counter *counter = args;
747
748 if (ptid_get_pid (entry->id) == counter->pid)
749 {
750 if (++counter->count > 1)
751 return 1;
752 }
753
754 return 0;
755 }
756
757 static int
758 last_thread_of_process_p (struct thread_info *thread)
759 {
760 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
761 int pid = ptid_get_pid (ptid);
762 struct counter counter = { pid , 0 };
763
764 return (find_inferior (&all_threads,
765 second_thread_of_pid_p, &counter) == NULL);
766 }
767
/* Kill the inferior lwp.  find_inferior callback; ARGS points to the
   pid of the process being killed.  Always returns 0 so the walk
   visits every thread.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  /* Skip threads of other processes.  */
  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}
804
/* Kill process PID and all of its LWPs.  Returns 0 on success, -1 if
   PID is not a known process.  */

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  /* Kill every LWP except the leader (see linux_kill_one_lwp).  */
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
	     lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
846
/* find_inferior callback: detach from one LWP of the process whose
   pid ARGS points to, then delete the LWP's bookkeeping.  Always
   returns 0 so the walk continues.  */

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}
879
/* Detach from process PID and all of its LWPs.  Returns 0 on
   success, -1 if PID is not a known process.  */

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}
911
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct process_info *process = proc;
  struct lwp_info *lwp = (struct lwp_info *) entry;

  if (pid_of (process) == pid_of (lwp))
    delete_lwp (lwp);

  /* Keep walking the whole list.  */
  return 0;
}
925
/* Forget everything about PROCESS: its thread_db state, its LWPs,
   its private data, and finally the process entry itself.  */

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  /* Drop all LWPs belonging to this process.  */
  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}
945
/* Wait until process PID has fully exited (or until it is no longer
   our child).  */

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    /* Only inspect STATUS when waitpid actually reaped something;
       on failure (ret == -1) STATUS is uninitialized and reading it
       was undefined behavior.  */
    if (ret > 0 && (WIFEXITED (status) || WIFSIGNALED (status)))
      break;
  } while (ret != -1 || errno != ECHILD);
}
957
958 /* Return nonzero if the given thread is still alive. */
959 static int
960 linux_thread_alive (ptid_t ptid)
961 {
962 struct lwp_info *lwp = find_lwp_pid (ptid);
963
964 /* We assume we always know if a thread exits. If a whole process
965 exited but we still haven't been able to report it to GDB, we'll
966 hold on to the last lwp of the dead process. */
967 if (lwp != NULL)
968 return !lwp->dead;
969 else
970 return 0;
971 }
972
/* Return 1 if this lwp has an interesting status pending.
   find_inferior callback; ARG points to the ptid filter (minus_one
   means "any process").  */

static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}
997
998 static int
999 same_lwp (struct inferior_list_entry *entry, void *data)
1000 {
1001 ptid_t ptid = *(ptid_t *) data;
1002 int lwp;
1003
1004 if (ptid_get_lwp (ptid) != 0)
1005 lwp = ptid_get_lwp (ptid);
1006 else
1007 lwp = ptid_get_pid (ptid);
1008
1009 if (ptid_get_lwp (entry->id) == lwp)
1010 return 1;
1011
1012 return 0;
1013 }
1014
/* Look up the lwp_info whose LWP id matches PTID (falling back to
   PTID's pid when it has no lwp component).  Returns NULL if not
   found.  */

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}
1020
/* Wait for an event from the LWP specified by PTID (or from any LWP
   when PTID is minus_one), using waitpid OPTIONS | __WALL.  Stores
   the raw wait status in *WSTATP and returns the stopped child's
   lwp_info, or NULL when WNOHANG-style options found nothing.  Also
   performs deferred arch setup for a new inferior and caches
   watchpoint/stop-PC state on the child.  */

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  /* NOTE(review): 32/33 are presumably the LinuxThreads real-time
     signals being filtered from the debug output — confirm.  */
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  /* The low target callbacks operate on the global current
	     inferior; switch it temporarily.  */
	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}
1152
/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  /* The temporary suspend above must be perfectly balanced; nothing
     else is expected to have suspended this LWP meanwhile.  */
  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}
1198
1199 /* Convenience wrapper. Returns true if LWP is presently collecting a
1200 fast tracepoint. */
1201
1202 static int
1203 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1204 struct fast_tpoint_collect_status *status)
1205 {
1206 CORE_ADDR thread_area;
1207
1208 if (the_low_target.get_thread_area == NULL)
1209 return 0;
1210
1211 /* Get the thread area address. This is used to recognize which
1212 thread is which when tracing with the in-process agent library.
1213 We don't read anything from the address, and treat it as opaque;
1214 it's the address itself that we assume is unique per-thread. */
1215 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1216 return 0;
1217
1218 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1219 }
1220
/* If LWP stopped inside a fast tracepoint jump pad with a signal that
   must be deferred, arrange for it to finish the collection and move
   out of the pad first; return 1 in that case (the caller then defers
   the signal and resumes LWP), 0 if the event can be reported now.

   The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  /* The tracepoint/breakpoint helpers below consult the current
     inferior; temporarily switch to LWP's thread.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  /* Not a synchronous (fault) signal: safe to let the thread
	     keep going until it is out of the jump pad.  */
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
		 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      /* The thread will now report the fault instead of
		 finishing the pad exit; tear down the exit
		 breakpoint, pausing all threads while memory is
		 patched.  */
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}
1347
1348 /* Enqueue one signal in the "signals to report later when out of the
1349 jump pad" list. */
1350
1351 static void
1352 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1353 {
1354 struct pending_signals *p_sig;
1355
1356 if (debug_threads)
1357 fprintf (stderr, "\
1358 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1359
1360 if (debug_threads)
1361 {
1362 struct pending_signals *sig;
1363
1364 for (sig = lwp->pending_signals_to_report;
1365 sig != NULL;
1366 sig = sig->prev)
1367 fprintf (stderr,
1368 " Already queued %d\n",
1369 sig->signal);
1370
1371 fprintf (stderr, " (no more currently queued signals)\n");
1372 }
1373
1374 /* Don't enqueue non-RT signals if they are already in the deferred
1375 queue. (SIGSTOP being the easiest signal to see ending up here
1376 twice) */
1377 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1378 {
1379 struct pending_signals *sig;
1380
1381 for (sig = lwp->pending_signals_to_report;
1382 sig != NULL;
1383 sig = sig->prev)
1384 {
1385 if (sig->signal == WSTOPSIG (*wstat))
1386 {
1387 if (debug_threads)
1388 fprintf (stderr,
1389 "Not requeuing already queued non-RT signal %d"
1390 " for LWP %ld\n",
1391 sig->signal,
1392 lwpid_of (lwp));
1393 return;
1394 }
1395 }
1396 }
1397
1398 p_sig = xmalloc (sizeof (*p_sig));
1399 p_sig->prev = lwp->pending_signals_to_report;
1400 p_sig->signal = WSTOPSIG (*wstat);
1401 memset (&p_sig->info, 0, sizeof (siginfo_t));
1402 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1403
1404 lwp->pending_signals_to_report = p_sig;
1405 }
1406
1407 /* Dequeue one signal from the "signals to report later when out of
1408 the jump pad" list. */
1409
1410 static int
1411 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1412 {
1413 if (lwp->pending_signals_to_report != NULL)
1414 {
1415 struct pending_signals **p_sig;
1416
1417 p_sig = &lwp->pending_signals_to_report;
1418 while ((*p_sig)->prev != NULL)
1419 p_sig = &(*p_sig)->prev;
1420
1421 *wstat = W_STOPCODE ((*p_sig)->signal);
1422 if ((*p_sig)->info.si_signo != 0)
1423 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1424 free (*p_sig);
1425 *p_sig = NULL;
1426
1427 if (debug_threads)
1428 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1429 WSTOPSIG (*wstat), lwpid_of (lwp));
1430
1431 if (debug_threads)
1432 {
1433 struct pending_signals *sig;
1434
1435 for (sig = lwp->pending_signals_to_report;
1436 sig != NULL;
1437 sig = sig->prev)
1438 fprintf (stderr,
1439 " Still queued %d\n",
1440 sig->signal);
1441
1442 fprintf (stderr, " (no more queued signals)\n");
1443 }
1444
1445 return 1;
1446 }
1447
1448 return 0;
1449 }
1450
1451 /* Arrange for a breakpoint to be hit again later. We don't keep the
1452 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1453 will handle the current event, eventually we will resume this LWP,
1454 and this breakpoint will trap again. */
1455
1456 static int
1457 cancel_breakpoint (struct lwp_info *lwp)
1458 {
1459 struct thread_info *saved_inferior;
1460
1461 /* There's nothing to do if we don't support breakpoints. */
1462 if (!supports_breakpoints ())
1463 return 0;
1464
1465 /* breakpoint_at reads from current inferior. */
1466 saved_inferior = current_inferior;
1467 current_inferior = get_lwp_thread (lwp);
1468
1469 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1470 {
1471 if (debug_threads)
1472 fprintf (stderr,
1473 "CB: Push back breakpoint for %s\n",
1474 target_pid_to_str (ptid_of (lwp)));
1475
1476 /* Back up the PC if necessary. */
1477 if (the_low_target.decr_pc_after_break)
1478 {
1479 struct regcache *regcache
1480 = get_thread_regcache (current_inferior, 1);
1481 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1482 }
1483
1484 current_inferior = saved_inferior;
1485 return 1;
1486 }
1487 else
1488 {
1489 if (debug_threads)
1490 fprintf (stderr,
1491 "CB: No breakpoint found at %s for [%s]\n",
1492 paddress (lwp->stop_pc),
1493 target_pid_to_str (ptid_of (lwp)));
1494 }
1495
1496 current_inferior = saved_inferior;
1497 return 0;
1498 }
1499
1500 /* When the event-loop is doing a step-over, this points at the thread
1501 being stepped. */
1502 ptid_t step_over_bkpt;
1503
1504 /* Wait for an event from child PID. If PID is -1, wait for any
1505 child. Store the stop status through the status pointer WSTAT.
1506 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1507 event was found and OPTIONS contains WNOHANG. Return the PID of
1508 the stopped child otherwise. */
1509
1510 static int
1511 linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1512 {
1513 struct lwp_info *event_child, *requested_child;
1514
1515 event_child = NULL;
1516 requested_child = NULL;
1517
1518 /* Check for a lwp with a pending status. */
1519
1520 if (ptid_equal (ptid, minus_one_ptid)
1521 || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1522 {
1523 event_child = (struct lwp_info *)
1524 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1525 if (debug_threads && event_child)
1526 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1527 }
1528 else
1529 {
1530 requested_child = find_lwp_pid (ptid);
1531
1532 if (!stopping_threads
1533 && requested_child->status_pending_p
1534 && requested_child->collecting_fast_tracepoint)
1535 {
1536 enqueue_one_deferred_signal (requested_child,
1537 &requested_child->status_pending);
1538 requested_child->status_pending_p = 0;
1539 requested_child->status_pending = 0;
1540 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1541 }
1542
1543 if (requested_child->suspended
1544 && requested_child->status_pending_p)
1545 fatal ("requesting an event out of a suspended child?");
1546
1547 if (requested_child->status_pending_p)
1548 event_child = requested_child;
1549 }
1550
1551 if (event_child != NULL)
1552 {
1553 if (debug_threads)
1554 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1555 lwpid_of (event_child), event_child->status_pending);
1556 *wstat = event_child->status_pending;
1557 event_child->status_pending_p = 0;
1558 event_child->status_pending = 0;
1559 current_inferior = get_lwp_thread (event_child);
1560 return lwpid_of (event_child);
1561 }
1562
1563 /* We only enter this loop if no process has a pending wait status. Thus
1564 any action taken in response to a wait status inside this loop is
1565 responding as soon as we detect the status, not after any pending
1566 events. */
1567 while (1)
1568 {
1569 event_child = linux_wait_for_lwp (ptid, wstat, options);
1570
1571 if ((options & WNOHANG) && event_child == NULL)
1572 {
1573 if (debug_threads)
1574 fprintf (stderr, "WNOHANG set, no event found\n");
1575 return 0;
1576 }
1577
1578 if (event_child == NULL)
1579 error ("event from unknown child");
1580
1581 current_inferior = get_lwp_thread (event_child);
1582
1583 /* Check for thread exit. */
1584 if (! WIFSTOPPED (*wstat))
1585 {
1586 if (debug_threads)
1587 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1588
1589 /* If the last thread is exiting, just return. */
1590 if (last_thread_of_process_p (current_inferior))
1591 {
1592 if (debug_threads)
1593 fprintf (stderr, "LWP %ld is last lwp of process\n",
1594 lwpid_of (event_child));
1595 return lwpid_of (event_child);
1596 }
1597
1598 if (!non_stop)
1599 {
1600 current_inferior = (struct thread_info *) all_threads.head;
1601 if (debug_threads)
1602 fprintf (stderr, "Current inferior is now %ld\n",
1603 lwpid_of (get_thread_lwp (current_inferior)));
1604 }
1605 else
1606 {
1607 current_inferior = NULL;
1608 if (debug_threads)
1609 fprintf (stderr, "Current inferior is now <NULL>\n");
1610 }
1611
1612 /* If we were waiting for this particular child to do something...
1613 well, it did something. */
1614 if (requested_child != NULL)
1615 {
1616 int lwpid = lwpid_of (event_child);
1617
1618 /* Cancel the step-over operation --- the thread that
1619 started it is gone. */
1620 if (finish_step_over (event_child))
1621 unstop_all_lwps (1, event_child);
1622 delete_lwp (event_child);
1623 return lwpid;
1624 }
1625
1626 delete_lwp (event_child);
1627
1628 /* Wait for a more interesting event. */
1629 continue;
1630 }
1631
1632 if (event_child->must_set_ptrace_flags)
1633 {
1634 linux_enable_event_reporting (lwpid_of (event_child));
1635 event_child->must_set_ptrace_flags = 0;
1636 }
1637
1638 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1639 && *wstat >> 16 != 0)
1640 {
1641 handle_extended_wait (event_child, *wstat);
1642 continue;
1643 }
1644
1645 if (WIFSTOPPED (*wstat)
1646 && WSTOPSIG (*wstat) == SIGSTOP
1647 && event_child->stop_expected)
1648 {
1649 int should_stop;
1650
1651 if (debug_threads)
1652 fprintf (stderr, "Expected stop.\n");
1653 event_child->stop_expected = 0;
1654
1655 should_stop = (current_inferior->last_resume_kind == resume_stop
1656 || stopping_threads);
1657
1658 if (!should_stop)
1659 {
1660 linux_resume_one_lwp (event_child,
1661 event_child->stepping, 0, NULL);
1662 continue;
1663 }
1664 }
1665
1666 return lwpid_of (event_child);
1667 }
1668
1669 /* NOTREACHED */
1670 return 0;
1671 }
1672
1673 static int
1674 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1675 {
1676 ptid_t wait_ptid;
1677
1678 if (ptid_is_pid (ptid))
1679 {
1680 /* A request to wait for a specific tgid. This is not possible
1681 with waitpid, so instead, we wait for any child, and leave
1682 children we're not interested in right now with a pending
1683 status to report later. */
1684 wait_ptid = minus_one_ptid;
1685 }
1686 else
1687 wait_ptid = ptid;
1688
1689 while (1)
1690 {
1691 int event_pid;
1692
1693 event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1694
1695 if (event_pid > 0
1696 && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1697 {
1698 struct lwp_info *event_child
1699 = find_lwp_pid (pid_to_ptid (event_pid));
1700
1701 if (! WIFSTOPPED (*wstat))
1702 mark_lwp_dead (event_child, *wstat);
1703 else
1704 {
1705 event_child->status_pending_p = 1;
1706 event_child->status_pending = *wstat;
1707 }
1708 }
1709 else
1710 return event_pid;
1711 }
1712 }
1713
1714
1715 /* Count the LWP's that have had events. */
1716
1717 static int
1718 count_events_callback (struct inferior_list_entry *entry, void *data)
1719 {
1720 struct lwp_info *lp = (struct lwp_info *) entry;
1721 struct thread_info *thread = get_lwp_thread (lp);
1722 int *count = data;
1723
1724 gdb_assert (count != NULL);
1725
1726 /* Count only resumed LWPs that have a SIGTRAP event pending that
1727 should be reported to GDB. */
1728 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1729 && thread->last_resume_kind != resume_stop
1730 && lp->status_pending_p
1731 && WIFSTOPPED (lp->status_pending)
1732 && WSTOPSIG (lp->status_pending) == SIGTRAP
1733 && !breakpoint_inserted_here (lp->stop_pc))
1734 (*count)++;
1735
1736 return 0;
1737 }
1738
1739 /* Select the LWP (if any) that is currently being single-stepped. */
1740
1741 static int
1742 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1743 {
1744 struct lwp_info *lp = (struct lwp_info *) entry;
1745 struct thread_info *thread = get_lwp_thread (lp);
1746
1747 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1748 && thread->last_resume_kind == resume_step
1749 && lp->status_pending_p)
1750 return 1;
1751 else
1752 return 0;
1753 }
1754
1755 /* Select the Nth LWP that has had a SIGTRAP event that should be
1756 reported to GDB. */
1757
1758 static int
1759 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1760 {
1761 struct lwp_info *lp = (struct lwp_info *) entry;
1762 struct thread_info *thread = get_lwp_thread (lp);
1763 int *selector = data;
1764
1765 gdb_assert (selector != NULL);
1766
1767 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1768 if (thread->last_resume_kind != resume_stop
1769 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1770 && lp->status_pending_p
1771 && WIFSTOPPED (lp->status_pending)
1772 && WSTOPSIG (lp->status_pending) == SIGTRAP
1773 && !breakpoint_inserted_here (lp->stop_pc))
1774 if ((*selector)-- == 0)
1775 return 1;
1776
1777 return 0;
1778 }
1779
1780 static int
1781 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1782 {
1783 struct lwp_info *lp = (struct lwp_info *) entry;
1784 struct thread_info *thread = get_lwp_thread (lp);
1785 struct lwp_info *event_lp = data;
1786
1787 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1788 if (lp == event_lp)
1789 return 0;
1790
1791 /* If a LWP other than the LWP that we're reporting an event for has
1792 hit a GDB breakpoint (as opposed to some random trap signal),
1793 then just arrange for it to hit it again later. We don't keep
1794 the SIGTRAP status and don't forward the SIGTRAP signal to the
1795 LWP. We will handle the current event, eventually we will resume
1796 all LWPs, and this one will get its breakpoint trap again.
1797
1798 If we do not do this, then we run the risk that the user will
1799 delete or disable the breakpoint, but the LWP will have already
1800 tripped on it. */
1801
1802 if (thread->last_resume_kind != resume_stop
1803 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1804 && lp->status_pending_p
1805 && WIFSTOPPED (lp->status_pending)
1806 && WSTOPSIG (lp->status_pending) == SIGTRAP
1807 && !lp->stepping
1808 && !lp->stopped_by_watchpoint
1809 && cancel_breakpoint (lp))
1810 /* Throw away the SIGTRAP. */
1811 lp->status_pending_p = 0;
1812
1813 return 0;
1814 }
1815
/* Push back pending breakpoint hits on every LWP, so each one traps
   again when later resumed.  */

static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}
1821
1822 /* Select one LWP out of those that have events pending. */
1823
1824 static void
1825 select_event_lwp (struct lwp_info **orig_lp)
1826 {
1827 int num_events = 0;
1828 int random_selector;
1829 struct lwp_info *event_lp;
1830
1831 /* Give preference to any LWP that is being single-stepped. */
1832 event_lp
1833 = (struct lwp_info *) find_inferior (&all_lwps,
1834 select_singlestep_lwp_callback, NULL);
1835 if (event_lp != NULL)
1836 {
1837 if (debug_threads)
1838 fprintf (stderr,
1839 "SEL: Select single-step %s\n",
1840 target_pid_to_str (ptid_of (event_lp)));
1841 }
1842 else
1843 {
1844 /* No single-stepping LWP. Select one at random, out of those
1845 which have had SIGTRAP events. */
1846
1847 /* First see how many SIGTRAP events we have. */
1848 find_inferior (&all_lwps, count_events_callback, &num_events);
1849
1850 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1851 random_selector = (int)
1852 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1853
1854 if (debug_threads && num_events > 1)
1855 fprintf (stderr,
1856 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1857 num_events, random_selector);
1858
1859 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1860 select_event_lwp_callback,
1861 &random_selector);
1862 }
1863
1864 if (event_lp != NULL)
1865 {
1866 /* Switch the event LWP. */
1867 *orig_lp = event_lp;
1868 }
1869 }
1870
1871 /* Decrement the suspend count of an LWP. */
1872
1873 static int
1874 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1875 {
1876 struct lwp_info *lwp = (struct lwp_info *) entry;
1877
1878 /* Ignore EXCEPT. */
1879 if (lwp == except)
1880 return 0;
1881
1882 lwp->suspended--;
1883
1884 gdb_assert (lwp->suspended >= 0);
1885 return 0;
1886 }
1887
/* Decrement the suspend count of all LWPs, except EXCEPT, if non
   NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}
1896
1897 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1898 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1899 void *data);
1900 static int lwp_running (struct inferior_list_entry *entry, void *data);
1901 static ptid_t linux_wait_1 (ptid_t ptid,
1902 struct target_waitstatus *ourstatus,
1903 int target_options);
1904
/* Stabilize threads (move out of jump pads).

   If a thread is midway collecting a fast tracepoint, we need to
   finish the collection and move it out of the jump pad before
   reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc..
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callee's).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run, could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread had
   no way to tell.  The thread could even midway memcpy'ing to the
   buffer, which would mean that when resumed, it would clobber the
   trace buffer that had been set for a new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway while
   collecting.  When the thread is later resumed, it finishes the
   collection, and returns to the jump pad, to execute the original
   instruction that was under the tracepoint jump at the time the
   older run had been started.  If the jump pad had been rewritten
   since for something else in the new run, the thread would now
   execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
					 stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      /* A thread stuck in the jump pad cannot be moved out; give up
	 rather than loop forever below.  */
      if (debug_threads)
	fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
      return;
    }

  save_inferior = current_inferior;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      int wstat;

      /* Note that we go through the full wait even loop.  While
	 moving threads out of jump pad, we need to be able to step
	 over internal breakpoints and such.  */
      linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
	{
	  lwp = get_thread_lwp (current_inferior);

	  /* Lock it.  */
	  lwp->suspended++;

	  if (ourstatus.value.sig != TARGET_SIGNAL_0
	      || current_inferior->last_resume_kind == resume_stop)
	    {
	      /* Defer the signal so it is reported once the thread
		 is out of the jump pad.  */
	      wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
	      enqueue_one_deferred_signal (lwp, &wstat);
	    }
	}
    }

  /* Release the per-LWP "lock" (suspended++) taken in the loop
     above.  */
  find_inferior (&all_lwps, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_inferior = save_inferior;

  if (debug_threads)
    {
      lwp_stuck
	= (struct lwp_info *) find_inferior (&all_lwps,
					     stuck_in_jump_pad_callback, NULL);
      if (lwp_stuck != NULL)
	fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
		 lwpid_of (lwp_stuck));
    }
}
2003
2004 /* Wait for process, returns status. */
2005
2006 static ptid_t
2007 linux_wait_1 (ptid_t ptid,
2008 struct target_waitstatus *ourstatus, int target_options)
2009 {
2010 int w;
2011 struct lwp_info *event_child;
2012 int options;
2013 int pid;
2014 int step_over_finished;
2015 int bp_explains_trap;
2016 int maybe_internal_trap;
2017 int report_to_gdb;
2018 int trace_event;
2019
2020 /* Translate generic target options into linux options. */
2021 options = __WALL;
2022 if (target_options & TARGET_WNOHANG)
2023 options |= WNOHANG;
2024
2025 retry:
2026 bp_explains_trap = 0;
2027 trace_event = 0;
2028 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2029
2030 /* If we were only supposed to resume one thread, only wait for
2031 that thread - if it's still alive. If it died, however - which
2032 can happen if we're coming from the thread death case below -
2033 then we need to make sure we restart the other threads. We could
2034 pick a thread at random or restart all; restarting all is less
2035 arbitrary. */
2036 if (!non_stop
2037 && !ptid_equal (cont_thread, null_ptid)
2038 && !ptid_equal (cont_thread, minus_one_ptid))
2039 {
2040 struct thread_info *thread;
2041
2042 thread = (struct thread_info *) find_inferior_id (&all_threads,
2043 cont_thread);
2044
2045 /* No stepping, no signal - unless one is pending already, of course. */
2046 if (thread == NULL)
2047 {
2048 struct thread_resume resume_info;
2049 resume_info.thread = minus_one_ptid;
2050 resume_info.kind = resume_continue;
2051 resume_info.sig = 0;
2052 linux_resume (&resume_info, 1);
2053 }
2054 else
2055 ptid = cont_thread;
2056 }
2057
2058 if (ptid_equal (step_over_bkpt, null_ptid))
2059 pid = linux_wait_for_event (ptid, &w, options);
2060 else
2061 {
2062 if (debug_threads)
2063 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2064 target_pid_to_str (step_over_bkpt));
2065 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2066 }
2067
2068 if (pid == 0) /* only if TARGET_WNOHANG */
2069 return null_ptid;
2070
2071 event_child = get_thread_lwp (current_inferior);
2072
2073 /* If we are waiting for a particular child, and it exited,
2074 linux_wait_for_event will return its exit status. Similarly if
2075 the last child exited. If this is not the last child, however,
2076 do not report it as exited until there is a 'thread exited' response
2077 available in the remote protocol. Instead, just wait for another event.
2078 This should be safe, because if the thread crashed we will already
2079 have reported the termination signal to GDB; that should stop any
2080 in-progress stepping operations, etc.
2081
2082 Report the exit status of the last thread to exit. This matches
2083 LinuxThreads' behavior. */
2084
2085 if (last_thread_of_process_p (current_inferior))
2086 {
2087 if (WIFEXITED (w) || WIFSIGNALED (w))
2088 {
2089 if (WIFEXITED (w))
2090 {
2091 ourstatus->kind = TARGET_WAITKIND_EXITED;
2092 ourstatus->value.integer = WEXITSTATUS (w);
2093
2094 if (debug_threads)
2095 fprintf (stderr,
2096 "\nChild exited with retcode = %x \n",
2097 WEXITSTATUS (w));
2098 }
2099 else
2100 {
2101 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2102 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2103
2104 if (debug_threads)
2105 fprintf (stderr,
2106 "\nChild terminated with signal = %x \n",
2107 WTERMSIG (w));
2108
2109 }
2110
2111 return ptid_of (event_child);
2112 }
2113 }
2114 else
2115 {
2116 if (!WIFSTOPPED (w))
2117 goto retry;
2118 }
2119
2120 /* If this event was not handled before, and is not a SIGTRAP, we
2121 report it. SIGILL and SIGSEGV are also treated as traps in case
2122 a breakpoint is inserted at the current PC. If this target does
2123 not support internal breakpoints at all, we also report the
2124 SIGTRAP without further processing; it's of no concern to us. */
2125 maybe_internal_trap
2126 = (supports_breakpoints ()
2127 && (WSTOPSIG (w) == SIGTRAP
2128 || ((WSTOPSIG (w) == SIGILL
2129 || WSTOPSIG (w) == SIGSEGV)
2130 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2131
2132 if (maybe_internal_trap)
2133 {
2134 /* Handle anything that requires bookkeeping before deciding to
2135 report the event or continue waiting. */
2136
2137 /* First check if we can explain the SIGTRAP with an internal
2138 breakpoint, or if we should possibly report the event to GDB.
2139 Do this before anything that may remove or insert a
2140 breakpoint. */
2141 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2142
2143 /* We have a SIGTRAP, possibly a step-over dance has just
2144 finished. If so, tweak the state machine accordingly,
2145 reinsert breakpoints and delete any reinsert (software
2146 single-step) breakpoints. */
2147 step_over_finished = finish_step_over (event_child);
2148
2149 /* Now invoke the callbacks of any internal breakpoints there. */
2150 check_breakpoints (event_child->stop_pc);
2151
2152 /* Handle tracepoint data collecting. This may overflow the
2153 trace buffer, and cause a tracing stop, removing
2154 breakpoints. */
2155 trace_event = handle_tracepoints (event_child);
2156
2157 if (bp_explains_trap)
2158 {
2159 /* If we stepped or ran into an internal breakpoint, we've
2160 already handled it. So next time we resume (from this
2161 PC), we should step over it. */
2162 if (debug_threads)
2163 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2164
2165 if (breakpoint_here (event_child->stop_pc))
2166 event_child->need_step_over = 1;
2167 }
2168 }
2169 else
2170 {
2171 /* We have some other signal, possibly a step-over dance was in
2172 progress, and it should be cancelled too. */
2173 step_over_finished = finish_step_over (event_child);
2174 }
2175
2176 /* We have all the data we need. Either report the event to GDB, or
2177 resume threads and keep waiting for more. */
2178
2179 /* If we're collecting a fast tracepoint, finish the collection and
2180 move out of the jump pad before delivering a signal. See
2181 linux_stabilize_threads. */
2182
2183 if (WIFSTOPPED (w)
2184 && WSTOPSIG (w) != SIGTRAP
2185 && supports_fast_tracepoints ()
2186 && in_process_agent_loaded ())
2187 {
2188 if (debug_threads)
2189 fprintf (stderr,
2190 "Got signal %d for LWP %ld. Check if we need "
2191 "to defer or adjust it.\n",
2192 WSTOPSIG (w), lwpid_of (event_child));
2193
2194 /* Allow debugging the jump pad itself. */
2195 if (current_inferior->last_resume_kind != resume_step
2196 && maybe_move_out_of_jump_pad (event_child, &w))
2197 {
2198 enqueue_one_deferred_signal (event_child, &w);
2199
2200 if (debug_threads)
2201 fprintf (stderr,
2202 "Signal %d for LWP %ld deferred (in jump pad)\n",
2203 WSTOPSIG (w), lwpid_of (event_child));
2204
2205 linux_resume_one_lwp (event_child, 0, 0, NULL);
2206 goto retry;
2207 }
2208 }
2209
2210 if (event_child->collecting_fast_tracepoint)
2211 {
2212 if (debug_threads)
2213 fprintf (stderr, "\
2214 LWP %ld was trying to move out of the jump pad (%d). \
2215 Check if we're already there.\n",
2216 lwpid_of (event_child),
2217 event_child->collecting_fast_tracepoint);
2218
2219 trace_event = 1;
2220
2221 event_child->collecting_fast_tracepoint
2222 = linux_fast_tracepoint_collecting (event_child, NULL);
2223
2224 if (event_child->collecting_fast_tracepoint != 1)
2225 {
2226 /* No longer need this breakpoint. */
2227 if (event_child->exit_jump_pad_bkpt != NULL)
2228 {
2229 if (debug_threads)
2230 fprintf (stderr,
2231 "No longer need exit-jump-pad bkpt; removing it."
2232 "stopping all threads momentarily.\n");
2233
2234 /* Other running threads could hit this breakpoint.
2235 We don't handle moribund locations like GDB does,
2236 instead we always pause all threads when removing
2237 breakpoints, so that any step-over or
2238 decr_pc_after_break adjustment is always taken
2239 care of while the breakpoint is still
2240 inserted. */
2241 stop_all_lwps (1, event_child);
2242 cancel_breakpoints ();
2243
2244 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2245 event_child->exit_jump_pad_bkpt = NULL;
2246
2247 unstop_all_lwps (1, event_child);
2248
2249 gdb_assert (event_child->suspended >= 0);
2250 }
2251 }
2252
2253 if (event_child->collecting_fast_tracepoint == 0)
2254 {
2255 if (debug_threads)
2256 fprintf (stderr,
2257 "fast tracepoint finished "
2258 "collecting successfully.\n");
2259
2260 /* We may have a deferred signal to report. */
2261 if (dequeue_one_deferred_signal (event_child, &w))
2262 {
2263 if (debug_threads)
2264 fprintf (stderr, "dequeued one signal.\n");
2265 }
2266 else
2267 {
2268 if (debug_threads)
2269 fprintf (stderr, "no deferred signals.\n");
2270
2271 if (stabilizing_threads)
2272 {
2273 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2274 ourstatus->value.sig = TARGET_SIGNAL_0;
2275 return ptid_of (event_child);
2276 }
2277 }
2278 }
2279 }
2280
2281 /* Check whether GDB would be interested in this event. */
2282
2283 /* If GDB is not interested in this signal, don't stop other
2284 threads, and don't report it to GDB. Just resume the inferior
2285 right away. We do this for threading-related signals as well as
2286 any that GDB specifically requested we ignore. But never ignore
2287 SIGSTOP if we sent it ourselves, and do not ignore signals when
2288 stepping - they may require special handling to skip the signal
2289 handler. */
2290 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2291 thread library? */
2292 if (WIFSTOPPED (w)
2293 && current_inferior->last_resume_kind != resume_step
2294 && (
2295 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2296 (current_process ()->private->thread_db != NULL
2297 && (WSTOPSIG (w) == __SIGRTMIN
2298 || WSTOPSIG (w) == __SIGRTMIN + 1))
2299 ||
2300 #endif
2301 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2302 && !(WSTOPSIG (w) == SIGSTOP
2303 && current_inferior->last_resume_kind == resume_stop))))
2304 {
2305 siginfo_t info, *info_p;
2306
2307 if (debug_threads)
2308 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2309 WSTOPSIG (w), lwpid_of (event_child));
2310
2311 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2312 info_p = &info;
2313 else
2314 info_p = NULL;
2315 linux_resume_one_lwp (event_child, event_child->stepping,
2316 WSTOPSIG (w), info_p);
2317 goto retry;
2318 }
2319
2320 /* If GDB wanted this thread to single step, we always want to
2321 report the SIGTRAP, and let GDB handle it. Watchpoints should
2322 always be reported. So should signals we can't explain. A
2323 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2324 not support Z0 breakpoints. If we do, we're be able to handle
2325 GDB breakpoints on top of internal breakpoints, by handling the
2326 internal breakpoint and still reporting the event to GDB. If we
2327 don't, we're out of luck, GDB won't see the breakpoint hit. */
2328 report_to_gdb = (!maybe_internal_trap
2329 || current_inferior->last_resume_kind == resume_step
2330 || event_child->stopped_by_watchpoint
2331 || (!step_over_finished
2332 && !bp_explains_trap && !trace_event)
2333 || gdb_breakpoint_here (event_child->stop_pc));
2334
2335 /* We found no reason GDB would want us to stop. We either hit one
2336 of our own breakpoints, or finished an internal step GDB
2337 shouldn't know about. */
2338 if (!report_to_gdb)
2339 {
2340 if (debug_threads)
2341 {
2342 if (bp_explains_trap)
2343 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2344 if (step_over_finished)
2345 fprintf (stderr, "Step-over finished.\n");
2346 if (trace_event)
2347 fprintf (stderr, "Tracepoint event.\n");
2348 }
2349
2350 /* We're not reporting this breakpoint to GDB, so apply the
2351 decr_pc_after_break adjustment to the inferior's regcache
2352 ourselves. */
2353
2354 if (the_low_target.set_pc != NULL)
2355 {
2356 struct regcache *regcache
2357 = get_thread_regcache (get_lwp_thread (event_child), 1);
2358 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2359 }
2360
2361 /* We may have finished stepping over a breakpoint. If so,
2362 we've stopped and suspended all LWPs momentarily except the
2363 stepping one. This is where we resume them all again. We're
2364 going to keep waiting, so use proceed, which handles stepping
2365 over the next breakpoint. */
2366 if (debug_threads)
2367 fprintf (stderr, "proceeding all threads.\n");
2368
2369 if (step_over_finished)
2370 unsuspend_all_lwps (event_child);
2371
2372 proceed_all_lwps ();
2373 goto retry;
2374 }
2375
2376 if (debug_threads)
2377 {
2378 if (current_inferior->last_resume_kind == resume_step)
2379 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2380 if (event_child->stopped_by_watchpoint)
2381 fprintf (stderr, "Stopped by watchpoint.\n");
2382 if (gdb_breakpoint_here (event_child->stop_pc))
2383 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2384 if (debug_threads)
2385 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2386 }
2387
2388 /* Alright, we're going to report a stop. */
2389
2390 if (!non_stop && !stabilizing_threads)
2391 {
2392 /* In all-stop, stop all threads. */
2393 stop_all_lwps (0, NULL);
2394
2395 /* If we're not waiting for a specific LWP, choose an event LWP
2396 from among those that have had events. Giving equal priority
2397 to all LWPs that have had events helps prevent
2398 starvation. */
2399 if (ptid_equal (ptid, minus_one_ptid))
2400 {
2401 event_child->status_pending_p = 1;
2402 event_child->status_pending = w;
2403
2404 select_event_lwp (&event_child);
2405
2406 event_child->status_pending_p = 0;
2407 w = event_child->status_pending;
2408 }
2409
2410 /* Now that we've selected our final event LWP, cancel any
2411 breakpoints in other LWPs that have hit a GDB breakpoint.
2412 See the comment in cancel_breakpoints_callback to find out
2413 why. */
2414 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2415
2416 /* Stabilize threads (move out of jump pads). */
2417 stabilize_threads ();
2418 }
2419 else
2420 {
2421 /* If we just finished a step-over, then all threads had been
2422 momentarily paused. In all-stop, that's fine, we want
2423 threads stopped by now anyway. In non-stop, we need to
2424 re-resume threads that GDB wanted to be running. */
2425 if (step_over_finished)
2426 unstop_all_lwps (1, event_child);
2427 }
2428
2429 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2430
2431 if (current_inferior->last_resume_kind == resume_stop
2432 && WSTOPSIG (w) == SIGSTOP)
2433 {
2434 /* A thread that has been requested to stop by GDB with vCont;t,
2435 and it stopped cleanly, so report as SIG0. The use of
2436 SIGSTOP is an implementation detail. */
2437 ourstatus->value.sig = TARGET_SIGNAL_0;
2438 }
2439 else if (current_inferior->last_resume_kind == resume_stop
2440 && WSTOPSIG (w) != SIGSTOP)
2441 {
2442 /* A thread that has been requested to stop by GDB with vCont;t,
2443 but, it stopped for other reasons. */
2444 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2445 }
2446 else
2447 {
2448 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2449 }
2450
2451 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2452
2453 if (debug_threads)
2454 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2455 target_pid_to_str (ptid_of (event_child)),
2456 ourstatus->kind,
2457 ourstatus->value.sig);
2458
2459 return ptid_of (event_child);
2460 }
2461
2462 /* Get rid of any pending event in the pipe. */
2463 static void
2464 async_file_flush (void)
2465 {
2466 int ret;
2467 char buf;
2468
2469 do
2470 ret = read (linux_event_pipe[0], &buf, 1);
2471 while (ret >= 0 || (ret == -1 && errno == EINTR));
2472 }
2473
2474 /* Put something in the pipe, so the event loop wakes up. */
2475 static void
2476 async_file_mark (void)
2477 {
2478 int ret;
2479
2480 async_file_flush ();
2481
2482 do
2483 ret = write (linux_event_pipe[1], "+", 1);
2484 while (ret == 0 || (ret == -1 && errno == EINTR));
2485
2486 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2487 be awakened anyway. */
2488 }
2489
2490 static ptid_t
2491 linux_wait (ptid_t ptid,
2492 struct target_waitstatus *ourstatus, int target_options)
2493 {
2494 ptid_t event_ptid;
2495
2496 if (debug_threads)
2497 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2498
2499 /* Flush the async file first. */
2500 if (target_is_async_p ())
2501 async_file_flush ();
2502
2503 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2504
2505 /* If at least one stop was reported, there may be more. A single
2506 SIGCHLD can signal more than one child stop. */
2507 if (target_is_async_p ()
2508 && (target_options & TARGET_WNOHANG) != 0
2509 && !ptid_equal (event_ptid, null_ptid))
2510 async_file_mark ();
2511
2512 return event_ptid;
2513 }
2514
/* Send a signal to an LWP.

   Prefer the tkill syscall so the signal is delivered to the exact
   kernel thread (needed with NPTL, where kill targets the whole
   thread group).  If the running kernel lacks tkill, remember that
   and fall back to kill from then on.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
#ifdef __NR_tkill
  {
    static int tkill_unsupported;

    if (!tkill_unsupported)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno == ENOSYS)
	  tkill_unsupported = 1;	/* Old kernel; use kill below.  */
	else
	  return ret;
      }
  }
#endif

  return kill (lwpid, signo);
}
2542
/* Request that LWP stop.  Exported entry point; the stop is
   asynchronous -- the LWP reports the resulting SIGSTOP through the
   normal wait path.  */
void
linux_stop_lwp (struct lwp_info *lwp)
{
  send_sigstop (lwp);
}
2548
2549 static void
2550 send_sigstop (struct lwp_info *lwp)
2551 {
2552 int pid;
2553
2554 pid = lwpid_of (lwp);
2555
2556 /* If we already have a pending stop signal for this process, don't
2557 send another. */
2558 if (lwp->stop_expected)
2559 {
2560 if (debug_threads)
2561 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2562
2563 return;
2564 }
2565
2566 if (debug_threads)
2567 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2568
2569 lwp->stop_expected = 1;
2570 kill_lwp (pid, SIGSTOP);
2571 }
2572
2573 static int
2574 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2575 {
2576 struct lwp_info *lwp = (struct lwp_info *) entry;
2577
2578 /* Ignore EXCEPT. */
2579 if (lwp == except)
2580 return 0;
2581
2582 if (lwp->stopped)
2583 return 0;
2584
2585 send_sigstop (lwp);
2586 return 0;
2587 }
2588
2589 /* Increment the suspend count of an LWP, and stop it, if not stopped
2590 yet. */
2591 static int
2592 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2593 void *except)
2594 {
2595 struct lwp_info *lwp = (struct lwp_info *) entry;
2596
2597 /* Ignore EXCEPT. */
2598 if (lwp == except)
2599 return 0;
2600
2601 lwp->suspended++;
2602
2603 return send_sigstop_callback (entry, except);
2604 }
2605
2606 static void
2607 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2608 {
2609 /* It's dead, really. */
2610 lwp->dead = 1;
2611
2612 /* Store the exit status for later. */
2613 lwp->status_pending_p = 1;
2614 lwp->status_pending = wstat;
2615
2616 /* Prevent trying to stop it. */
2617 lwp->stopped = 1;
2618
2619 /* No further stops are expected from a dead lwp. */
2620 lwp->stop_expected = 0;
2621 }
2622
/* Wait until LWP has reported a stop -- normally the SIGSTOP sent by
   send_sigstop.  Called for each LWP while stop_all_lwps is in
   progress.  If the LWP stops with some other signal, that status is
   left pending for later reporting; if it exited instead, it is
   marked dead.  Restores (or re-picks) current_inferior, which
   linux_wait_for_event may have changed behind our back.  */
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
		 lwpid_of (lwp));
      return;
    }

  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  /* __WALL: collect both clone and non-clone children.  */
  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
	fprintf (stderr, "LWP %ld stopped with signal %d\n",
		 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
	{
	  if (debug_threads)
	    fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
		     lwpid_of (lwp), wstat);

	  lwp->status_pending_p = 1;
	  lwp->status_pending = wstat;
	}
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      /* Re-look up the LWP: the one that exited may not be ENTRY.  */
      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
	{
	  /* Leave this status pending for the next time we're able to
	     report it.  In the mean time, we'll report this lwp as
	     dead to GDB, so GDB doesn't try to read registers and
	     memory from it.  This can only happen if this was the
	     last thread of the process; otherwise, PID is removed
	     from the thread tables before linux_wait_for_event
	     returns.  */
	  mark_lwp_dead (lwp, wstat);
	}
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
	fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
	{
	  /* We can't change the current inferior behind GDB's back,
	     otherwise, a subsequent command may apply to the wrong
	     process.  */
	  current_inferior = NULL;
	}
      else
	{
	  /* Set a valid thread as current.  */
	  set_desired_inferior (0);
	}
    }
}
2713
2714 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2715 move it out, because we need to report the stop event to GDB. For
2716 example, if the user puts a breakpoint in the jump pad, it's
2717 because she wants to debug it. */
2718
2719 static int
2720 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2721 {
2722 struct lwp_info *lwp = (struct lwp_info *) entry;
2723 struct thread_info *thread = get_lwp_thread (lwp);
2724
2725 gdb_assert (lwp->suspended == 0);
2726 gdb_assert (lwp->stopped);
2727
2728 /* Allow debugging the jump pad, gdb_collect, etc.. */
2729 return (supports_fast_tracepoints ()
2730 && in_process_agent_loaded ()
2731 && (gdb_breakpoint_here (lwp->stop_pc)
2732 || lwp->stopped_by_watchpoint
2733 || thread->last_resume_kind == resume_step)
2734 && linux_fast_tracepoint_collecting (lwp, NULL));
2735 }
2736
/* Called for each stopped LWP during thread stabilization.  If the
   LWP is in a fast tracepoint jump pad for no reason GDB needs to
   know about, defer any pending signal and set it running so it can
   exit the pad; otherwise leave it stopped and count it as suspended
   while stabilization proceeds.  */
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && !lwp->stopped_by_watchpoint
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
	fprintf (stderr,
		 "LWP %ld needs stabilizing (in jump pad)\n",
		 lwpid_of (lwp));

      if (wstat)
	{
	  /* Consume the pending status here; the signal is
	     re-delivered once the LWP is out of the pad.  */
	  lwp->status_pending_p = 0;
	  enqueue_one_deferred_signal (lwp, wstat);

	  if (debug_threads)
	    fprintf (stderr,
		     "Signal %d for LWP %ld deferred "
		     "(in jump pad)\n",
		     WSTOPSIG (*wstat), lwpid_of (lwp));
	}

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}
2777
2778 static int
2779 lwp_running (struct inferior_list_entry *entry, void *data)
2780 {
2781 struct lwp_info *lwp = (struct lwp_info *) entry;
2782
2783 if (lwp->dead)
2784 return 0;
2785 if (lwp->stopped)
2786 return 0;
2787 return 1;
2788 }
2789
2790 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2791 If SUSPEND, then also increase the suspend count of every LWP,
2792 except EXCEPT. */
2793
2794 static void
2795 stop_all_lwps (int suspend, struct lwp_info *except)
2796 {
2797 stopping_threads = 1;
2798
2799 if (suspend)
2800 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2801 else
2802 find_inferior (&all_lwps, send_sigstop_callback, except);
2803 for_each_inferior (&all_lwps, wait_for_sigstop);
2804 stopping_threads = 0;
2805 }
2806
/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal; INFO, when non-NULL, is
   the siginfo to deliver along with SIGNAL.  No-op if the LWP is not
   stopped, or if it has a pending status to report.  May postpone
   SIGNAL (enqueueing it for later) when a breakpoint reinsert or a
   fast tracepoint collect is in progress.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
		      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;
  int fast_tp_collecting;

  /* Nothing to do if the LWP is already running.  */
  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  /* While stabilizing, only LWPs moving out of a jump pad should ever
     be resumed.  */
  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
	 anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
	  || lwp->pending_signals != NULL
	  || lwp->bp_reinsert != 0
	  || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
	memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
	memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  /* A status is already waiting to be reported; resuming now would
     lose that event.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
		 " has pending status\n",
		 lwpid_of (lwp), step ? "step" : "continue", signal,
		 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
	     lwpid_of (lwp), step ? "step" : "continue", signal,
	     lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
	fprintf (stderr, " pending reinsert at 0x%s\n",
		 paddress (lwp->bp_reinsert));

      if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
	{
	  if (fast_tp_collecting == 0)
	    {
	      if (step == 0)
		fprintf (stderr, "BAD - reinserting but not stepping.\n");
	      if (lwp->suspended)
		fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
			 lwp->suspended);
	    }

	  step = 1;
	}

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
	fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
		 lwpid_of (lwp));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
	fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
		 lwpid_of (lwp));

      if (can_hardware_single_step ())
	step = 1;
      else
	fatal ("moving out of jump pad single-stepping"
	       " not implemented on this target");

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN, we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
	fprintf (stderr,
		 "lwp %ld has a while-stepping action -> forcing step.\n",
		 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      /* Deliver the oldest pending signal first (FIFO).  */
      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  /* The thread's registers go stale once it runs again.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
	  /* Coerce to a uintptr_t first to avoid potential gcc warning
	     of coercing an 8 byte integer to a 4 byte pointer.  */
	  (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
	 running (an error) or that it is gone (a race condition).  If
	 it's gone, we will get a notification the next time we wait,
	 so we can ignore the error.  We could differentiate these
	 two, but it's tricky without waiting; the thread still exists
	 as a zombie, so sending it signal 0 would succeed.  So just
	 ignore ESRCH.  */
      if (errno == ESRCH)
	return;

      perror_with_name ("ptrace");
    }
}
3009
/* Bundles a GDB resume-request array with its element count, so both
   can be passed through the single void* argument of the
   find_inferior callback below.  */
struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};
3015
/* This function is called once per thread.  We look up the thread
   in RESUME_PTR, and mark the thread with a pointer to the appropriate
   resume request (the first matching element wins; no match leaves
   lwp->resume NULL, meaning "leave as is").

   This algorithm is O(threads * resume elements), but resume elements
   is small (and will remain small at least until GDB supports thread
   suspension).  */
static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int ndx;
  struct thread_resume_array *r;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);
  r = arg;

  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      /* A request applies to this thread if it is the wildcard (-1),
	 names the thread exactly, or names the thread's whole
	 process (either pid-only form).  */
      if (ptid_equal (ptid, minus_one_ptid)
	  || ptid_equal (ptid, entry->id)
	  || (ptid_is_pid (ptid)
	      && (ptid_get_pid (ptid) == pid_of (lwp)))
	  || (ptid_get_lwp (ptid) == -1
	      && (ptid_get_pid (ptid) == pid_of (lwp))))
	{
	  /* Redundant stop request for an already-stopping thread:
	     keep looking for a more specific action.  */
	  if (r->resume[ndx].kind == resume_stop
	      && thread->last_resume_kind == resume_stop)
	    {
	      if (debug_threads)
		fprintf (stderr, "already %s LWP %ld at GDB's request\n",
			 thread->last_status.kind == TARGET_WAITKIND_STOPPED
			 ? "stopped"
			 : "stopping",
			 lwpid_of (lwp));

	      continue;
	    }

	  lwp->resume = &r->resume[ndx];
	  thread->last_resume_kind = lwp->resume->kind;

	  /* If we had a deferred signal to report, dequeue one now.
	     This can happen if LWP gets more than one signal while
	     trying to get out of a jump pad.  */
	  if (lwp->stopped
	      && !lwp->status_pending_p
	      && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
	    {
	      lwp->status_pending_p = 1;

	      if (debug_threads)
		fprintf (stderr,
			 "Dequeueing deferred signal %d for LWP %ld, "
			 "leaving status pending.\n",
			 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
	    }

	  return 0;
	}
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}
3086
3087
3088 /* Set *FLAG_P if this lwp has an interesting status pending. */
3089 static int
3090 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3091 {
3092 struct lwp_info *lwp = (struct lwp_info *) entry;
3093
3094 /* LWPs which will not be resumed are not interesting, because
3095 we might not wait for them next time through linux_wait. */
3096 if (lwp->resume == NULL)
3097 return 0;
3098
3099 if (lwp->status_pending_p)
3100 * (int *) flag_p = 1;
3101
3102 return 0;
3103 }
3104
/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  Used with find_inferior, so returning 1 stops
   the search at the first LWP needing a step-over.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, not stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, suspended\n",
		 lwpid_of (lwp));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      /* NOTE(review): there is no early return here -- control falls
	 through to the PC/breakpoint checks below even when the
	 need_step_over flag is clear.  Looks deliberate (the
	 breakpoint check is authoritative), but confirm before
	 relying on the flag alone.  */
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Need step over [LWP %ld]? Cancelling, PC was changed. "
		 "Old stop_pc was 0x%s, PC is now 0x%s\n",
		 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
	 though.  */
      if (gdb_breakpoint_here (pc))
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, but found"
		     " GDB breakpoint at 0x%s; skipping step over\n",
		     lwpid_of (lwp), paddress (pc));

	  current_inferior = saved_inferior;
	  return 0;
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr,
		     "Need step over [LWP %ld]? yes, "
		     "found breakpoint at 0x%s\n",
		     lwpid_of (lwp), paddress (pc));

	  /* We've found an lwp that needs stepping over --- return 1 so
	     that find_inferior stops looking.  */
	  current_inferior = saved_inferior;

	  /* If the step over is cancelled, this is set again.  */
	  lwp->need_step_over = 0;
	  return 1;
	}
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
	     "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
	     lwpid_of (lwp), paddress (pc));

  return 0;
}
3235
3236 /* Start a step-over operation on LWP. When LWP stopped at a
3237 breakpoint, to make progress, we need to remove the breakpoint out
3238 of the way. If we let other threads run while we do that, they may
3239 pass by the breakpoint location and miss hitting it. To avoid
3240 that, a step-over momentarily stops all threads while LWP is
3241 single-stepped while the breakpoint is temporarily uninserted from
3242 the inferior. When the single-step finishes, we reinsert the
3243 breakpoint, and let all threads that are supposed to be running,
3244 run again.
3245
3246 On targets that don't support hardware single-step, we don't
3247 currently support full software single-stepping. Instead, we only
3248 support stepping over the thread event breakpoint, by asking the
3249 low target where to place a reinsert breakpoint. Since this
3250 routine assumes the breakpoint being stepped over is a thread event
3251 breakpoint, it usually assumes the return address of the current
3252 function is a good enough place to set the reinsert breakpoint. */
3253
static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
	     "Starting step-over on LWP %ld. Stopping all threads\n",
	     lwpid_of (lwp));

  /* Freeze every other LWP so none can run past the breakpoint
     location while it is momentarily removed.  LWP itself is the
     exception, and must not end up suspended.  */
  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  /* The breakpoint helpers below operate on the current inferior, so
     temporarily switch to LWP's thread.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  /* Record where the breakpoint was pulled out, so finish_step_over
     knows where to reinsert it.  */
  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      /* No hardware single-step support: plant a reinsert breakpoint
	 where the low target says execution will land next, and
	 resume LWP without stepping.  */
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}
3304
3305 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3306 start_step_over, if still there, and delete any reinsert
3307 breakpoints we've set, on non hardware single-step targets. */
3308
3309 static int
3310 finish_step_over (struct lwp_info *lwp)
3311 {
3312 if (lwp->bp_reinsert != 0)
3313 {
3314 if (debug_threads)
3315 fprintf (stderr, "Finished step over.\n");
3316
3317 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3318 may be no breakpoint to reinsert there by now. */
3319 reinsert_breakpoints_at (lwp->bp_reinsert);
3320 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3321
3322 lwp->bp_reinsert = 0;
3323
3324 /* Delete any software-single-step reinsert breakpoints. No
3325 longer needed. We don't have to worry about other threads
3326 hitting this trap, and later not being able to explain it,
3327 because we were stepping over a breakpoint, and we hold all
3328 threads but LWP stopped while doing that. */
3329 if (!can_hardware_single_step ())
3330 delete_reinsert_breakpoints ();
3331
3332 step_over_bkpt = null_ptid;
3333 return 1;
3334 }
3335 else
3336 return 0;
3337 }
3338
3339 /* This function is called once per thread. We check the thread's resume
3340 request, which will tell us whether to resume, step, or leave the thread
3341 stopped; and what signal, if any, it should be sent.
3342
3343 For threads which we aren't explicitly told otherwise, we preserve
3344 the stepping flag; this is used for stepping over gdbserver-placed
3345 breakpoints.
3346
3347 If pending_flags was set in any thread, we queue any needed
3348 signals, since we won't actually resume. We already have a pending
3349 event to report, so we don't need to preserve any step requests;
3350 they should be re-issued if necessary. */
3351
static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  /* Caller's decision: when set, no thread is actually resumed and
     we only queue signals.  */
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  /* No resume request recorded for this thread; leave it alone.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
	fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
	{
	  if (debug_threads)
	    fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

	  /* Stop the thread, and wait for the event asynchronously,
	     through the event loop.  */
	  send_sigstop (lwp);
	}
      else
	{
	  if (debug_threads)
	    fprintf (stderr, "already stopped LWP %ld\n",
		     lwpid_of (lwp));

	  /* The LWP may have been stopped in an internal event that
	     was not meant to be notified back to GDB (e.g., gdbserver
	     breakpoint), so we should be reporting a stop event in
	     this case too.  */

	  /* If the thread already has a pending SIGSTOP, this is a
	     no-op.  Otherwise, something later will presumably resume
	     the thread and this will cause it to cancel any pending
	     operation, due to last_resume_kind == resume_stop.  If
	     the thread already has a pending status to report, we
	     will still report it the next time we wait - see
	     status_pending_p_callback.  */

	  /* If we already have a pending signal to report, then
	     there's no need to queue a SIGSTOP, as this means we're
	     midway through moving the LWP out of the jumppad, and we
	     will report the pending signal as soon as that is
	     finished.  */
	  if (lwp->pending_signals_to_report == NULL)
	    send_sigstop (lwp);
	}

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread which is about to be resumed has a pending status,
     then don't resume any threads - we can just report the pending
     status.  Make sure to queue any signals that would otherwise be
     sent.  In all-stop mode, we do this decision based on if *any*
     thread has a pending status.  If there's a thread that needs the
     step-over-breakpoint dance, then don't resume any other thread
     but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
	fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
	{
	  struct pending_signals *p_sig;
	  p_sig = xmalloc (sizeof (*p_sig));
	  p_sig->prev = lwp->pending_signals;
	  p_sig->signal = lwp->resume->sig;
	  memset (&p_sig->info, 0, sizeof (siginfo_t));

	  /* If this is the same signal we were previously stopped by,
	     make sure to queue its siginfo.  We can ignore the return
	     value of ptrace; if it fails, we'll skip
	     PTRACE_SETSIGINFO.  */
	  if (WIFSTOPPED (lwp->last_status)
	      && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
	    ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

	  lwp->pending_signals = p_sig;
	}
    }

  /* The resume request has been consumed.  */
  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}
3462
/* Handle a vCont-style resume request: record each thread's resume
   request, decide whether anything may actually run (all-stop pending
   statuses and step-overs keep everything stopped), then act on each
   thread, and finally kick off a step-over if one is needed.  */
static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  /* Attach each RESUME_INFO entry to the matching thread's LWP.  */
  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering to start a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
					   need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
	fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
	fprintf (stderr,
		 "Not resuming, all-stop and found "
		 "an LWP with pending status\n");
      else
	fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  /* Step-over starts only after every other thread's request has been
     processed (and left stopped, per LEAVE_ALL_STOPPED).  */
  if (need_step_over)
    start_step_over (need_step_over);
}
3515
3516 /* This function is called once per thread. We check the thread's
3517 last resume request, which will tell us whether to resume, step, or
3518 leave the thread stopped. Any signal the client requested to be
3519 delivered has already been enqueued at this point.
3520
3521 If any thread that GDB wants running is stopped at an internal
3522 breakpoint that needs stepping over, we start a step-over operation
3523 on that particular thread, and leave all others stopped. */
3524
static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  int step;

  /* EXCEPT is the one LWP the caller wants left alone (e.g. the
     step-over thread itself).  */
  if (lwp == except)
    return 0;

  if (debug_threads)
    fprintf (stderr,
	     "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  /* Already running; nothing to do.  */
  if (!lwp->stopped)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  /* GDB asked for this thread to stay stopped and we have already
     reported the stop; keep it stopped.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
	fprintf (stderr, " client wants LWP to remain %ld stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  /* An unreported event is waiting; resuming would lose it.  */
  if (lwp->status_pending_p)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
		 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  /* Suspended LWPs are being held stopped deliberately (e.g. for
     someone else's step-over); don't resume them here.  */
  if (lwp->suspended)
    {
      if (debug_threads)
	fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop
      && lwp->pending_signals_to_report == NULL
      && lwp->collecting_fast_tracepoint == 0)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
	 last_status.kind check above would catch it, and we wouldn't
	 reach here.  This LWP may have been momentarily paused by a
	 stop_all_lwps call while handling for example, another LWP's
	 step-over.  In that case, the pending expected SIGSTOP signal
	 that was queued at vCont;t handling time will have already
	 been consumed by wait_for_sigstop, and so we need to requeue
	 another one here.  Note that if the LWP already has a SIGSTOP
	 pending, this is a no-op.  */

      if (debug_threads)
	fprintf (stderr,
		 "Client wants LWP %ld to stop. "
		 "Making sure it has a SIGSTOP pending\n",
		 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  /* Preserve the stepping flag from the last resume request.  */
  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}
3601
3602 static int
3603 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3604 {
3605 struct lwp_info *lwp = (struct lwp_info *) entry;
3606
3607 if (lwp == except)
3608 return 0;
3609
3610 lwp->suspended--;
3611 gdb_assert (lwp->suspended >= 0);
3612
3613 return proceed_one_lwp (entry, except);
3614 }
3615
3616 /* When we finish a step-over, set threads running again. If there's
3617 another thread that may need a step-over, now's the time to start
3618 it. Eventually, we'll move all threads past their breakpoints. */
3619
3620 static void
3621 proceed_all_lwps (void)
3622 {
3623 struct lwp_info *need_step_over;
3624
3625 /* If there is a thread which would otherwise be resumed, which is
3626 stopped at a breakpoint that needs stepping over, then don't
3627 resume any threads - have it step over the breakpoint with all
3628 other threads stopped, then resume all threads again. */
3629
3630 if (supports_breakpoints ())
3631 {
3632 need_step_over
3633 = (struct lwp_info *) find_inferior (&all_lwps,
3634 need_step_over_p, NULL);
3635
3636 if (need_step_over != NULL)
3637 {
3638 if (debug_threads)
3639 fprintf (stderr, "proceed_all_lwps: found "
3640 "thread %ld needing a step-over\n",
3641 lwpid_of (need_step_over));
3642
3643 start_step_over (need_step_over);
3644 return;
3645 }
3646 }
3647
3648 if (debug_threads)
3649 fprintf (stderr, "Proceeding, no step-over needed\n");
3650
3651 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3652 }
3653
3654 /* Stopped LWPs that the client wanted to be running, that don't have
3655 pending statuses, are set to run again, except for EXCEPT, if not
3656 NULL. This undoes a stop_all_lwps call. */
3657
3658 static void
3659 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3660 {
3661 if (debug_threads)
3662 {
3663 if (except)
3664 fprintf (stderr,
3665 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3666 else
3667 fprintf (stderr,
3668 "unstopping all lwps\n");
3669 }
3670
3671 if (unsuspend)
3672 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3673 else
3674 find_inferior (&all_lwps, proceed_one_lwp, except);
3675 }
3676
3677 #ifdef HAVE_LINUX_USRREGS
3678
3679 int
3680 register_addr (int regnum)
3681 {
3682 int addr;
3683
3684 if (regnum < 0 || regnum >= the_low_target.num_regs)
3685 error ("Invalid register number %d.", regnum);
3686
3687 addr = the_low_target.regmap[regnum];
3688
3689 return addr;
3690 }
3691
/* Fetch one register, REGNO, from the current inferior via
   PTRACE_PEEKUSER, one ptrace word at a time, and supply it to
   REGCACHE.  Registers the target cannot fetch are silently
   skipped.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  /* A regmap entry of -1 marks a register with no USER-area slot.  */
  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  /* Round the register size up to a whole number of ptrace transfer
     words.  */
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	  & - sizeof (PTRACE_XFER_TYPE));
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
	ptrace (PTRACE_PEEKUSER, pid,
		/* Coerce to a uintptr_t first to avoid potential gcc warning
		   of coercing an 8 byte integer to a 4 byte pointer.  */
		(PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
	error ("reading register %d: %s", regno, strerror (errno));
    }

  /* Let arch-specific code translate from ptrace layout to regcache
     layout when the target provides a hook for it.  */
  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}
3732
3733 /* Fetch all registers, or just one, from the child process. */
3734 static void
3735 usr_fetch_inferior_registers (struct regcache *regcache, int regno)
3736 {
3737 if (regno == -1)
3738 for (regno = 0; regno < the_low_target.num_regs; regno++)
3739 fetch_register (regcache, regno);
3740 else
3741 fetch_register (regcache, regno);
3742 }
3743
/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).
   Writes go through PTRACE_POKEUSER one ptrace word at a time.  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
	return;

      /* Targets mark some registers as never storable.  */
      if ((*the_low_target.cannot_store_register) (regno) == 1)
	return;

      /* A regmap entry of -1 marks a register with no USER-area
	 slot.  */
      regaddr = register_addr (regno);
      if (regaddr == -1)
	return;
      errno = 0;
      /* Round the register size up to a whole number of ptrace
	 transfer words, zero-filling the tail.  */
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
	& - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      /* Let arch-specific code translate from regcache layout to
	 ptrace layout when the target provides a hook for it.  */
      if (the_low_target.collect_ptrace_register)
	the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
	collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
	{
	  errno = 0;
	  ptrace (PTRACE_POKEUSER, pid,
		  /* Coerce to a uintptr_t first to avoid potential gcc warning
		     about coercing an 8 byte integer to a 4 byte pointer.  */
		  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
		  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
	  if (errno != 0)
	    {
	      /* At this point, ESRCH should mean the process is
		 already gone, in which case we simply ignore attempts
		 to change its registers.  See also the related
		 comment in linux_resume_one_lwp.  */
	      if (errno == ESRCH)
		return;

	      if ((*the_low_target.cannot_store_register) (regno) == 0)
		error ("writing register %d: %s", regno, strerror (errno));
	    }
	  regaddr += sizeof (PTRACE_XFER_TYPE);
	}
    }
  else
    /* REGNO of -1: recurse once per register.  */
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
3805 #endif /* HAVE_LINUX_USRREGS */
3806
3807
3808
3809 #ifdef HAVE_LINUX_REGSETS
3810
3811 static int
3812 regsets_fetch_inferior_registers (struct regcache *regcache)
3813 {
3814 struct regset_info *regset;
3815 int saw_general_regs = 0;
3816 int pid;
3817 struct iovec iov;
3818
3819 regset = target_regsets;
3820
3821 pid = lwpid_of (get_thread_lwp (current_inferior));
3822 while (regset->size >= 0)
3823 {
3824 void *buf, *data;
3825 int nt_type, res;
3826
3827 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3828 {
3829 regset ++;
3830 continue;
3831 }
3832
3833 buf = xmalloc (regset->size);
3834
3835 nt_type = regset->nt_type;
3836 if (nt_type)
3837 {
3838 iov.iov_base = buf;
3839 iov.iov_len = regset->size;
3840 data = (void *) &iov;
3841 }
3842 else
3843 data = buf;
3844
3845 #ifndef __sparc__
3846 res = ptrace (regset->get_request, pid, nt_type, data);
3847 #else
3848 res = ptrace (regset->get_request, pid, data, nt_type);
3849 #endif
3850 if (res < 0)
3851 {
3852 if (errno == EIO)
3853 {
3854 /* If we get EIO on a regset, do not try it again for
3855 this process. */
3856 disabled_regsets[regset - target_regsets] = 1;
3857 free (buf);
3858 continue;
3859 }
3860 else
3861 {
3862 char s[256];
3863 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3864 pid);
3865 perror (s);
3866 }
3867 }
3868 else if (regset->type == GENERAL_REGS)
3869 saw_general_regs = 1;
3870 regset->store_function (regcache, buf);
3871 regset ++;
3872 free (buf);
3873 }
3874 if (saw_general_regs)
3875 return 0;
3876 else
3877 return 1;
3878 }
3879
3880 static int
3881 regsets_store_inferior_registers (struct regcache *regcache)
3882 {
3883 struct regset_info *regset;
3884 int saw_general_regs = 0;
3885 int pid;
3886 struct iovec iov;
3887
3888 regset = target_regsets;
3889
3890 pid = lwpid_of (get_thread_lwp (current_inferior));
3891 while (regset->size >= 0)
3892 {
3893 void *buf, *data;
3894 int nt_type, res;
3895
3896 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3897 {
3898 regset ++;
3899 continue;
3900 }
3901
3902 buf = xmalloc (regset->size);
3903
3904 /* First fill the buffer with the current register set contents,
3905 in case there are any items in the kernel's regset that are
3906 not in gdbserver's regcache. */
3907
3908 nt_type = regset->nt_type;
3909 if (nt_type)
3910 {
3911 iov.iov_base = buf;
3912 iov.iov_len = regset->size;
3913 data = (void *) &iov;
3914 }
3915 else
3916 data = buf;
3917
3918 #ifndef __sparc__
3919 res = ptrace (regset->get_request, pid, nt_type, data);
3920 #else
3921 res = ptrace (regset->get_request, pid, &iov, data);
3922 #endif
3923
3924 if (res == 0)
3925 {
3926 /* Then overlay our cached registers on that. */
3927 regset->fill_function (regcache, buf);
3928
3929 /* Only now do we write the register set. */
3930 #ifndef __sparc__
3931 res = ptrace (regset->set_request, pid, nt_type, data);
3932 #else
3933 res = ptrace (regset->set_request, pid, data, nt_type);
3934 #endif
3935 }
3936
3937 if (res < 0)
3938 {
3939 if (errno == EIO)
3940 {
3941 /* If we get EIO on a regset, do not try it again for
3942 this process. */
3943 disabled_regsets[regset - target_regsets] = 1;
3944 free (buf);
3945 continue;
3946 }
3947 else if (errno == ESRCH)
3948 {
3949 /* At this point, ESRCH should mean the process is
3950 already gone, in which case we simply ignore attempts
3951 to change its registers. See also the related
3952 comment in linux_resume_one_lwp. */
3953 free (buf);
3954 return 0;
3955 }
3956 else
3957 {
3958 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3959 }
3960 }
3961 else if (regset->type == GENERAL_REGS)
3962 saw_general_regs = 1;
3963 regset ++;
3964 free (buf);
3965 }
3966 if (saw_general_regs)
3967 return 0;
3968 else
3969 return 1;
3970 return 0;
3971 }
3972
3973 #endif /* HAVE_LINUX_REGSETS */
3974
3975
/* Fetch registers from the inferior into REGCACHE.  REGNO is the
   register to fetch, or -1 for all.  Try the regset path first; fall
   back on PEEKUSER-style fetches if the regsets did not cover the
   general registers (regsets_fetch_inferior_registers returned
   nonzero).  */
void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}
3987
/* Write registers from REGCACHE back to the inferior.  REGNO is the
   register to store, or -1 for all.  Try the regset path first; fall
   back on POKEUSER-style stores if the regsets did not cover the
   general registers (regsets_store_inferior_registers returned
   nonzero).  */
void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}
3999
4000
/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Returns 0 on success, or
   the value of errno on failure.  Prefers a single /proc/PID/mem read
   for larger transfers, falling back on word-at-a-time ptrace.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
	 thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
	goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
	 supports it (only one syscall), and it's 64-bit safe even on
	 32-bit platforms (for instance, SPARC debugging a SPARC64
	 application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
	{
	  /* Short read or seek failure: fall back on ptrace below.  */
	  close (fd);
	  goto no_proc;
	}

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
	 about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
			  (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
	return errno;
    }

  /* Copy appropriate bytes out of the buffer.  The first word may
     contain bytes before MEMADDR, thanks to the rounding above.  */
  memcpy (myaddr,
	  (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
	  len);

  return 0;
}
4069
4070 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4071 memory at MEMADDR. On failure (cannot write to the inferior)
4072 returns the value of errno. */
4073
4074 static int
4075 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4076 {
4077 register int i;
4078 /* Round starting address down to longword boundary. */
4079 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4080 /* Round ending address up; get number of longwords that makes. */
4081 register int count
4082 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4083 / sizeof (PTRACE_XFER_TYPE);
4084
4085 /* Allocate buffer of that many longwords. */
4086 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4087 alloca (count * sizeof (PTRACE_XFER_TYPE));
4088
4089 int pid = lwpid_of (get_thread_lwp (current_inferior));
4090
4091 if (debug_threads)
4092 {
4093 /* Dump up to four bytes. */
4094 unsigned int val = * (unsigned int *) myaddr;
4095 if (len == 1)
4096 val = val & 0xff;
4097 else if (len == 2)
4098 val = val & 0xffff;
4099 else if (len == 3)
4100 val = val & 0xffffff;
4101 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4102 val, (long)memaddr);
4103 }
4104
4105 /* Fill start and end extra bytes of buffer with existing memory data. */
4106
4107 errno = 0;
4108 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4109 about coercing an 8 byte integer to a 4 byte pointer. */
4110 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4111 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4112 if (errno)
4113 return errno;
4114
4115 if (count > 1)
4116 {
4117 errno = 0;
4118 buffer[count - 1]
4119 = ptrace (PTRACE_PEEKTEXT, pid,
4120 /* Coerce to a uintptr_t first to avoid potential gcc warning
4121 about coercing an 8 byte integer to a 4 byte pointer. */
4122 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4123 * sizeof (PTRACE_XFER_TYPE)),
4124 0);
4125 if (errno)
4126 return errno;
4127 }
4128
4129 /* Copy data to be written over corresponding part of buffer. */
4130
4131 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4132 myaddr, len);
4133
4134 /* Write the entire buffer. */
4135
4136 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4137 {
4138 errno = 0;
4139 ptrace (PTRACE_POKETEXT, pid,
4140 /* Coerce to a uintptr_t first to avoid potential gcc warning
4141 about coercing an 8 byte integer to a 4 byte pointer. */
4142 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4143 (PTRACE_ARG4_TYPE) buffer[i]);
4144 if (errno)
4145 return errno;
4146 }
4147
4148 return 0;
4149 }
4150
/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  Determined by
   linux_test_for_tracefork, and consulted by
   linux_enable_event_reporting (before setting PTRACE_O_TRACECLONE)
   and linux_look_up_symbols.  */
static int linux_supports_tracefork_flag;
4153
4154 static void
4155 linux_enable_event_reporting (int pid)
4156 {
4157 if (!linux_supports_tracefork_flag)
4158 return;
4159
4160 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4161 }
4162
/* Helper functions for linux_test_for_tracefork, called via clone ().  */

/* The grandchild exists only so the child's fork generates a
   PTRACE_EVENT_FORK; exit immediately (without running exit
   handlers).  ARG is unused.  */
static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}
4170
/* Stack size handed to clone () for the test children below.  */
#define STACK_SIZE 4096

/* Body of the test child: arrange to be traced, stop so the parent
   can set ptrace options, then fork (or clone, on no-MMU systems) a
   grandchild so the parent can observe whether a fork event is
   reported.  ARG supplies the grandchild's stack in the clone
   case.  */
static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  /* Stop ourselves so the parent's waitpid sees us before we fork.  */
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
	    CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
	 CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}
4198
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it had the desired
   effect.  Sets linux_supports_tracefork_flag accordingly.  */

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Assume unsupported until proven otherwise.  */
  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
			CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
		     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  /* Wait for the child's self-sent SIGSTOP (see
     linux_tracefork_child).  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  /* Try to enable fork tracing on the stopped child.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		(PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* The kernel rejected the option: kill and reap the child,
	 leaving the flag cleared.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning ("linux_test_for_tracefork: failed to kill child");
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
	warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child", status);

      return;
    }

  /* Let the child run on to its fork and see what event comes
     back.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      /* A fork event was reported.  Confirm we can read the new
	 child's pid; if so, the option really works.  */
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* The grandchild is traced too; reap and dispose of it.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning ("linux_test_for_tracefork: failed to kill second child");
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)", ret, status);

  /* Kill the test child, repeating until it actually exits (it may
     report further stops first).  */
  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}
4305
4306
static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  /* Nothing to do if libthread_db has already been initialized for
     the current process.  */
  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}
4322
4323 static void
4324 linux_request_interrupt (void)
4325 {
4326 extern unsigned long signal_pid;
4327
4328 if (!ptid_equal (cont_thread, null_ptid)
4329 && !ptid_equal (cont_thread, minus_one_ptid))
4330 {
4331 struct lwp_info *lwp;
4332 int lwpid;
4333
4334 lwp = get_thread_lwp (current_inferior);
4335 lwpid = lwpid_of (lwp);
4336 kill_lwp (lwpid, SIGINT);
4337 }
4338 else
4339 kill_lwp (signal_pid, SIGINT);
4340 }
4341
4342 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4343 to debugger memory starting at MYADDR. */
4344
4345 static int
4346 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4347 {
4348 char filename[PATH_MAX];
4349 int fd, n;
4350 int pid = lwpid_of (get_thread_lwp (current_inferior));
4351
4352 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4353
4354 fd = open (filename, O_RDONLY);
4355 if (fd < 0)
4356 return -1;
4357
4358 if (offset != (CORE_ADDR) 0
4359 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4360 n = -1;
4361 else
4362 n = read (fd, myaddr, len);
4363
4364 close (fd);
4365
4366 return n;
4367 }
4368
4369 /* These breakpoint and watchpoint related wrapper functions simply
4370 pass on the function call if the target has registered a
4371 corresponding function. */
4372
4373 static int
4374 linux_insert_point (char type, CORE_ADDR addr, int len)
4375 {
4376 if (the_low_target.insert_point != NULL)
4377 return the_low_target.insert_point (type, addr, len);
4378 else
4379 /* Unsupported (see target.h). */
4380 return 1;
4381 }
4382
4383 static int
4384 linux_remove_point (char type, CORE_ADDR addr, int len)
4385 {
4386 if (the_low_target.remove_point != NULL)
4387 return the_low_target.remove_point (type, addr, len);
4388 else
4389 /* Unsupported (see target.h). */
4390 return 1;
4391 }
4392
4393 static int
4394 linux_stopped_by_watchpoint (void)
4395 {
4396 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4397
4398 return lwp->stopped_by_watchpoint;
4399 }
4400
4401 static CORE_ADDR
4402 linux_stopped_data_address (void)
4403 {
4404 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4405
4406 return lwp->stopped_data_address;
4407 }
4408
4409 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4410 #if defined(__mcoldfire__)
4411 /* These should really be defined in the kernel's ptrace.h header. */
4412 #define PT_TEXT_ADDR 49*4
4413 #define PT_DATA_ADDR 50*4
4414 #define PT_TEXT_END_ADDR 51*4
4415 #elif defined(BFIN)
4416 #define PT_TEXT_ADDR 220
4417 #define PT_TEXT_END_ADDR 224
4418 #define PT_DATA_ADDR 228
4419 #elif defined(__TMS320C6X__)
4420 #define PT_TEXT_ADDR (0x10000*4)
4421 #define PT_DATA_ADDR (0x10004*4)
4422 #define PT_TEXT_END_ADDR (0x10008*4)
4423 #endif
4424
4425 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4426 to tell gdb about. */
4427
static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* PTRACE_PEEKUSER returns the fetched value in-band, so clear errno
     beforehand and check it afterwards to detect failure of any of the
     three requests.  */
  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
	 used by gdb) are relative to the beginning of the program,
	 with the data segment immediately following the text segment.
	 However, the actual runtime layout in memory may put the data
	 somewhere else, so when we send gdb a data base-address, we
	 use the real data base address and subtract the compile-time
	 data base-address from it (which is just the length of the
	 text segment).  BSS immediately follows data in both
	 cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
 return 0;
}
4460 #endif
4461
4462 static int
4463 linux_qxfer_osdata (const char *annex,
4464 unsigned char *readbuf, unsigned const char *writebuf,
4465 CORE_ADDR offset, int len)
4466 {
4467 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4468 }
4469
4470 /* Convert a native/host siginfo object, into/from the siginfo in the
4471 layout of the inferiors' architecture. */
4472
4473 static void
4474 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4475 {
4476 int done = 0;
4477
4478 if (the_low_target.siginfo_fixup != NULL)
4479 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4480
4481 /* If there was no callback, or the callback didn't do anything,
4482 then just do a straight memcpy. */
4483 if (!done)
4484 {
4485 if (direction == 1)
4486 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4487 else
4488 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4489 }
4490 }
4491
/* Transfer part of the pending-signal siginfo of the current LWP:
   read it into READBUF, or (if READBUF is NULL) write WRITEBUF into
   it, covering LEN bytes starting at OFFSET.  Returns the number of
   bytes transferred, or -1 on error.  */

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
		    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
	     readbuf != NULL ? "Reading" : "Writing",
	     pid);

  /* Reject offsets past the end of the siginfo object.  */
  if (offset >= sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo even for writes, so that a partial
     write only modifies the bytes covered by OFFSET/LEN.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the remainder of the object.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
	return -1;
    }

  return len;
}
4540
4541 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4542 so we notice when children change state; as the handler for the
4543 sigsuspend in my_waitpid. */
4544
4545 static void
4546 sigchld_handler (int signo)
4547 {
4548 int old_errno = errno;
4549
4550 if (debug_threads)
4551 {
4552 do
4553 {
4554 /* fprintf is not async-signal-safe, so call write
4555 directly. */
4556 if (write (2, "sigchld_handler\n",
4557 sizeof ("sigchld_handler\n") - 1) < 0)
4558 break; /* just ignore */
4559 } while (0);
4560 }
4561
4562 if (target_is_async_p ())
4563 async_file_mark (); /* trigger a linux_wait */
4564
4565 errno = old_errno;
4566 }
4567
static int
linux_supports_non_stop (void)
{
  /* The Linux target always supports non-stop mode.  */
  const int supported = 1;

  return supported;
}
4573
/* Enable or disable async (non-stop) mode, registering with or
   unregistering from the event loop accordingly.  Returns the
   previous enable state.  */

static int
linux_async (int enable)
{
  /* The event pipe doubles as the async-mode flag: slot 0 is -1
     exactly when async mode is off.  */
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
	     enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Block SIGCHLD while switching modes so sigchld_handler never
	 observes a half-initialized (or half-torn-down) event pipe.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
	{
	  if (pipe (linux_event_pipe) == -1)
	    fatal ("creating event pipe failed.");

	  /* Non-blocking so async_file_mark can't stall when the pipe
	     is already full.  */
	  fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

	  /* Register the event loop handler.  */
	  add_file_handler (linux_event_pipe[0],
			    handle_target_event, NULL);

	  /* Always trigger a linux_wait.  */
	  async_file_mark ();
	}
      else
	{
	  delete_file_handler (linux_event_pipe[0]);

	  close (linux_event_pipe[0]);
	  close (linux_event_pipe[1]);
	  linux_event_pipe[0] = -1;
	  linux_event_pipe[1] = -1;
	}

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}
4621
static int
linux_start_non_stop (int nonstop)
{
  /* Async mode tracks non-stop: register with or unregister from the
     event loop as requested.  The previous state returned by
     linux_async is not needed here.  */
  (void) linux_async (nonstop);

  return 0;
}
4629
static int
linux_supports_multi_process (void)
{
  /* gdbserver on Linux can control several inferiors at once.  */
  const int supported = 1;

  return supported;
}
4635
4636
4637 /* Enumerate spufs IDs for process PID. */
4638 static int
4639 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4640 {
4641 int pos = 0;
4642 int written = 0;
4643 char path[128];
4644 DIR *dir;
4645 struct dirent *entry;
4646
4647 sprintf (path, "/proc/%ld/fd", pid);
4648 dir = opendir (path);
4649 if (!dir)
4650 return -1;
4651
4652 rewinddir (dir);
4653 while ((entry = readdir (dir)) != NULL)
4654 {
4655 struct stat st;
4656 struct statfs stfs;
4657 int fd;
4658
4659 fd = atoi (entry->d_name);
4660 if (!fd)
4661 continue;
4662
4663 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4664 if (stat (path, &st) != 0)
4665 continue;
4666 if (!S_ISDIR (st.st_mode))
4667 continue;
4668
4669 if (statfs (path, &stfs) != 0)
4670 continue;
4671 if (stfs.f_type != SPUFS_MAGIC)
4672 continue;
4673
4674 if (pos >= offset && pos + 4 <= offset + len)
4675 {
4676 *(unsigned int *)(buf + pos - offset) = fd;
4677 written += 4;
4678 }
4679 pos += 4;
4680 }
4681
4682 closedir (dir);
4683 return written;
4684 }
4685
4686 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4687 object type, using the /proc file system. */
4688 static int
4689 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4690 unsigned const char *writebuf,
4691 CORE_ADDR offset, int len)
4692 {
4693 long pid = lwpid_of (get_thread_lwp (current_inferior));
4694 char buf[128];
4695 int fd = 0;
4696 int ret = 0;
4697
4698 if (!writebuf && !readbuf)
4699 return -1;
4700
4701 if (!*annex)
4702 {
4703 if (!readbuf)
4704 return -1;
4705 else
4706 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4707 }
4708
4709 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4710 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4711 if (fd <= 0)
4712 return -1;
4713
4714 if (offset != 0
4715 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4716 {
4717 close (fd);
4718 return 0;
4719 }
4720
4721 if (writebuf)
4722 ret = write (fd, writebuf, (size_t) len);
4723 else
4724 ret = read (fd, readbuf, (size_t) len);
4725
4726 close (fd);
4727 return ret;
4728 }
4729
4730 #if defined PT_GETDSBT
/* One entry of a load map as returned by the PT_GETDSBT ptrace
   request: where a program-header segment actually got mapped.  */
struct target_loadseg
{
  /* Core address to which the segment is mapped.  */
  Elf32_Addr addr;
  /* VMA recorded in the program header.  */
  Elf32_Addr p_vaddr;
  /* Size of this segment in memory.  */
  Elf32_Word p_memsz;
};
4740
/* Layout of the data returned by the PT_GETDSBT ptrace request; see
   linux_read_loadmap below.  */
struct target_loadmap
{
  /* Protocol version number, must be zero.  */
  Elf32_Word version;
  /* Pointer to the DSBT table, its size, and the DSBT index.  */
  unsigned *dsbt_table;
  unsigned dsbt_size, dsbt_index;
  /* Number of segments in this map.  */
  Elf32_Word nsegs;
  /* The actual memory map.  */
  struct target_loadseg segs[/*nsegs*/];
};
4753 #endif
4754
4755 #if defined PT_GETDSBT
/* Read up to LEN bytes of the DSBT load map named by ANNEX ("exec" or
   "interp"), starting at OFFSET, into MYADDR.  Returns the number of
   bytes copied, or -1 on error.  */
static int
linux_read_loadmap (const char *annex, CORE_ADDR offset,
		    unsigned char *myaddr, unsigned int len)
{
  int pid = lwpid_of (get_thread_lwp (current_inferior));
  int addr = -1;
  struct target_loadmap *data = NULL;
  unsigned int actual_length, copy_length;

  if (strcmp (annex, "exec") == 0)
    addr= (int) PTRACE_GETDSBT_EXEC;
  else if (strcmp (annex, "interp") == 0)
    addr = (int) PTRACE_GETDSBT_INTERP;
  else
    return -1;

  /* The kernel stores a pointer to the map through the fourth
     argument.  */
  if (ptrace (PT_GETDSBT, pid, addr, &data) != 0)
    return -1;

  if (data == NULL)
    return -1;

  actual_length = sizeof (struct target_loadmap)
    + sizeof (struct target_loadseg) * data->nsegs;

  /* NOTE(review): if CORE_ADDR is an unsigned type, the OFFSET < 0
     test is vacuous -- harmless, but worth confirming.  */
  if (offset < 0 || offset > actual_length)
    return -1;

  copy_length = actual_length - offset < len ? actual_length - offset : len;
  memcpy (myaddr, (char *) data + offset, copy_length);
  return copy_length;
}
4788 #endif /* defined PT_GETDSBT */
4789
4790 static void
4791 linux_process_qsupported (const char *query)
4792 {
4793 if (the_low_target.process_qsupported != NULL)
4794 the_low_target.process_qsupported (query);
4795 }
4796
4797 static int
4798 linux_supports_tracepoints (void)
4799 {
4800 if (*the_low_target.supports_tracepoints == NULL)
4801 return 0;
4802
4803 return (*the_low_target.supports_tracepoints) ();
4804 }
4805
4806 static CORE_ADDR
4807 linux_read_pc (struct regcache *regcache)
4808 {
4809 if (the_low_target.get_pc == NULL)
4810 return 0;
4811
4812 return (*the_low_target.get_pc) (regcache);
4813 }
4814
4815 static void
4816 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
4817 {
4818 gdb_assert (the_low_target.set_pc != NULL);
4819
4820 (*the_low_target.set_pc) (regcache, pc);
4821 }
4822
4823 static int
4824 linux_thread_stopped (struct thread_info *thread)
4825 {
4826 return get_thread_lwp (thread)->stopped;
4827 }
4828
/* This exposes stop-all-threads functionality to other modules.
   FREEZE is passed straight through to stop_all_lwps.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}
4836
/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  UNFREEZE is passed straight through to unstop_all_lwps.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
4845
4846 static int
4847 linux_prepare_to_access_memory (void)
4848 {
4849 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4850 running LWP. */
4851 if (non_stop)
4852 linux_pause_all (1);
4853 return 0;
4854 }
4855
4856 static void
4857 linux_done_accessing_memory (void)
4858 {
4859 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
4860 running LWP. */
4861 if (non_stop)
4862 linux_unpause_all (1);
4863 }
4864
/* Install a fast-tracepoint jump pad by delegating to the
   architecture-specific builder in the_low_target.  All parameters
   are forwarded unchanged and the hook's return value is passed back.
   NOTE(review): the hook is called unconditionally, so ports that set
   it to NULL must not advertise fast tracepoint support.  */

static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end);
}
4881
4882 static struct emit_ops *
4883 linux_emit_ops (void)
4884 {
4885 if (the_low_target.emit_ops != NULL)
4886 return (*the_low_target.emit_ops) ();
4887 else
4888 return NULL;
4889 }
4890
/* The target_ops vector for the Linux low-level target.  The
   initializer is positional: entries must appear in the order declared
   by struct target_ops in target.h.  Operations that this port (or
   this configuration) does not provide are NULL.  */
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_prepare_to_access_memory,
  linux_done_accessing_memory,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
  /* read_offsets only exists for uClinux (no-MMU) configurations.  */
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
  /* TLS address lookup requires libthread_db.  */
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_common_core_of_thread,
  /* Loadmap reads are only meaningful on DSBT targets.  */
#if defined PT_GETDSBT
  linux_read_loadmap,
#else
  NULL,
#endif
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops
};
4956
4957 static void
4958 linux_init_signals ()
4959 {
4960 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
4961 to find what the cancel signal actually is. */
4962 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
4963 signal (__SIGRTMIN+1, SIG_IGN);
4964 #endif
4965 }
4966
/* Entry point called at gdbserver startup: register the Linux target
   vector, probe kernel ptrace capabilities, and install the SIGCHLD
   handler.  */
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
		       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the regsets (the table ends with a negative-size entry) and
     allocate one "disabled" flag byte per regset.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  /* SA_RESTART keeps interrupted system calls restarting where
     possible.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}