1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "linux-osdata.h"
22
23 #include <sys/wait.h>
24 #include <stdio.h>
25 #include <sys/param.h>
26 #include <sys/ptrace.h>
27 #include "linux-ptrace.h"
28 #include "linux-procfs.h"
29 #include <signal.h>
30 #include <sys/ioctl.h>
31 #include <fcntl.h>
32 #include <string.h>
33 #include <stdlib.h>
34 #include <unistd.h>
35 #include <errno.h>
36 #include <sys/syscall.h>
37 #include <sched.h>
38 #include <ctype.h>
39 #include <pwd.h>
40 #include <sys/types.h>
41 #include <dirent.h>
42 #include <sys/stat.h>
43 #include <sys/vfs.h>
44 #include <sys/uio.h>
45 #ifndef ELFMAG0
46 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
47 then ELFMAG0 will have been defined. If it didn't get included by
48 gdb_proc_service.h then including it will likely introduce a duplicate
49 definition of elf_fpregset_t. */
50 #include <elf.h>
51 #endif
52
53 #ifndef SPUFS_MAGIC
54 #define SPUFS_MAGIC 0x23c9b64e
55 #endif
56
57 #ifdef HAVE_PERSONALITY
58 # include <sys/personality.h>
59 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
60 # define ADDR_NO_RANDOMIZE 0x0040000
61 # endif
62 #endif
63
64 #ifndef O_LARGEFILE
65 #define O_LARGEFILE 0
66 #endif
67
68 #ifndef W_STOPCODE
69 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
70 #endif
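/* A worked example of the encoding above, for illustration: on targets
   where SIGSTOP is 19 (e.g. x86 and ARM), W_STOPCODE (SIGSTOP) yields
   (19 << 8) | 0x7f == 0x137f, a value for which WIFSTOPPED is true and
   WSTOPSIG recovers 19.  handle_extended_wait below relies on this to
   synthesize a stop status for a new clone.  */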
71
72 /* This is the kernel's hard limit. Not to be confused with
73 SIGRTMIN. */
74 #ifndef __SIGRTMIN
75 #define __SIGRTMIN 32
76 #endif
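/* For context (glibc behavior, not something this file controls): glibc
   reserves the first kernel RT signals for its own threading machinery
   (32 and 33 under NPTL), so the SIGRTMIN that applications see is
   typically 34 or 35.  That is also why linux_wait_for_lwp's debug
   output below filters out stop signals 32 and 33.  */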
77
78 #ifdef __UCLIBC__
79 #if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
80 #define HAS_NOMMU
81 #endif
82 #endif
83
84 /* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
85 representation of the thread ID.
86
87 ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
88 the same as the LWP ID.
89
90 ``all_processes'' is keyed by the "overall process ID", which
91 GNU/Linux calls tgid, "thread group ID". */
92
93 struct inferior_list all_lwps;
94
95 /* A list of all unknown processes which receive stop signals. Some other
96 process will presumably claim each of these as forked children
97 momentarily. */
98
99 struct inferior_list stopped_pids;
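/* The race in more detail: when a traced thread clones, the kernel can
   report the new child's initial SIGSTOP before we process the parent's
   PTRACE_EVENT_CLONE.  linux_wait_for_lwp parks such unknown PIDs here,
   and handle_extended_wait later claims them via pull_pid_from_list.  */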
100
101 /* FIXME this is a bit of a hack, and could be removed. */
102 int stopping_threads;
103
104 /* FIXME make into a target method? */
105 int using_threads = 1;
106
107 /* True if we're presently stabilizing threads (moving them out of
108 jump pads). */
109 static int stabilizing_threads;
110
111 /* This flag is true iff we've just created or attached to our first
112 inferior but it has not stopped yet. As soon as it does, we need
113 to call the low target's arch_setup callback. Doing this only on
114 the first inferior avoids reinializing the architecture on every
115 inferior, and avoids messing with the register caches of the
116 already running inferiors. NOTE: this assumes all inferiors under
117 control of gdbserver have the same architecture. */
118 static int new_inferior;
119
120 static void linux_resume_one_lwp (struct lwp_info *lwp,
121 int step, int signal, siginfo_t *info);
122 static void linux_resume (struct thread_resume *resume_info, size_t n);
123 static void stop_all_lwps (int suspend, struct lwp_info *except);
124 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
125 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
126 static void *add_lwp (ptid_t ptid);
127 static int linux_stopped_by_watchpoint (void);
128 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
129 static void proceed_all_lwps (void);
130 static int finish_step_over (struct lwp_info *lwp);
131 static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
132 static int kill_lwp (unsigned long lwpid, int signo);
133 static void linux_enable_event_reporting (int pid);
134
135 /* True if the low target can hardware single-step. Such targets
136 don't need a BREAKPOINT_REINSERT_ADDR callback. */
137
138 static int
139 can_hardware_single_step (void)
140 {
141 return (the_low_target.breakpoint_reinsert_addr == NULL);
142 }
143
144 /* True if the low target supports memory breakpoints. If so, we'll
145 have a GET_PC implementation. */
146
147 static int
148 supports_breakpoints (void)
149 {
150 return (the_low_target.get_pc != NULL);
151 }
152
153 /* Returns true if this target can support fast tracepoints. This
154 does not mean that the in-process agent has been loaded in the
155 inferior. */
156
157 static int
158 supports_fast_tracepoints (void)
159 {
160 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
161 }
162
163 struct pending_signals
164 {
165 int signal;
166 siginfo_t info;
167 struct pending_signals *prev;
168 };
169
170 #define PTRACE_ARG3_TYPE void *
171 #define PTRACE_ARG4_TYPE void *
172 #define PTRACE_XFER_TYPE long
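/* These match the glibc ptrace prototype; they are macros, presumably
   so that a port whose ptrace declaration uses different argument types
   can override them without touching every call site.  */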
173
174 #ifdef HAVE_LINUX_REGSETS
175 static char *disabled_regsets;
176 static int num_regsets;
177 #endif
178
179 /* The read/write ends of the pipe registered as waitable file in the
180 event loop. */
181 static int linux_event_pipe[2] = { -1, -1 };
182
183 /* True if we're currently in async mode. */
184 #define target_is_async_p() (linux_event_pipe[0] != -1)
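/* How the pipe is used (an observation about gdbserver's event loop,
   not an interface defined here): when async mode is enabled, the read
   end is registered with the event loop; a byte is written to
   linux_event_pipe[1] whenever an event becomes pending, waking the
   loop's select, and the pipe is drained once the event is consumed.
   This is the classic "self-pipe" pattern.  */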
185
186 static void send_sigstop (struct lwp_info *lwp);
187 static void wait_for_sigstop (struct inferior_list_entry *entry);
188
189 /* Return non-zero if HEADER is a 64-bit ELF file. */
190
191 static int
192 elf_64_header_p (const Elf64_Ehdr *header)
193 {
194 return (header->e_ident[EI_MAG0] == ELFMAG0
195 && header->e_ident[EI_MAG1] == ELFMAG1
196 && header->e_ident[EI_MAG2] == ELFMAG2
197 && header->e_ident[EI_MAG3] == ELFMAG3
198 && header->e_ident[EI_CLASS] == ELFCLASS64);
199 }
200
201 /* Return non-zero if FILE is a 64-bit ELF file,
202 zero if the file is not a 64-bit ELF file,
203 and -1 if the file is not accessible or doesn't exist. */
204
205 static int
206 elf_64_file_p (const char *file)
207 {
208 Elf64_Ehdr header;
209 int fd;
210
211 fd = open (file, O_RDONLY);
212 if (fd < 0)
213 return -1;
214
215 if (read (fd, &header, sizeof (header)) != sizeof (header))
216 {
217 close (fd);
218 return 0;
219 }
220 close (fd);
221
222 return elf_64_header_p (&header);
223 }
224
225 /* Accepts an integer PID; returns true if the executable that PID
226    is running is a 64-bit ELF file.  */
227
228 int
229 linux_pid_exe_is_elf_64_file (int pid)
230 {
231 char file[MAXPATHLEN];
232
233 sprintf (file, "/proc/%d/exe", pid);
234 return elf_64_file_p (file);
235 }
236
237 static void
238 delete_lwp (struct lwp_info *lwp)
239 {
240 remove_thread (get_lwp_thread (lwp));
241 remove_inferior (&all_lwps, &lwp->head);
242 free (lwp->arch_private);
243 free (lwp);
244 }
245
246 /* Add a process to the common process list, and set its private
247 data. */
248
249 static struct process_info *
250 linux_add_process (int pid, int attached)
251 {
252 struct process_info *proc;
253
254 /* Is this the first process? If so, then set the arch. */
255 if (all_processes.head == NULL)
256 new_inferior = 1;
257
258 proc = add_process (pid, attached);
259 proc->private = xcalloc (1, sizeof (*proc->private));
260
261 if (the_low_target.new_process != NULL)
262 proc->private->arch_private = the_low_target.new_process ();
263
264 return proc;
265 }
266
267 /* Wrapper function for waitpid which handles EINTR, and emulates
268 __WALL for systems where that is not available. */
269
270 static int
271 my_waitpid (int pid, int *status, int flags)
272 {
273 int ret, out_errno;
274
275 if (debug_threads)
276 fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
277
278 if (flags & __WALL)
279 {
280 sigset_t block_mask, org_mask, wake_mask;
281 int wnohang;
282
283 wnohang = (flags & WNOHANG) != 0;
284 flags &= ~(__WALL | __WCLONE);
285 flags |= WNOHANG;
286
287 /* Block all signals while here. This avoids knowing about
288 LinuxThread's signals. */
289 sigfillset (&block_mask);
290 sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
291
292 /* ... except during the sigsuspend below. */
293 sigemptyset (&wake_mask);
294
295 while (1)
296 {
297 /* Since all signals are blocked, there's no need to check
298 for EINTR here. */
299 ret = waitpid (pid, status, flags);
300 out_errno = errno;
301
302 if (ret == -1 && out_errno != ECHILD)
303 break;
304 else if (ret > 0)
305 break;
306
307 if (flags & __WCLONE)
308 {
309 /* We've tried both flavors now. If WNOHANG is set,
310 there's nothing else to do, just bail out. */
311 if (wnohang)
312 break;
313
314 if (debug_threads)
315 fprintf (stderr, "blocking\n");
316
317 /* Block waiting for signals. */
318 sigsuspend (&wake_mask);
319 }
320
321 flags ^= __WCLONE;
322 }
323
324 sigprocmask (SIG_SETMASK, &org_mask, NULL);
325 }
326 else
327 {
328 do
329 ret = waitpid (pid, status, flags);
330 while (ret == -1 && errno == EINTR);
331 out_errno = errno;
332 }
333
334 if (debug_threads)
335 fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
336 pid, flags, status ? *status : -1, ret);
337
338 errno = out_errno;
339 return ret;
340 }
341
342 /* Handle a GNU/Linux extended wait response. If we see a clone
343 event, we need to add the new LWP to our list (and not report the
344 trap to higher layers). */
345
346 static void
347 handle_extended_wait (struct lwp_info *event_child, int wstat)
348 {
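  /* With PTRACE_O_TRACECLONE set (see linux_enable_event_reporting),
     the kernel encodes a PTRACE_EVENT_* code in bits 16 and up of the
     wait status, while the low 16 bits still describe a plain SIGTRAP
     stop; PTRACE_GETEVENTMSG then retrieves the new thread's LWP id.  */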
349 int event = wstat >> 16;
350 struct lwp_info *new_lwp;
351
352 if (event == PTRACE_EVENT_CLONE)
353 {
354 ptid_t ptid;
355 unsigned long new_pid;
356 int ret, status = W_STOPCODE (SIGSTOP);
357
358 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
359
360 /* If we haven't already seen the new PID stop, wait for it now. */
361 if (! pull_pid_from_list (&stopped_pids, new_pid))
362 {
363 /* The new child has a pending SIGSTOP. We can't affect it until it
364 hits the SIGSTOP, but we're already attached. */
365
366 ret = my_waitpid (new_pid, &status, __WALL);
367
368 if (ret == -1)
369 perror_with_name ("waiting for new child");
370 else if (ret != new_pid)
371 warning ("wait returned unexpected PID %d", ret);
372 else if (!WIFSTOPPED (status))
373 warning ("wait returned unexpected status 0x%x", status);
374 }
375
376 linux_enable_event_reporting (new_pid);
377
378 ptid = ptid_build (pid_of (event_child), new_pid, 0);
379 new_lwp = (struct lwp_info *) add_lwp (ptid);
380 add_thread (ptid, new_lwp);
381
382 /* Either we're going to immediately resume the new thread
383 or leave it stopped. linux_resume_one_lwp is a nop if it
384 thinks the thread is currently running, so set this first
385 before calling linux_resume_one_lwp. */
386 new_lwp->stopped = 1;
387
388 /* Normally we will get the pending SIGSTOP. But in some cases
389 we might get another signal delivered to the group first.
390 If we do get another signal, be sure not to lose it. */
391 if (WSTOPSIG (status) == SIGSTOP)
392 {
393 if (stopping_threads)
394 new_lwp->stop_pc = get_stop_pc (new_lwp);
395 else
396 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
397 }
398 else
399 {
400 new_lwp->stop_expected = 1;
401
402 if (stopping_threads)
403 {
404 new_lwp->stop_pc = get_stop_pc (new_lwp);
405 new_lwp->status_pending_p = 1;
406 new_lwp->status_pending = status;
407 }
408 else
409 /* Pass the signal on. This is what GDB does - except
410 shouldn't we really report it instead? */
411 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
412 }
413
414 /* Always resume the current thread. If we are stopping
415 threads, it will have a pending SIGSTOP; we may as well
416 collect it now. */
417 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
418 }
419 }
420
421 /* Return the PC as read from the regcache of LWP, without any
422 adjustment. */
423
424 static CORE_ADDR
425 get_pc (struct lwp_info *lwp)
426 {
427 struct thread_info *saved_inferior;
428 struct regcache *regcache;
429 CORE_ADDR pc;
430
431 if (the_low_target.get_pc == NULL)
432 return 0;
433
434 saved_inferior = current_inferior;
435 current_inferior = get_lwp_thread (lwp);
436
437 regcache = get_thread_regcache (current_inferior, 1);
438 pc = (*the_low_target.get_pc) (regcache);
439
440 if (debug_threads)
441 fprintf (stderr, "pc is 0x%lx\n", (long) pc);
442
443 current_inferior = saved_inferior;
444 return pc;
445 }
446
447 /* This function should only be called if LWP got a SIGTRAP.
448 The SIGTRAP could mean several things.
449
450 On i386, where decr_pc_after_break is non-zero:
451 If we were single-stepping this process using PTRACE_SINGLESTEP,
452 we will get only the one SIGTRAP (even if the instruction we
453 stepped over was a breakpoint). The value of $eip will be the
454 next instruction.
455 If we continue the process using PTRACE_CONT, we will get a
456 SIGTRAP when we hit a breakpoint. The value of $eip will be
457 the instruction after the breakpoint (i.e. needs to be
458 decremented). If we report the SIGTRAP to GDB, we must also
459 report the undecremented PC. If we cancel the SIGTRAP, we
460 must resume at the decremented PC.
461
462 (Presumably, not yet tested) On a non-decr_pc_after_break machine
463 with hardware or kernel single-step:
464 If we single-step over a breakpoint instruction, our PC will
465 point at the following instruction. If we continue and hit a
466 breakpoint instruction, our PC will point at the breakpoint
467 instruction. */
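/* A concrete instance of the first case: on i386, decr_pc_after_break
   is 1, the size of the one-byte int3 breakpoint instruction.
   Continuing into a breakpoint planted at 0x8048000 stops with $eip ==
   0x8048001, so get_stop_pc below subtracts 1 and reports 0x8048000.  */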
468
469 static CORE_ADDR
470 get_stop_pc (struct lwp_info *lwp)
471 {
472 CORE_ADDR stop_pc;
473
474 if (the_low_target.get_pc == NULL)
475 return 0;
476
477 stop_pc = get_pc (lwp);
478
479 if (WSTOPSIG (lwp->last_status) == SIGTRAP
480 && !lwp->stepping
481 && !lwp->stopped_by_watchpoint
482 && lwp->last_status >> 16 == 0)
483 stop_pc -= the_low_target.decr_pc_after_break;
484
485 if (debug_threads)
486 fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
487
488 return stop_pc;
489 }
490
491 static void *
492 add_lwp (ptid_t ptid)
493 {
494 struct lwp_info *lwp;
495
496 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
497 memset (lwp, 0, sizeof (*lwp));
498
499 lwp->head.id = ptid;
500
501 if (the_low_target.new_thread != NULL)
502 lwp->arch_private = the_low_target.new_thread ();
503
504 add_inferior_to_list (&all_lwps, &lwp->head);
505
506 return lwp;
507 }
508
509 /* Start an inferior process and return its pid.
510 ALLARGS is a vector of program-name and args. */
511
512 static int
513 linux_create_inferior (char *program, char **allargs)
514 {
515 #ifdef HAVE_PERSONALITY
516 int personality_orig = 0, personality_set = 0;
517 #endif
518 struct lwp_info *new_lwp;
519 int pid;
520 ptid_t ptid;
521
522 #ifdef HAVE_PERSONALITY
523 if (disable_randomization)
524 {
525 errno = 0;
526 personality_orig = personality (0xffffffff);
527 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
528 {
529 personality_set = 1;
530 personality (personality_orig | ADDR_NO_RANDOMIZE);
531 }
532 if (errno != 0 || (personality_set
533 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
534 warning ("Error disabling address space randomization: %s",
535 strerror (errno));
536 }
537 #endif
538
539 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
540 pid = vfork ();
541 #else
542 pid = fork ();
543 #endif
544 if (pid < 0)
545 perror_with_name ("fork");
546
547 if (pid == 0)
548 {
549 ptrace (PTRACE_TRACEME, 0, 0, 0);
550
551 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
552 signal (__SIGRTMIN + 1, SIG_DFL);
553 #endif
554
555 setpgid (0, 0);
556
557 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
558 stdout to stderr so that inferior i/o doesn't corrupt the connection.
559 Also, redirect stdin to /dev/null. */
560 if (remote_connection_is_stdio ())
561 {
562 close (0);
563 open ("/dev/null", O_RDONLY);
564 dup2 (2, 1);
565 if (write (2, "stdin/stdout redirected\n",
566 sizeof ("stdin/stdout redirected\n") - 1) < 0)
567 /* Errors ignored. */;
568 }
569
570 execv (program, allargs);
571 if (errno == ENOENT)
572 execvp (program, allargs);
573
574 fprintf (stderr, "Cannot exec %s: %s.\n", program,
575 strerror (errno));
576 fflush (stderr);
577 _exit (0177);
578 }
579
580 #ifdef HAVE_PERSONALITY
581 if (personality_set)
582 {
583 errno = 0;
584 personality (personality_orig);
585 if (errno != 0)
586 warning ("Error restoring address space randomization: %s",
587 strerror (errno));
588 }
589 #endif
590
591 linux_add_process (pid, 0);
592
593 ptid = ptid_build (pid, pid, 0);
594 new_lwp = add_lwp (ptid);
595 add_thread (ptid, new_lwp);
596 new_lwp->must_set_ptrace_flags = 1;
597
598 return pid;
599 }
600
601 /* Attach to an inferior process. */
602
603 static void
604 linux_attach_lwp_1 (unsigned long lwpid, int initial)
605 {
606 ptid_t ptid;
607 struct lwp_info *new_lwp;
608
609 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
610 {
611 if (!initial)
612 {
613 /* If we fail to attach to an LWP, just warn. */
614 fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
615 strerror (errno), errno);
616 fflush (stderr);
617 return;
618 }
619 else
620 /* If we fail to attach to a process, report an error. */
621 error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
622 strerror (errno), errno);
623 }
624
625 if (initial)
626 /* If lwp is the tgid, we handle adding existing threads later.
627 Otherwise we just add lwp without bothering about any other
628 threads. */
629 ptid = ptid_build (lwpid, lwpid, 0);
630 else
631 {
632 /* Note that extracting the pid from the current inferior is
633 safe, since we're always called in the context of the same
634 process as this new thread. */
635 int pid = pid_of (get_thread_lwp (current_inferior));
636 ptid = ptid_build (pid, lwpid, 0);
637 }
638
639 new_lwp = (struct lwp_info *) add_lwp (ptid);
640 add_thread (ptid, new_lwp);
641
642 /* We need to wait for SIGSTOP before being able to make the next
643 ptrace call on this LWP. */
644 new_lwp->must_set_ptrace_flags = 1;
645
646 if (linux_proc_pid_is_stopped (lwpid))
647 {
648 if (debug_threads)
649 fprintf (stderr,
650 "Attached to a stopped process\n");
651
652 /* The process is definitely stopped. It is in a job control
653 stop, unless the kernel predates the TASK_STOPPED /
654 TASK_TRACED distinction, in which case it might be in a
655 ptrace stop. Make sure it is in a ptrace stop; from there we
656 can kill it, signal it, et cetera.
657
658 First make sure there is a pending SIGSTOP. Since we are
659 already attached, the process can not transition from stopped
660 to running without a PTRACE_CONT; so we know this signal will
661 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
662 probably already in the queue (unless this kernel is old
663 enough to use TASK_STOPPED for ptrace stops); but since
664 SIGSTOP is not an RT signal, it can only be queued once. */
665 kill_lwp (lwpid, SIGSTOP);
666
667 /* Finally, resume the stopped process. This will deliver the
668 SIGSTOP (or a higher priority signal, just like normal
669 PTRACE_ATTACH), which we'll catch later on. */
670 ptrace (PTRACE_CONT, lwpid, 0, 0);
671 }
672
673 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
674 brings it to a halt.
675
676 There are several cases to consider here:
677
678 1) gdbserver has already attached to the process and is being notified
679 of a new thread that is being created.
680 In this case we should ignore that SIGSTOP and resume the
681 process. This is handled below by setting stop_expected = 1,
682 and the fact that add_thread sets last_resume_kind ==
683 resume_continue.
684
685 2) This is the first thread (the process thread), and we're attaching
686 to it via attach_inferior.
687 In this case we want the process thread to stop.
688 This is handled by having linux_attach set last_resume_kind ==
689 resume_stop after we return.
690
691 If the pid we are attaching to is also the tgid, we attach to and
692 stop all the existing threads. Otherwise, we attach to pid and
693 ignore any other threads in the same group as this pid.
694
695 3) GDB is connecting to gdbserver and is requesting an enumeration of all
696 existing threads.
697 In this case we want the thread to stop.
698 FIXME: This case is currently not properly handled.
699 We should wait for the SIGSTOP but don't. Things work apparently
700 because enough time passes between when we ptrace (ATTACH) and when
701 gdb makes the next ptrace call on the thread.
702
703 On the other hand, if we are currently trying to stop all threads, we
704 should treat the new thread as if we had sent it a SIGSTOP. This works
705 because we are guaranteed that the add_lwp call above added us to the
706 end of the list, and so the new thread has not yet reached
707 wait_for_sigstop (but will). */
708 new_lwp->stop_expected = 1;
709 }
710
711 void
712 linux_attach_lwp (unsigned long lwpid)
713 {
714 linux_attach_lwp_1 (lwpid, 0);
715 }
716
717 /* Attach to PID. If PID is the tgid, attach to it and all
718 of its threads. */
719
720 int
721 linux_attach (unsigned long pid)
722 {
723 /* Attach to PID. We will check for other threads
724 soon. */
725 linux_attach_lwp_1 (pid, 1);
726 linux_add_process (pid, 1);
727
728 if (!non_stop)
729 {
730 struct thread_info *thread;
731
732 /* Don't ignore the initial SIGSTOP if we just attached to this
733 process. It will be collected by wait shortly. */
734 thread = find_thread_ptid (ptid_build (pid, pid, 0));
735 thread->last_resume_kind = resume_stop;
736 }
737
738 if (linux_proc_get_tgid (pid) == pid)
739 {
740 DIR *dir;
741 char pathname[128];
742
743 sprintf (pathname, "/proc/%ld/task", pid);
744
745 dir = opendir (pathname);
746
747 if (!dir)
748 {
749 fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
750 fflush (stderr);
751 }
752 else
753 {
754 /* At this point we attached to the tgid. Scan the task for
755 existing threads. */
756 unsigned long lwp;
757 int new_threads_found;
758 int iterations = 0;
759 struct dirent *dp;
760
761 while (iterations < 2)
762 {
763 new_threads_found = 0;
764 /* Add all the other threads. While we go through the
765 threads, new threads may be spawned. Cycle through
766 the list of threads until we have done two iterations without
767 finding new threads. */
768 while ((dp = readdir (dir)) != NULL)
769 {
770 /* Fetch one lwp. */
771 lwp = strtoul (dp->d_name, NULL, 10);
772
773 /* Is this a new thread? */
774 if (lwp
775 && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
776 {
777 linux_attach_lwp_1 (lwp, 0);
778 new_threads_found++;
779
780 if (debug_threads)
781 fprintf (stderr, "\
782 Found and attached to new lwp %ld\n", lwp);
783 }
784 }
785
786 if (!new_threads_found)
787 iterations++;
788 else
789 iterations = 0;
790
791 rewinddir (dir);
792 }
793 closedir (dir);
794 }
795 }
796
797 return 0;
798 }
799
800 struct counter
801 {
802 int pid;
803 int count;
804 };
805
806 static int
807 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
808 {
809 struct counter *counter = args;
810
811 if (ptid_get_pid (entry->id) == counter->pid)
812 {
813 if (++counter->count > 1)
814 return 1;
815 }
816
817 return 0;
818 }
819
820 static int
821 last_thread_of_process_p (struct thread_info *thread)
822 {
823 ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
824 int pid = ptid_get_pid (ptid);
825 struct counter counter = { pid , 0 };
826
827 return (find_inferior (&all_threads,
828 second_thread_of_pid_p, &counter) == NULL);
829 }
830
831 /* Kill LWP. */
832
833 static void
834 linux_kill_one_lwp (struct lwp_info *lwp)
835 {
836 int pid = lwpid_of (lwp);
837
838 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
839 there is no signal context, and ptrace(PTRACE_KILL) (or
840 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
841 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
842 alternative is to kill with SIGKILL. We only need one SIGKILL
843 per process, not one for each thread. But since we still support
844 linuxthreads, and we also support debugging programs using raw
845 clone without CLONE_THREAD, we send one for each thread. For
846 years, we used PTRACE_KILL only, so we're being a bit paranoid
847 about some old kernels where PTRACE_KILL might work better
848 (dubious if there are any such, but that's why it's paranoia), so
849 we try SIGKILL first, PTRACE_KILL second, and so we're fine
850 everywhere. */
851
852 errno = 0;
853 kill (pid, SIGKILL);
854 if (debug_threads)
855 fprintf (stderr,
856 "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
857 target_pid_to_str (ptid_of (lwp)),
858 errno ? strerror (errno) : "OK");
859
860 errno = 0;
861 ptrace (PTRACE_KILL, pid, 0, 0);
862 if (debug_threads)
863 fprintf (stderr,
864 "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
865 target_pid_to_str (ptid_of (lwp)),
866 errno ? strerror (errno) : "OK");
867 }
868
869 /* Callback for `find_inferior'. Kills an lwp of a given process,
870 except the leader. */
871
872 static int
873 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
874 {
875 struct thread_info *thread = (struct thread_info *) entry;
876 struct lwp_info *lwp = get_thread_lwp (thread);
877 int wstat;
878 int pid = * (int *) args;
879
880 if (ptid_get_pid (entry->id) != pid)
881 return 0;
882
883 /* We avoid killing the first thread here, because of a Linux kernel (at
884 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
885 the children get a chance to be reaped, it will remain a zombie
886 forever. */
887
888 if (lwpid_of (lwp) == pid)
889 {
890 if (debug_threads)
891 fprintf (stderr, "lkop: is last of process %s\n",
892 target_pid_to_str (entry->id));
893 return 0;
894 }
895
896 do
897 {
898 linux_kill_one_lwp (lwp);
899
900 /* Make sure it died. The loop is most likely unnecessary. */
901 pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
902 } while (pid > 0 && WIFSTOPPED (wstat));
903
904 return 0;
905 }
906
907 static int
908 linux_kill (int pid)
909 {
910 struct process_info *process;
911 struct lwp_info *lwp;
912 int wstat;
913 int lwpid;
914
915 process = find_process_pid (pid);
916 if (process == NULL)
917 return -1;
918
919 /* If we're killing a running inferior, make sure it is stopped
920 first, as PTRACE_KILL will not work otherwise. */
921 stop_all_lwps (0, NULL);
922
923   find_inferior (&all_threads, kill_one_lwp_callback, &pid);
924
925 /* See the comment in linux_kill_one_lwp. We did not kill the first
926 thread in the list, so do so now. */
927 lwp = find_lwp_pid (pid_to_ptid (pid));
928
929 if (lwp == NULL)
930 {
931 if (debug_threads)
932 fprintf (stderr, "lk_1: cannot find lwp %ld, for pid: %d\n",
933 lwpid_of (lwp), pid);
934 }
935 else
936 {
937 if (debug_threads)
938 fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
939 lwpid_of (lwp), pid);
940
941 do
942 {
943 linux_kill_one_lwp (lwp);
944
945 /* Make sure it died. The loop is most likely unnecessary. */
946 lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
947 } while (lwpid > 0 && WIFSTOPPED (wstat));
948 }
949
950 the_target->mourn (process);
951
952 /* Since we presently can only stop all lwps of all processes, we
953 need to unstop lwps of other processes. */
954 unstop_all_lwps (0, NULL);
955 return 0;
956 }
957
958 static int
959 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
960 {
961 struct thread_info *thread = (struct thread_info *) entry;
962 struct lwp_info *lwp = get_thread_lwp (thread);
963 int pid = * (int *) args;
964
965 if (ptid_get_pid (entry->id) != pid)
966 return 0;
967
968 /* If this process is stopped but is expecting a SIGSTOP, then make
969 sure we take care of that now. This isn't absolutely guaranteed
970 to collect the SIGSTOP, but is fairly likely to. */
971 if (lwp->stop_expected)
972 {
973 int wstat;
974 /* Clear stop_expected, so that the SIGSTOP will be reported. */
975 lwp->stop_expected = 0;
976 linux_resume_one_lwp (lwp, 0, 0, NULL);
977 linux_wait_for_event (lwp->head.id, &wstat, __WALL);
978 }
979
980 /* Flush any pending changes to the process's registers. */
981 regcache_invalidate_one ((struct inferior_list_entry *)
982 get_lwp_thread (lwp));
983
984 /* Finally, let it resume. */
985 if (the_low_target.prepare_to_resume != NULL)
986 the_low_target.prepare_to_resume (lwp);
987 ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
988
989 delete_lwp (lwp);
990 return 0;
991 }
992
993 static int
994 linux_detach (int pid)
995 {
996 struct process_info *process;
997
998 process = find_process_pid (pid);
999 if (process == NULL)
1000 return -1;
1001
1002 /* Stop all threads before detaching. First, ptrace requires that
1003    the thread is stopped to successfully detach.  Second, thread_db
1004 may need to uninstall thread event breakpoints from memory, which
1005 only works with a stopped process anyway. */
1006 stop_all_lwps (0, NULL);
1007
1008 #ifdef USE_THREAD_DB
1009 thread_db_detach (process);
1010 #endif
1011
1012 /* Stabilize threads (move out of jump pads). */
1013 stabilize_threads ();
1014
1015 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1016
1017 the_target->mourn (process);
1018
1019 /* Since we presently can only stop all lwps of all processes, we
1020 need to unstop lwps of other processes. */
1021 unstop_all_lwps (0, NULL);
1022 return 0;
1023 }
1024
1025 /* Remove all LWPs that belong to process PROC from the lwp list. */
1026
1027 static int
1028 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1029 {
1030 struct lwp_info *lwp = (struct lwp_info *) entry;
1031 struct process_info *process = proc;
1032
1033 if (pid_of (lwp) == pid_of (process))
1034 delete_lwp (lwp);
1035
1036 return 0;
1037 }
1038
1039 static void
1040 linux_mourn (struct process_info *process)
1041 {
1042 struct process_info_private *priv;
1043
1044 #ifdef USE_THREAD_DB
1045 thread_db_mourn (process);
1046 #endif
1047
1048 find_inferior (&all_lwps, delete_lwp_callback, process);
1049
1050   /* Free all private data.  */
1051 priv = process->private;
1052 free (priv->arch_private);
1053 free (priv);
1054 process->private = NULL;
1055
1056 remove_process (process);
1057 }
1058
1059 static void
1060 linux_join (int pid)
1061 {
1062 int status, ret;
1063
1064 do {
1065 ret = my_waitpid (pid, &status, 0);
1066 if (WIFEXITED (status) || WIFSIGNALED (status))
1067 break;
1068 } while (ret != -1 || errno != ECHILD);
1069 }
1070
1071 /* Return nonzero if the given thread is still alive. */
1072 static int
1073 linux_thread_alive (ptid_t ptid)
1074 {
1075 struct lwp_info *lwp = find_lwp_pid (ptid);
1076
1077 /* We assume we always know if a thread exits. If a whole process
1078 exited but we still haven't been able to report it to GDB, we'll
1079 hold on to the last lwp of the dead process. */
1080 if (lwp != NULL)
1081 return !lwp->dead;
1082 else
1083 return 0;
1084 }
1085
1086 /* Return 1 if this lwp has an interesting status pending. */
1087 static int
1088 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1089 {
1090 struct lwp_info *lwp = (struct lwp_info *) entry;
1091 ptid_t ptid = * (ptid_t *) arg;
1092 struct thread_info *thread;
1093
1094 /* Check if we're only interested in events from a specific process
1095 or its lwps. */
1096 if (!ptid_equal (minus_one_ptid, ptid)
1097 && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
1098 return 0;
1099
1100 thread = get_lwp_thread (lwp);
1101
1102 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1103 report any status pending the LWP may have. */
1104 if (thread->last_resume_kind == resume_stop
1105 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1106 return 0;
1107
1108 return lwp->status_pending_p;
1109 }
1110
1111 static int
1112 same_lwp (struct inferior_list_entry *entry, void *data)
1113 {
1114 ptid_t ptid = *(ptid_t *) data;
1115 int lwp;
1116
1117 if (ptid_get_lwp (ptid) != 0)
1118 lwp = ptid_get_lwp (ptid);
1119 else
1120 lwp = ptid_get_pid (ptid);
1121
1122 if (ptid_get_lwp (entry->id) == lwp)
1123 return 1;
1124
1125 return 0;
1126 }
1127
1128 struct lwp_info *
1129 find_lwp_pid (ptid_t ptid)
1130 {
1131 return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1132 }
1133
1134 static struct lwp_info *
1135 linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1136 {
1137 int ret;
1138 int to_wait_for = -1;
1139 struct lwp_info *child = NULL;
1140
1141 if (debug_threads)
1142 fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1143
1144 if (ptid_equal (ptid, minus_one_ptid))
1145 to_wait_for = -1; /* any child */
1146 else
1147 to_wait_for = ptid_get_lwp (ptid); /* this lwp only */
1148
1149 options |= __WALL;
1150
1151 retry:
1152
1153 ret = my_waitpid (to_wait_for, wstatp, options);
1154 if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1155 return NULL;
1156 else if (ret == -1)
1157 perror_with_name ("waitpid");
1158
1159 if (debug_threads
1160 && (!WIFSTOPPED (*wstatp)
1161 || (WSTOPSIG (*wstatp) != 32
1162 && WSTOPSIG (*wstatp) != 33)))
1163 fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1164
1165 child = find_lwp_pid (pid_to_ptid (ret));
1166
1167 /* If we didn't find a process, one of two things presumably happened:
1168 - A process we started and then detached from has exited. Ignore it.
1169 - A process we are controlling has forked and the new child's stop
1170 was reported to us by the kernel. Save its PID. */
1171 if (child == NULL && WIFSTOPPED (*wstatp))
1172 {
1173 add_pid_to_list (&stopped_pids, ret);
1174 goto retry;
1175 }
1176 else if (child == NULL)
1177 goto retry;
1178
1179 child->stopped = 1;
1180
1181 child->last_status = *wstatp;
1182
1183 /* Architecture-specific setup after inferior is running.
1184 This needs to happen after we have attached to the inferior
1185 and it is stopped for the first time, but before we access
1186 any inferior registers. */
1187 if (new_inferior)
1188 {
1189 the_low_target.arch_setup ();
1190 #ifdef HAVE_LINUX_REGSETS
1191 memset (disabled_regsets, 0, num_regsets);
1192 #endif
1193 new_inferior = 0;
1194 }
1195
1196 /* Fetch the possibly triggered data watchpoint info and store it in
1197 CHILD.
1198
1199 On some archs, like x86, that use debug registers to set
1200 watchpoints, it's possible that the way to know which watched
1201 address trapped, is to check the register that is used to select
1202 which address to watch. Problem is, between setting the
1203 watchpoint and reading back which data address trapped, the user
1204 may change the set of watchpoints, and, as a consequence, GDB
1205 changes the debug registers in the inferior. To avoid reading
1206 back a stale stopped-data-address when that happens, we cache in
1207      CHILD the fact that a watchpoint trapped, and the corresponding data
1208 address, as soon as we see CHILD stop with a SIGTRAP. If GDB
1209 changes the debug registers meanwhile, we have the cached data we
1210 can rely on. */
1211
1212 if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
1213 {
1214 if (the_low_target.stopped_by_watchpoint == NULL)
1215 {
1216 child->stopped_by_watchpoint = 0;
1217 }
1218 else
1219 {
1220 struct thread_info *saved_inferior;
1221
1222 saved_inferior = current_inferior;
1223 current_inferior = get_lwp_thread (child);
1224
1225 child->stopped_by_watchpoint
1226 = the_low_target.stopped_by_watchpoint ();
1227
1228 if (child->stopped_by_watchpoint)
1229 {
1230 if (the_low_target.stopped_data_address != NULL)
1231 child->stopped_data_address
1232 = the_low_target.stopped_data_address ();
1233 else
1234 child->stopped_data_address = 0;
1235 }
1236
1237 current_inferior = saved_inferior;
1238 }
1239 }
1240
1241 /* Store the STOP_PC, with adjustment applied. This depends on the
1242 architecture being defined already (so that CHILD has a valid
1243 regcache), and on LAST_STATUS being set (to check for SIGTRAP or
1244 not). */
1245 if (WIFSTOPPED (*wstatp))
1246 child->stop_pc = get_stop_pc (child);
1247
1248 if (debug_threads
1249 && WIFSTOPPED (*wstatp)
1250 && the_low_target.get_pc != NULL)
1251 {
1252 struct thread_info *saved_inferior = current_inferior;
1253 struct regcache *regcache;
1254 CORE_ADDR pc;
1255
1256 current_inferior = get_lwp_thread (child);
1257 regcache = get_thread_regcache (current_inferior, 1);
1258 pc = (*the_low_target.get_pc) (regcache);
1259 fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1260 current_inferior = saved_inferior;
1261 }
1262
1263 return child;
1264 }
1265
1266 /* This function should only be called if the LWP got a SIGTRAP.
1267
1268    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1269 event was handled, 0 otherwise. */
1270
1271 static int
1272 handle_tracepoints (struct lwp_info *lwp)
1273 {
1274 struct thread_info *tinfo = get_lwp_thread (lwp);
1275 int tpoint_related_event = 0;
1276
1277 /* If this tracepoint hit causes a tracing stop, we'll immediately
1278 uninsert tracepoints. To do this, we temporarily pause all
1279 threads, unpatch away, and then unpause threads. We need to make
1280 sure the unpausing doesn't resume LWP too. */
1281 lwp->suspended++;
1282
1283 /* And we need to be sure that any all-threads-stopping doesn't try
1284 to move threads out of the jump pads, as it could deadlock the
1285 inferior (LWP could be in the jump pad, maybe even holding the
1286      lock).  */
1287
1288 /* Do any necessary step collect actions. */
1289 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1290
1291 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1292
1293 /* See if we just hit a tracepoint and do its main collect
1294 actions. */
1295 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1296
1297 lwp->suspended--;
1298
1299 gdb_assert (lwp->suspended == 0);
1300 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1301
1302 if (tpoint_related_event)
1303 {
1304 if (debug_threads)
1305 fprintf (stderr, "got a tracepoint event\n");
1306 return 1;
1307 }
1308
1309 return 0;
1310 }
1311
1312 /* Convenience wrapper. Returns true if LWP is presently collecting a
1313 fast tracepoint. */
1314
1315 static int
1316 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1317 struct fast_tpoint_collect_status *status)
1318 {
1319 CORE_ADDR thread_area;
1320
1321 if (the_low_target.get_thread_area == NULL)
1322 return 0;
1323
1324 /* Get the thread area address. This is used to recognize which
1325 thread is which when tracing with the in-process agent library.
1326 We don't read anything from the address, and treat it as opaque;
1327 it's the address itself that we assume is unique per-thread. */
1328 if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
1329 return 0;
1330
1331 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1332 }
1333
1334 /* The reason we resume in the caller is that we want to be able
1335 to pass lwp->status_pending as WSTAT, and we need to clear
1336 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1337 refuses to resume. */
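/* A hypothetical sketch of that calling pattern (illustrative only,
   not a verbatim caller from this file):

     if (maybe_move_out_of_jump_pad (child, &child->status_pending))
       {
         child->status_pending_p = 0;
         linux_resume_one_lwp (child, 0, 0, NULL);
       }
*/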
1338
1339 static int
1340 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1341 {
1342 struct thread_info *saved_inferior;
1343
1344 saved_inferior = current_inferior;
1345 current_inferior = get_lwp_thread (lwp);
1346
1347 if ((wstat == NULL
1348 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1349 && supports_fast_tracepoints ()
1350 && in_process_agent_loaded ())
1351 {
1352 struct fast_tpoint_collect_status status;
1353 int r;
1354
1355 if (debug_threads)
1356 fprintf (stderr, "\
1357 Checking whether LWP %ld needs to move out of the jump pad.\n",
1358 lwpid_of (lwp));
1359
1360 r = linux_fast_tracepoint_collecting (lwp, &status);
1361
1362 if (wstat == NULL
1363 || (WSTOPSIG (*wstat) != SIGILL
1364 && WSTOPSIG (*wstat) != SIGFPE
1365 && WSTOPSIG (*wstat) != SIGSEGV
1366 && WSTOPSIG (*wstat) != SIGBUS))
1367 {
1368 lwp->collecting_fast_tracepoint = r;
1369
1370 if (r != 0)
1371 {
1372 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1373 {
1374 /* Haven't executed the original instruction yet.
1375 Set breakpoint there, and wait till it's hit,
1376 then single-step until exiting the jump pad. */
1377 lwp->exit_jump_pad_bkpt
1378 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1379 }
1380
1381 if (debug_threads)
1382 fprintf (stderr, "\
1383 Checking whether LWP %ld needs to move out of the jump pad...it does\n",
1384 lwpid_of (lwp));
1385 current_inferior = saved_inferior;
1386
1387 return 1;
1388 }
1389 }
1390 else
1391 {
1392 /* If we get a synchronous signal while collecting, *and*
1393 while executing the (relocated) original instruction,
1394 reset the PC to point at the tpoint address, before
1395 reporting to GDB. Otherwise, it's an IPA lib bug: just
1396 report the signal to GDB, and pray for the best. */
1397
1398 lwp->collecting_fast_tracepoint = 0;
1399
1400 if (r != 0
1401 && (status.adjusted_insn_addr <= lwp->stop_pc
1402 && lwp->stop_pc < status.adjusted_insn_addr_end))
1403 {
1404 siginfo_t info;
1405 struct regcache *regcache;
1406
1407 /* The si_addr on a few signals references the address
1408 of the faulting instruction. Adjust that as
1409 well. */
1410 if ((WSTOPSIG (*wstat) == SIGILL
1411 || WSTOPSIG (*wstat) == SIGFPE
1412 || WSTOPSIG (*wstat) == SIGBUS
1413 || WSTOPSIG (*wstat) == SIGSEGV)
1414 && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
1415 /* Final check just to make sure we don't clobber
1416 the siginfo of non-kernel-sent signals. */
1417 && (uintptr_t) info.si_addr == lwp->stop_pc)
1418 {
1419 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1420 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
1421 }
1422
1423 regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
1424 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1425 lwp->stop_pc = status.tpoint_addr;
1426
1427 /* Cancel any fast tracepoint lock this thread was
1428 holding. */
1429 force_unlock_trace_buffer ();
1430 }
1431
1432 if (lwp->exit_jump_pad_bkpt != NULL)
1433 {
1434 if (debug_threads)
1435 fprintf (stderr,
1436 "Cancelling fast exit-jump-pad: removing bkpt. "
1437 "stopping all threads momentarily.\n");
1438
1439 stop_all_lwps (1, lwp);
1440 cancel_breakpoints ();
1441
1442 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1443 lwp->exit_jump_pad_bkpt = NULL;
1444
1445 unstop_all_lwps (1, lwp);
1446
1447 gdb_assert (lwp->suspended >= 0);
1448 }
1449 }
1450 }
1451
1452 if (debug_threads)
1453 fprintf (stderr, "\
1454 Checking whether LWP %ld needs to move out of the jump pad...no\n",
1455 lwpid_of (lwp));
1456
1457 current_inferior = saved_inferior;
1458 return 0;
1459 }
1460
1461 /* Enqueue one signal in the "signals to report later when out of the
1462 jump pad" list. */
1463
1464 static void
1465 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1466 {
1467 struct pending_signals *p_sig;
1468
1469 if (debug_threads)
1470 fprintf (stderr, "\
1471 Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));
1472
1473 if (debug_threads)
1474 {
1475 struct pending_signals *sig;
1476
1477 for (sig = lwp->pending_signals_to_report;
1478 sig != NULL;
1479 sig = sig->prev)
1480 fprintf (stderr,
1481 " Already queued %d\n",
1482 sig->signal);
1483
1484 fprintf (stderr, " (no more currently queued signals)\n");
1485 }
1486
1487 /* Don't enqueue non-RT signals if they are already in the deferred
1488 queue. (SIGSTOP being the easiest signal to see ending up here
1489 twice) */
1490 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1491 {
1492 struct pending_signals *sig;
1493
1494 for (sig = lwp->pending_signals_to_report;
1495 sig != NULL;
1496 sig = sig->prev)
1497 {
1498 if (sig->signal == WSTOPSIG (*wstat))
1499 {
1500 if (debug_threads)
1501 fprintf (stderr,
1502 "Not requeuing already queued non-RT signal %d"
1503 " for LWP %ld\n",
1504 sig->signal,
1505 lwpid_of (lwp));
1506 return;
1507 }
1508 }
1509 }
1510
1511 p_sig = xmalloc (sizeof (*p_sig));
1512 p_sig->prev = lwp->pending_signals_to_report;
1513 p_sig->signal = WSTOPSIG (*wstat);
1514 memset (&p_sig->info, 0, sizeof (siginfo_t));
1515 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
1516
1517 lwp->pending_signals_to_report = p_sig;
1518 }
1519
1520 /* Dequeue one signal from the "signals to report later when out of
1521 the jump pad" list. */
1522
1523 static int
1524 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1525 {
1526 if (lwp->pending_signals_to_report != NULL)
1527 {
1528 struct pending_signals **p_sig;
1529
1530 p_sig = &lwp->pending_signals_to_report;
1531 while ((*p_sig)->prev != NULL)
1532 p_sig = &(*p_sig)->prev;
1533
1534 *wstat = W_STOPCODE ((*p_sig)->signal);
1535 if ((*p_sig)->info.si_signo != 0)
1536 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1537 free (*p_sig);
1538 *p_sig = NULL;
1539
1540 if (debug_threads)
1541 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1542 WSTOPSIG (*wstat), lwpid_of (lwp));
1543
1544 if (debug_threads)
1545 {
1546 struct pending_signals *sig;
1547
1548 for (sig = lwp->pending_signals_to_report;
1549 sig != NULL;
1550 sig = sig->prev)
1551 fprintf (stderr,
1552 " Still queued %d\n",
1553 sig->signal);
1554
1555 fprintf (stderr, " (no more queued signals)\n");
1556 }
1557
1558 return 1;
1559 }
1560
1561 return 0;
1562 }
1563
1564 /* Arrange for a breakpoint to be hit again later. We don't keep the
1565 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1566 will handle the current event, eventually we will resume this LWP,
1567 and this breakpoint will trap again. */
1568
1569 static int
1570 cancel_breakpoint (struct lwp_info *lwp)
1571 {
1572 struct thread_info *saved_inferior;
1573
1574 /* There's nothing to do if we don't support breakpoints. */
1575 if (!supports_breakpoints ())
1576 return 0;
1577
1578 /* breakpoint_at reads from current inferior. */
1579 saved_inferior = current_inferior;
1580 current_inferior = get_lwp_thread (lwp);
1581
1582 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1583 {
1584 if (debug_threads)
1585 fprintf (stderr,
1586 "CB: Push back breakpoint for %s\n",
1587 target_pid_to_str (ptid_of (lwp)));
1588
1589 /* Back up the PC if necessary. */
1590 if (the_low_target.decr_pc_after_break)
1591 {
1592 struct regcache *regcache
1593 = get_thread_regcache (current_inferior, 1);
1594 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1595 }
1596
1597 current_inferior = saved_inferior;
1598 return 1;
1599 }
1600 else
1601 {
1602 if (debug_threads)
1603 fprintf (stderr,
1604 "CB: No breakpoint found at %s for [%s]\n",
1605 paddress (lwp->stop_pc),
1606 target_pid_to_str (ptid_of (lwp)));
1607 }
1608
1609 current_inferior = saved_inferior;
1610 return 0;
1611 }
1612
1613 /* When the event-loop is doing a step-over, this points at the thread
1614 being stepped. */
1615 ptid_t step_over_bkpt;
1616
1617 /* Wait for an event from child PID. If PID is -1, wait for any
1618 child. Store the stop status through the status pointer WSTAT.
1619 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1620 event was found and OPTIONS contains WNOHANG. Return the PID of
1621 the stopped child otherwise. */
1622
1623 static int
1624 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1625 {
1626 struct lwp_info *event_child, *requested_child;
1627 ptid_t wait_ptid;
1628
1629 event_child = NULL;
1630 requested_child = NULL;
1631
1632 /* Check for a lwp with a pending status. */
1633
1634 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1635 {
1636 event_child = (struct lwp_info *)
1637 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1638 if (debug_threads && event_child)
1639 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1640 }
1641 else
1642 {
1643 requested_child = find_lwp_pid (ptid);
1644
1645 if (!stopping_threads
1646 && requested_child->status_pending_p
1647 && requested_child->collecting_fast_tracepoint)
1648 {
1649 enqueue_one_deferred_signal (requested_child,
1650 &requested_child->status_pending);
1651 requested_child->status_pending_p = 0;
1652 requested_child->status_pending = 0;
1653 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1654 }
1655
1656 if (requested_child->suspended
1657 && requested_child->status_pending_p)
1658 fatal ("requesting an event out of a suspended child?");
1659
1660 if (requested_child->status_pending_p)
1661 event_child = requested_child;
1662 }
1663
1664 if (event_child != NULL)
1665 {
1666 if (debug_threads)
1667 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1668 lwpid_of (event_child), event_child->status_pending);
1669 *wstat = event_child->status_pending;
1670 event_child->status_pending_p = 0;
1671 event_child->status_pending = 0;
1672 current_inferior = get_lwp_thread (event_child);
1673 return lwpid_of (event_child);
1674 }
1675
1676 if (ptid_is_pid (ptid))
1677 {
1678 /* A request to wait for a specific tgid. This is not possible
1679 with waitpid, so instead, we wait for any child, and leave
1680 children we're not interested in right now with a pending
1681 status to report later. */
1682 wait_ptid = minus_one_ptid;
1683 }
1684 else
1685 wait_ptid = ptid;
1686
1687 /* We only enter this loop if no process has a pending wait status. Thus
1688 any action taken in response to a wait status inside this loop is
1689 responding as soon as we detect the status, not after any pending
1690 events. */
1691 while (1)
1692 {
1693 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1694
1695 if ((options & WNOHANG) && event_child == NULL)
1696 {
1697 if (debug_threads)
1698 fprintf (stderr, "WNOHANG set, no event found\n");
1699 return 0;
1700 }
1701
1702 if (event_child == NULL)
1703 error ("event from unknown child");
1704
1705 if (ptid_is_pid (ptid)
1706 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1707 {
1708 if (! WIFSTOPPED (*wstat))
1709 mark_lwp_dead (event_child, *wstat);
1710 else
1711 {
1712 event_child->status_pending_p = 1;
1713 event_child->status_pending = *wstat;
1714 }
1715 continue;
1716 }
1717
1718 current_inferior = get_lwp_thread (event_child);
1719
1720 /* Check for thread exit. */
1721 if (! WIFSTOPPED (*wstat))
1722 {
1723 if (debug_threads)
1724 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1725
1726 /* If the last thread is exiting, just return. */
1727 if (last_thread_of_process_p (current_inferior))
1728 {
1729 if (debug_threads)
1730 fprintf (stderr, "LWP %ld is last lwp of process\n",
1731 lwpid_of (event_child));
1732 return lwpid_of (event_child);
1733 }
1734
1735 if (!non_stop)
1736 {
1737 current_inferior = (struct thread_info *) all_threads.head;
1738 if (debug_threads)
1739 fprintf (stderr, "Current inferior is now %ld\n",
1740 lwpid_of (get_thread_lwp (current_inferior)));
1741 }
1742 else
1743 {
1744 current_inferior = NULL;
1745 if (debug_threads)
1746 fprintf (stderr, "Current inferior is now <NULL>\n");
1747 }
1748
1749 /* If we were waiting for this particular child to do something...
1750 well, it did something. */
1751 if (requested_child != NULL)
1752 {
1753 int lwpid = lwpid_of (event_child);
1754
1755 /* Cancel the step-over operation --- the thread that
1756 started it is gone. */
1757 if (finish_step_over (event_child))
1758 unstop_all_lwps (1, event_child);
1759 delete_lwp (event_child);
1760 return lwpid;
1761 }
1762
1763 delete_lwp (event_child);
1764
1765 /* Wait for a more interesting event. */
1766 continue;
1767 }
1768
1769 if (event_child->must_set_ptrace_flags)
1770 {
1771 linux_enable_event_reporting (lwpid_of (event_child));
1772 event_child->must_set_ptrace_flags = 0;
1773 }
1774
1775 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1776 && *wstat >> 16 != 0)
1777 {
1778 handle_extended_wait (event_child, *wstat);
1779 continue;
1780 }
1781
1782 if (WIFSTOPPED (*wstat)
1783 && WSTOPSIG (*wstat) == SIGSTOP
1784 && event_child->stop_expected)
1785 {
1786 int should_stop;
1787
1788 if (debug_threads)
1789 fprintf (stderr, "Expected stop.\n");
1790 event_child->stop_expected = 0;
1791
1792 should_stop = (current_inferior->last_resume_kind == resume_stop
1793 || stopping_threads);
1794
1795 if (!should_stop)
1796 {
1797 linux_resume_one_lwp (event_child,
1798 event_child->stepping, 0, NULL);
1799 continue;
1800 }
1801 }
1802
1803 return lwpid_of (event_child);
1804 }
1805
1806 /* NOTREACHED */
1807 return 0;
1808 }
1809
1810 /* Count the LWPs that have had events.  */
1811
1812 static int
1813 count_events_callback (struct inferior_list_entry *entry, void *data)
1814 {
1815 struct lwp_info *lp = (struct lwp_info *) entry;
1816 struct thread_info *thread = get_lwp_thread (lp);
1817 int *count = data;
1818
1819 gdb_assert (count != NULL);
1820
1821 /* Count only resumed LWPs that have a SIGTRAP event pending that
1822 should be reported to GDB. */
1823 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1824 && thread->last_resume_kind != resume_stop
1825 && lp->status_pending_p
1826 && WIFSTOPPED (lp->status_pending)
1827 && WSTOPSIG (lp->status_pending) == SIGTRAP
1828 && !breakpoint_inserted_here (lp->stop_pc))
1829 (*count)++;
1830
1831 return 0;
1832 }
1833
1834 /* Select the LWP (if any) that is currently being single-stepped. */
1835
1836 static int
1837 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
1838 {
1839 struct lwp_info *lp = (struct lwp_info *) entry;
1840 struct thread_info *thread = get_lwp_thread (lp);
1841
1842 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
1843 && thread->last_resume_kind == resume_step
1844 && lp->status_pending_p)
1845 return 1;
1846 else
1847 return 0;
1848 }
1849
1850 /* Select the Nth LWP that has had a SIGTRAP event that should be
1851 reported to GDB. */
1852
1853 static int
1854 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
1855 {
1856 struct lwp_info *lp = (struct lwp_info *) entry;
1857 struct thread_info *thread = get_lwp_thread (lp);
1858 int *selector = data;
1859
1860 gdb_assert (selector != NULL);
1861
1862 /* Select only resumed LWPs that have a SIGTRAP event pending. */
1863 if (thread->last_resume_kind != resume_stop
1864 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1865 && lp->status_pending_p
1866 && WIFSTOPPED (lp->status_pending)
1867 && WSTOPSIG (lp->status_pending) == SIGTRAP
1868 && !breakpoint_inserted_here (lp->stop_pc))
1869 if ((*selector)-- == 0)
1870 return 1;
1871
1872 return 0;
1873 }
1874
1875 static int
1876 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
1877 {
1878 struct lwp_info *lp = (struct lwp_info *) entry;
1879 struct thread_info *thread = get_lwp_thread (lp);
1880 struct lwp_info *event_lp = data;
1881
1882 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1883 if (lp == event_lp)
1884 return 0;
1885
1886 /* If a LWP other than the LWP that we're reporting an event for has
1887 hit a GDB breakpoint (as opposed to some random trap signal),
1888 then just arrange for it to hit it again later. We don't keep
1889 the SIGTRAP status and don't forward the SIGTRAP signal to the
1890 LWP. We will handle the current event, eventually we will resume
1891 all LWPs, and this one will get its breakpoint trap again.
1892
1893 If we do not do this, then we run the risk that the user will
1894 delete or disable the breakpoint, but the LWP will have already
1895 tripped on it. */
1896
1897 if (thread->last_resume_kind != resume_stop
1898 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
1899 && lp->status_pending_p
1900 && WIFSTOPPED (lp->status_pending)
1901 && WSTOPSIG (lp->status_pending) == SIGTRAP
1902 && !lp->stepping
1903 && !lp->stopped_by_watchpoint
1904 && cancel_breakpoint (lp))
1905 /* Throw away the SIGTRAP. */
1906 lp->status_pending_p = 0;
1907
1908 return 0;
1909 }
1910
1911 static void
1912 linux_cancel_breakpoints (void)
1913 {
1914 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
1915 }
1916
1917 /* Select one LWP out of those that have events pending. */
1918
1919 static void
1920 select_event_lwp (struct lwp_info **orig_lp)
1921 {
1922 int num_events = 0;
1923 int random_selector;
1924 struct lwp_info *event_lp;
1925
1926 /* Give preference to any LWP that is being single-stepped. */
1927 event_lp
1928 = (struct lwp_info *) find_inferior (&all_lwps,
1929 select_singlestep_lwp_callback, NULL);
1930 if (event_lp != NULL)
1931 {
1932 if (debug_threads)
1933 fprintf (stderr,
1934 "SEL: Select single-step %s\n",
1935 target_pid_to_str (ptid_of (event_lp)));
1936 }
1937 else
1938 {
1939 /* No single-stepping LWP. Select one at random, out of those
1940 which have had SIGTRAP events. */
1941
1942 /* First see how many SIGTRAP events we have. */
1943 find_inferior (&all_lwps, count_events_callback, &num_events);
1944
1945 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1946 random_selector = (int)
1947 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1948
1949 if (debug_threads && num_events > 1)
1950 fprintf (stderr,
1951 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1952 num_events, random_selector);
1953
1954 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
1955 select_event_lwp_callback,
1956 &random_selector);
1957 }
1958
1959 if (event_lp != NULL)
1960 {
1961 /* Switch the event LWP. */
1962 *orig_lp = event_lp;
1963 }
1964 }
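/* Editor's note: a minimal standalone sketch of the scaled-rand
   selection used in select_event_lwp above. rand () / (RAND_MAX + 1.0)
   is uniform in [0, 1), so scaling by NUM_EVENTS and truncating yields
   an index in [0, num_events) that is evenly distributed (up to
   rounding) -- e.g. with three pending SIGTRAPs, each of 0, 1 and 2 is
   about equally likely, avoiding the heavier bias of
   rand () % num_events. The function name is hypothetical; the block
   is illustrative only and not compiled. */
#if 0
#include <stdlib.h>

static int
pick_event_index (int num_events)
{
  /* Same formula as select_event_lwp uses for RANDOM_SELECTOR.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif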
1965
1966 /* Decrement the suspend count of an LWP. */
1967
1968 static int
1969 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
1970 {
1971 struct lwp_info *lwp = (struct lwp_info *) entry;
1972
1973 /* Ignore EXCEPT. */
1974 if (lwp == except)
1975 return 0;
1976
1977 lwp->suspended--;
1978
1979 gdb_assert (lwp->suspended >= 0);
1980 return 0;
1981 }
1982
1983 /* Decrement the suspend count of all LWPs, except EXCEPT, if
1984 non-NULL. */
1985
1986 static void
1987 unsuspend_all_lwps (struct lwp_info *except)
1988 {
1989 find_inferior (&all_lwps, unsuspend_one_lwp, except);
1990 }
1991
1992 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
1993 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
1994 void *data);
1995 static int lwp_running (struct inferior_list_entry *entry, void *data);
1996 static ptid_t linux_wait_1 (ptid_t ptid,
1997 struct target_waitstatus *ourstatus,
1998 int target_options);
1999
2000 /* Stabilize threads (move out of jump pads).
2001
2002 If a thread is midway collecting a fast tracepoint, we need to
2003 finish the collection and move it out of the jump pad before
2004 reporting the signal.
2005
2006 This avoids recursion while collecting (when a signal arrives
2007 midway, and the signal handler itself collects), which would trash
2008 the trace buffer. In case the user set a breakpoint in a signal
2009 handler, this avoids the backtrace showing the jump pad, etc.
2010 Most importantly, there are certain things we can't do safely if
2011 threads are stopped in a jump pad (or in its callees). For
2012 example:
2013
2014 - starting a new trace run. A thread still collecting the
2015 previous run could trash the trace buffer when resumed. The trace
2016 buffer control structures would have been reset, but the thread
2017 would have no way to tell. The thread could even be midway through
2018 memcpy'ing to the buffer, which would mean that when resumed, it
2019 would clobber the trace buffer that had been set up for the new run.
2020
2021 - we can't rewrite/reuse the jump pads for new tracepoints
2022 safely. Say you do tstart while a thread is stopped midway through
2023 collecting. When the thread is later resumed, it finishes the
2024 collection, and returns to the jump pad, to execute the original
2025 instruction that was under the tracepoint jump at the time the
2026 older run had been started. If the jump pad had since been
2027 rewritten for something else in the new run, the thread would now
2028 execute wrong or random instructions. */
2029
2030 static void
2031 linux_stabilize_threads (void)
2032 {
2033 struct thread_info *save_inferior;
2034 struct lwp_info *lwp_stuck;
2035
2036 lwp_stuck
2037 = (struct lwp_info *) find_inferior (&all_lwps,
2038 stuck_in_jump_pad_callback, NULL);
2039 if (lwp_stuck != NULL)
2040 {
2041 if (debug_threads)
2042 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2043 lwpid_of (lwp_stuck));
2044 return;
2045 }
2046
2047 save_inferior = current_inferior;
2048
2049 stabilizing_threads = 1;
2050
2051 /* Kick 'em all. */
2052 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2053
2054 /* Loop until all are stopped out of the jump pads. */
2055 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2056 {
2057 struct target_waitstatus ourstatus;
2058 struct lwp_info *lwp;
2059 int wstat;
2060
2061 /* Note that we go through the full wait event loop. While
2062 moving threads out of jump pad, we need to be able to step
2063 over internal breakpoints and such. */
2064 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2065
2066 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2067 {
2068 lwp = get_thread_lwp (current_inferior);
2069
2070 /* Lock it. */
2071 lwp->suspended++;
2072
2073 if (ourstatus.value.sig != TARGET_SIGNAL_0
2074 || current_inferior->last_resume_kind == resume_stop)
2075 {
2076 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2077 enqueue_one_deferred_signal (lwp, &wstat);
2078 }
2079 }
2080 }
2081
2082 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2083
2084 stabilizing_threads = 0;
2085
2086 current_inferior = save_inferior;
2087
2088 if (debug_threads)
2089 {
2090 lwp_stuck
2091 = (struct lwp_info *) find_inferior (&all_lwps,
2092 stuck_in_jump_pad_callback, NULL);
2093 if (lwp_stuck != NULL)
2094 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2095 lwpid_of (lwp_stuck));
2096 }
2097 }
2098
2099 /* Wait for an event from an inferior process; fills in OURSTATUS
2100 and returns the ptid of the thread the event is for. */
2100
2101 static ptid_t
2102 linux_wait_1 (ptid_t ptid,
2103 struct target_waitstatus *ourstatus, int target_options)
2104 {
2105 int w;
2106 struct lwp_info *event_child;
2107 int options;
2108 int pid;
2109 int step_over_finished;
2110 int bp_explains_trap;
2111 int maybe_internal_trap;
2112 int report_to_gdb;
2113 int trace_event;
2114
2115 /* Translate generic target options into linux options. */
2116 options = __WALL;
2117 if (target_options & TARGET_WNOHANG)
2118 options |= WNOHANG;
2119
2120 retry:
2121 bp_explains_trap = 0;
2122 trace_event = 0;
2123 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2124
2125 /* If we were only supposed to resume one thread, only wait for
2126 that thread - if it's still alive. If it died, however - which
2127 can happen if we're coming from the thread death case below -
2128 then we need to make sure we restart the other threads. We could
2129 pick a thread at random or restart all; restarting all is less
2130 arbitrary. */
2131 if (!non_stop
2132 && !ptid_equal (cont_thread, null_ptid)
2133 && !ptid_equal (cont_thread, minus_one_ptid))
2134 {
2135 struct thread_info *thread;
2136
2137 thread = (struct thread_info *) find_inferior_id (&all_threads,
2138 cont_thread);
2139
2140 /* No stepping, no signal - unless one is pending already, of course. */
2141 if (thread == NULL)
2142 {
2143 struct thread_resume resume_info;
2144 resume_info.thread = minus_one_ptid;
2145 resume_info.kind = resume_continue;
2146 resume_info.sig = 0;
2147 linux_resume (&resume_info, 1);
2148 }
2149 else
2150 ptid = cont_thread;
2151 }
2152
2153 if (ptid_equal (step_over_bkpt, null_ptid))
2154 pid = linux_wait_for_event (ptid, &w, options);
2155 else
2156 {
2157 if (debug_threads)
2158 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2159 target_pid_to_str (step_over_bkpt));
2160 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2161 }
2162
2163 if (pid == 0) /* only if TARGET_WNOHANG */
2164 return null_ptid;
2165
2166 event_child = get_thread_lwp (current_inferior);
2167
2168 /* If we are waiting for a particular child, and it exited,
2169 linux_wait_for_event will return its exit status. Similarly if
2170 the last child exited. If this is not the last child, however,
2171 do not report it as exited until there is a 'thread exited' response
2172 available in the remote protocol. Instead, just wait for another event.
2173 This should be safe, because if the thread crashed we will already
2174 have reported the termination signal to GDB; that should stop any
2175 in-progress stepping operations, etc.
2176
2177 Report the exit status of the last thread to exit. This matches
2178 LinuxThreads' behavior. */
2179
2180 if (last_thread_of_process_p (current_inferior))
2181 {
2182 if (WIFEXITED (w) || WIFSIGNALED (w))
2183 {
2184 if (WIFEXITED (w))
2185 {
2186 ourstatus->kind = TARGET_WAITKIND_EXITED;
2187 ourstatus->value.integer = WEXITSTATUS (w);
2188
2189 if (debug_threads)
2190 fprintf (stderr,
2191 "\nChild exited with retcode = %x \n",
2192 WEXITSTATUS (w));
2193 }
2194 else
2195 {
2196 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2197 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2198
2199 if (debug_threads)
2200 fprintf (stderr,
2201 "\nChild terminated with signal = %x \n",
2202 WTERMSIG (w));
2203
2204 }
2205
2206 return ptid_of (event_child);
2207 }
2208 }
2209 else
2210 {
2211 if (!WIFSTOPPED (w))
2212 goto retry;
2213 }
2214
2215 /* If this event was not handled before, and is not a SIGTRAP, we
2216 report it. SIGILL and SIGSEGV are also treated as traps in case
2217 a breakpoint is inserted at the current PC. If this target does
2218 not support internal breakpoints at all, we also report the
2219 SIGTRAP without further processing; it's of no concern to us. */
2220 maybe_internal_trap
2221 = (supports_breakpoints ()
2222 && (WSTOPSIG (w) == SIGTRAP
2223 || ((WSTOPSIG (w) == SIGILL
2224 || WSTOPSIG (w) == SIGSEGV)
2225 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2226
2227 if (maybe_internal_trap)
2228 {
2229 /* Handle anything that requires bookkeeping before deciding to
2230 report the event or continue waiting. */
2231
2232 /* First check if we can explain the SIGTRAP with an internal
2233 breakpoint, or if we should possibly report the event to GDB.
2234 Do this before anything that may remove or insert a
2235 breakpoint. */
2236 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2237
2238 /* We have a SIGTRAP, possibly a step-over dance has just
2239 finished. If so, tweak the state machine accordingly,
2240 reinsert breakpoints and delete any reinsert (software
2241 single-step) breakpoints. */
2242 step_over_finished = finish_step_over (event_child);
2243
2244 /* Now invoke the callbacks of any internal breakpoints there. */
2245 check_breakpoints (event_child->stop_pc);
2246
2247 /* Handle tracepoint data collecting. This may overflow the
2248 trace buffer, and cause a tracing stop, removing
2249 breakpoints. */
2250 trace_event = handle_tracepoints (event_child);
2251
2252 if (bp_explains_trap)
2253 {
2254 /* If we stepped or ran into an internal breakpoint, we've
2255 already handled it. So next time we resume (from this
2256 PC), we should step over it. */
2257 if (debug_threads)
2258 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2259
2260 if (breakpoint_here (event_child->stop_pc))
2261 event_child->need_step_over = 1;
2262 }
2263 }
2264 else
2265 {
2266 /* We have some other signal, possibly a step-over dance was in
2267 progress, and it should be cancelled too. */
2268 step_over_finished = finish_step_over (event_child);
2269 }
2270
2271 /* We have all the data we need. Either report the event to GDB, or
2272 resume threads and keep waiting for more. */
2273
2274 /* If we're collecting a fast tracepoint, finish the collection and
2275 move out of the jump pad before delivering a signal. See
2276 linux_stabilize_threads. */
2277
2278 if (WIFSTOPPED (w)
2279 && WSTOPSIG (w) != SIGTRAP
2280 && supports_fast_tracepoints ()
2281 && in_process_agent_loaded ())
2282 {
2283 if (debug_threads)
2284 fprintf (stderr,
2285 "Got signal %d for LWP %ld. Check if we need "
2286 "to defer or adjust it.\n",
2287 WSTOPSIG (w), lwpid_of (event_child));
2288
2289 /* Allow debugging the jump pad itself. */
2290 if (current_inferior->last_resume_kind != resume_step
2291 && maybe_move_out_of_jump_pad (event_child, &w))
2292 {
2293 enqueue_one_deferred_signal (event_child, &w);
2294
2295 if (debug_threads)
2296 fprintf (stderr,
2297 "Signal %d for LWP %ld deferred (in jump pad)\n",
2298 WSTOPSIG (w), lwpid_of (event_child));
2299
2300 linux_resume_one_lwp (event_child, 0, 0, NULL);
2301 goto retry;
2302 }
2303 }
2304
2305 if (event_child->collecting_fast_tracepoint)
2306 {
2307 if (debug_threads)
2308 fprintf (stderr, "\
2309 LWP %ld was trying to move out of the jump pad (%d). \
2310 Check if we're already there.\n",
2311 lwpid_of (event_child),
2312 event_child->collecting_fast_tracepoint);
2313
2314 trace_event = 1;
2315
2316 event_child->collecting_fast_tracepoint
2317 = linux_fast_tracepoint_collecting (event_child, NULL);
2318
2319 if (event_child->collecting_fast_tracepoint != 1)
2320 {
2321 /* No longer need this breakpoint. */
2322 if (event_child->exit_jump_pad_bkpt != NULL)
2323 {
2324 if (debug_threads)
2325 fprintf (stderr,
2326 "No longer need exit-jump-pad bkpt; removing it."
2327 "stopping all threads momentarily.\n");
2328
2329 /* Other running threads could hit this breakpoint.
2330 We don't handle moribund locations like GDB does;
2331 instead, we always pause all threads when removing
2332 breakpoints, so that any step-over or
2333 decr_pc_after_break adjustment is always taken
2334 care of while the breakpoint is still
2335 inserted. */
2336 stop_all_lwps (1, event_child);
2337 cancel_breakpoints ();
2338
2339 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2340 event_child->exit_jump_pad_bkpt = NULL;
2341
2342 unstop_all_lwps (1, event_child);
2343
2344 gdb_assert (event_child->suspended >= 0);
2345 }
2346 }
2347
2348 if (event_child->collecting_fast_tracepoint == 0)
2349 {
2350 if (debug_threads)
2351 fprintf (stderr,
2352 "fast tracepoint finished "
2353 "collecting successfully.\n");
2354
2355 /* We may have a deferred signal to report. */
2356 if (dequeue_one_deferred_signal (event_child, &w))
2357 {
2358 if (debug_threads)
2359 fprintf (stderr, "dequeued one signal.\n");
2360 }
2361 else
2362 {
2363 if (debug_threads)
2364 fprintf (stderr, "no deferred signals.\n");
2365
2366 if (stabilizing_threads)
2367 {
2368 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2369 ourstatus->value.sig = TARGET_SIGNAL_0;
2370 return ptid_of (event_child);
2371 }
2372 }
2373 }
2374 }
2375
2376 /* Check whether GDB would be interested in this event. */
2377
2378 /* If GDB is not interested in this signal, don't stop other
2379 threads, and don't report it to GDB. Just resume the inferior
2380 right away. We do this for threading-related signals as well as
2381 any that GDB specifically requested we ignore. But never ignore
2382 SIGSTOP if we sent it ourselves, and do not ignore signals when
2383 stepping - they may require special handling to skip the signal
2384 handler. */
2385 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2386 thread library? */
2387 if (WIFSTOPPED (w)
2388 && current_inferior->last_resume_kind != resume_step
2389 && (
2390 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2391 (current_process ()->private->thread_db != NULL
2392 && (WSTOPSIG (w) == __SIGRTMIN
2393 || WSTOPSIG (w) == __SIGRTMIN + 1))
2394 ||
2395 #endif
2396 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2397 && !(WSTOPSIG (w) == SIGSTOP
2398 && current_inferior->last_resume_kind == resume_stop))))
2399 {
2400 siginfo_t info, *info_p;
2401
2402 if (debug_threads)
2403 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2404 WSTOPSIG (w), lwpid_of (event_child));
2405
2406 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2407 info_p = &info;
2408 else
2409 info_p = NULL;
2410 linux_resume_one_lwp (event_child, event_child->stepping,
2411 WSTOPSIG (w), info_p);
2412 goto retry;
2413 }
2414
2415 /* If GDB wanted this thread to single step, we always want to
2416 report the SIGTRAP, and let GDB handle it. Watchpoints should
2417 always be reported. So should signals we can't explain. A
2418 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2419 may not support Z0 breakpoints. If we do, we'd be able to handle
2420 GDB breakpoints on top of internal breakpoints, by handling the
2421 internal breakpoint and still reporting the event to GDB. If we
2422 don't, we're out of luck; GDB won't see the breakpoint hit. */
2423 report_to_gdb = (!maybe_internal_trap
2424 || current_inferior->last_resume_kind == resume_step
2425 || event_child->stopped_by_watchpoint
2426 || (!step_over_finished
2427 && !bp_explains_trap && !trace_event)
2428 || (gdb_breakpoint_here (event_child->stop_pc)
2429 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
2430
2431 /* We found no reason GDB would want us to stop. We either hit one
2432 of our own breakpoints, or finished an internal step GDB
2433 shouldn't know about. */
2434 if (!report_to_gdb)
2435 {
2436 if (debug_threads)
2437 {
2438 if (bp_explains_trap)
2439 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2440 if (step_over_finished)
2441 fprintf (stderr, "Step-over finished.\n");
2442 if (trace_event)
2443 fprintf (stderr, "Tracepoint event.\n");
2444 }
2445
2446 /* We're not reporting this breakpoint to GDB, so apply the
2447 decr_pc_after_break adjustment to the inferior's regcache
2448 ourselves. */
2449
2450 if (the_low_target.set_pc != NULL)
2451 {
2452 struct regcache *regcache
2453 = get_thread_regcache (get_lwp_thread (event_child), 1);
2454 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2455 }
2456
2457 /* We may have finished stepping over a breakpoint. If so,
2458 we've stopped and suspended all LWPs momentarily except the
2459 stepping one. This is where we resume them all again. We're
2460 going to keep waiting, so use proceed, which handles stepping
2461 over the next breakpoint. */
2462 if (debug_threads)
2463 fprintf (stderr, "proceeding all threads.\n");
2464
2465 if (step_over_finished)
2466 unsuspend_all_lwps (event_child);
2467
2468 proceed_all_lwps ();
2469 goto retry;
2470 }
2471
2472 if (debug_threads)
2473 {
2474 if (current_inferior->last_resume_kind == resume_step)
2475 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2476 if (event_child->stopped_by_watchpoint)
2477 fprintf (stderr, "Stopped by watchpoint.\n");
2478 if (gdb_breakpoint_here (event_child->stop_pc))
2479 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2480 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2482 }
2483
2484 /* Alright, we're going to report a stop. */
2485
2486 if (!non_stop && !stabilizing_threads)
2487 {
2488 /* In all-stop, stop all threads. */
2489 stop_all_lwps (0, NULL);
2490
2491 /* If we're not waiting for a specific LWP, choose an event LWP
2492 from among those that have had events. Giving equal priority
2493 to all LWPs that have had events helps prevent
2494 starvation. */
2495 if (ptid_equal (ptid, minus_one_ptid))
2496 {
2497 event_child->status_pending_p = 1;
2498 event_child->status_pending = w;
2499
2500 select_event_lwp (&event_child);
2501
2502 event_child->status_pending_p = 0;
2503 w = event_child->status_pending;
2504 }
2505
2506 /* Now that we've selected our final event LWP, cancel any
2507 breakpoints in other LWPs that have hit a GDB breakpoint.
2508 See the comment in cancel_breakpoints_callback to find out
2509 why. */
2510 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2511
2512 /* If we were doing a step-over, all other threads but the stepping one
2513 had been paused in start_step_over, with their suspend counts
2514 incremented. We don't want to do a full unstop/unpause, because we're
2515 in all-stop mode (so we want threads stopped), but we still need to
2516 unsuspend the other threads, to decrement their `suspended' count
2517 back. */
2518 if (step_over_finished)
2519 unsuspend_all_lwps (event_child);
2520
2521 /* Stabilize threads (move out of jump pads). */
2522 stabilize_threads ();
2523 }
2524 else
2525 {
2526 /* If we just finished a step-over, then all threads had been
2527 momentarily paused. In all-stop, that's fine, we want
2528 threads stopped by now anyway. In non-stop, we need to
2529 re-resume threads that GDB wanted to be running. */
2530 if (step_over_finished)
2531 unstop_all_lwps (1, event_child);
2532 }
2533
2534 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2535
2536 if (current_inferior->last_resume_kind == resume_stop
2537 && WSTOPSIG (w) == SIGSTOP)
2538 {
2539 /* The thread was requested to stop by GDB with vCont;t, and it
2540 stopped cleanly, so report it as SIG0. The use of SIGSTOP is an
2541 implementation detail. */
2542 ourstatus->value.sig = TARGET_SIGNAL_0;
2543 }
2544 else if (current_inferior->last_resume_kind == resume_stop
2545 && WSTOPSIG (w) != SIGSTOP)
2546 {
2547 /* The thread was requested to stop by GDB with vCont;t, but it
2548 stopped for some other reason. */
2549 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2550 }
2551 else
2552 {
2553 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2554 }
2555
2556 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2557
2558 if (debug_threads)
2559 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2560 target_pid_to_str (ptid_of (event_child)),
2561 ourstatus->kind,
2562 ourstatus->value.sig);
2563
2564 return ptid_of (event_child);
2565 }
2566
2567 /* Get rid of any pending event in the pipe. */
2568 static void
2569 async_file_flush (void)
2570 {
2571 int ret;
2572 char buf;
2573
2574 do
2575 ret = read (linux_event_pipe[0], &buf, 1);
2576 while (ret >= 0 || (ret == -1 && errno == EINTR));
2577 }
2578
2579 /* Put something in the pipe, so the event loop wakes up. */
2580 static void
2581 async_file_mark (void)
2582 {
2583 int ret;
2584
2585 async_file_flush ();
2586
2587 do
2588 ret = write (linux_event_pipe[1], "+", 1);
2589 while (ret == 0 || (ret == -1 && errno == EINTR));
2590
2591 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2592 be awakened anyway. */
2593 }
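/* Editor's note: a minimal sketch of how an event pipe such as
   linux_event_pipe is typically created for the pattern above (the
   real setup lives elsewhere in gdbserver; `make_event_pipe' is a
   hypothetical name). Both ends are non-blocking, so async_file_flush
   drains until EAGAIN and async_file_mark never stalls on a full
   pipe. Illustrative only; not compiled. */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
make_event_pipe (int fds[2])
{
  if (pipe (fds) != 0)
    return -1;
  /* O_NONBLOCK on both ends: reading an empty pipe returns -1 with
     errno == EAGAIN, and a write to a full pipe fails harmlessly.  */
  if (fcntl (fds[0], F_SETFL, O_NONBLOCK) != 0
      || fcntl (fds[1], F_SETFL, O_NONBLOCK) != 0)
    return -1;
  return 0;
}
#endif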
2594
2595 static ptid_t
2596 linux_wait (ptid_t ptid,
2597 struct target_waitstatus *ourstatus, int target_options)
2598 {
2599 ptid_t event_ptid;
2600
2601 if (debug_threads)
2602 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2603
2604 /* Flush the async file first. */
2605 if (target_is_async_p ())
2606 async_file_flush ();
2607
2608 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2609
2610 /* If at least one stop was reported, there may be more. A single
2611 SIGCHLD can signal more than one child stop. */
2612 if (target_is_async_p ()
2613 && (target_options & TARGET_WNOHANG) != 0
2614 && !ptid_equal (event_ptid, null_ptid))
2615 async_file_mark ();
2616
2617 return event_ptid;
2618 }
2619
2620 /* Send a signal to an LWP. */
2621
2622 static int
2623 kill_lwp (unsigned long lwpid, int signo)
2624 {
2625 /* Use tkill, if possible, so the signal is directed at exactly this
2626 LWP. If the tkill syscall is unavailable (ENOSYS), fall back to kill. */
2627
2628 #ifdef __NR_tkill
2629 {
2630 static int tkill_failed;
2631
2632 if (!tkill_failed)
2633 {
2634 int ret;
2635
2636 errno = 0;
2637 ret = syscall (__NR_tkill, lwpid, signo);
2638 if (errno != ENOSYS)
2639 return ret;
2640 tkill_failed = 1;
2641 }
2642 }
2643 #endif
2644
2645 return kill (lwpid, signo);
2646 }
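/* Editor's note: an illustration of why kill_lwp prefers tkill.
   kill () addresses the whole thread group, so the kernel may deliver
   the signal to any thread in the process, while tkill targets one
   specific LWP -- which is what stopping an individual thread
   requires. The function below is a hypothetical usage sketch, not
   gdbserver code; not compiled. */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
stop_exactly_one_lwp (pid_t lwpid)
{
  /* Directed at LWPID itself; with plain kill (), another thread in
     the same thread group could consume the SIGSTOP instead.  */
  syscall (SYS_tkill, lwpid, SIGSTOP);
}
#endif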
2647
2648 void
2649 linux_stop_lwp (struct lwp_info *lwp)
2650 {
2651 send_sigstop (lwp);
2652 }
2653
2654 static void
2655 send_sigstop (struct lwp_info *lwp)
2656 {
2657 int pid;
2658
2659 pid = lwpid_of (lwp);
2660
2661 /* If we already have a pending stop signal for this LWP, don't
2662 send another. */
2663 if (lwp->stop_expected)
2664 {
2665 if (debug_threads)
2666 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2667
2668 return;
2669 }
2670
2671 if (debug_threads)
2672 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2673
2674 lwp->stop_expected = 1;
2675 kill_lwp (pid, SIGSTOP);
2676 }
2677
2678 static int
2679 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2680 {
2681 struct lwp_info *lwp = (struct lwp_info *) entry;
2682
2683 /* Ignore EXCEPT. */
2684 if (lwp == except)
2685 return 0;
2686
2687 if (lwp->stopped)
2688 return 0;
2689
2690 send_sigstop (lwp);
2691 return 0;
2692 }
2693
2694 /* Increment the suspend count of an LWP, and stop it, if not stopped
2695 yet. */
2696 static int
2697 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2698 void *except)
2699 {
2700 struct lwp_info *lwp = (struct lwp_info *) entry;
2701
2702 /* Ignore EXCEPT. */
2703 if (lwp == except)
2704 return 0;
2705
2706 lwp->suspended++;
2707
2708 return send_sigstop_callback (entry, except);
2709 }
2710
2711 static void
2712 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2713 {
2714 /* It's dead, really. */
2715 lwp->dead = 1;
2716
2717 /* Store the exit status for later. */
2718 lwp->status_pending_p = 1;
2719 lwp->status_pending = wstat;
2720
2721 /* Prevent trying to stop it. */
2722 lwp->stopped = 1;
2723
2724 /* No further stops are expected from a dead lwp. */
2725 lwp->stop_expected = 0;
2726 }
2727
2728 static void
2729 wait_for_sigstop (struct inferior_list_entry *entry)
2730 {
2731 struct lwp_info *lwp = (struct lwp_info *) entry;
2732 struct thread_info *saved_inferior;
2733 int wstat;
2734 ptid_t saved_tid;
2735 ptid_t ptid;
2736 int pid;
2737
2738 if (lwp->stopped)
2739 {
2740 if (debug_threads)
2741 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2742 lwpid_of (lwp));
2743 return;
2744 }
2745
2746 saved_inferior = current_inferior;
2747 if (saved_inferior != NULL)
2748 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2749 else
2750 saved_tid = null_ptid; /* avoid bogus unused warning */
2751
2752 ptid = lwp->head.id;
2753
2754 if (debug_threads)
2755 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2756
2757 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2758
2759 /* If we stopped with a non-SIGSTOP signal, save it for later
2760 and record the pending SIGSTOP. If the process exited, just
2761 return. */
2762 if (WIFSTOPPED (wstat))
2763 {
2764 if (debug_threads)
2765 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2766 lwpid_of (lwp), WSTOPSIG (wstat));
2767
2768 if (WSTOPSIG (wstat) != SIGSTOP)
2769 {
2770 if (debug_threads)
2771 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2772 lwpid_of (lwp), wstat);
2773
2774 lwp->status_pending_p = 1;
2775 lwp->status_pending = wstat;
2776 }
2777 }
2778 else
2779 {
2780 if (debug_threads)
2781 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2782
2783 lwp = find_lwp_pid (pid_to_ptid (pid));
2784 if (lwp)
2785 {
2786 /* Leave this status pending for the next time we're able to
2787 report it. In the meantime, we'll report this lwp as
2788 dead to GDB, so GDB doesn't try to read registers and
2789 memory from it. This can only happen if this was the
2790 last thread of the process; otherwise, PID is removed
2791 from the thread tables before linux_wait_for_event
2792 returns. */
2793 mark_lwp_dead (lwp, wstat);
2794 }
2795 }
2796
2797 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2798 current_inferior = saved_inferior;
2799 else
2800 {
2801 if (debug_threads)
2802 fprintf (stderr, "Previously current thread died.\n");
2803
2804 if (non_stop)
2805 {
2806 /* We can't change the current inferior behind GDB's back,
2807 otherwise, a subsequent command may apply to the wrong
2808 process. */
2809 current_inferior = NULL;
2810 }
2811 else
2812 {
2813 /* Set a valid thread as current. */
2814 set_desired_inferior (0);
2815 }
2816 }
2817 }
2818
2819 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2820 move it out, because we need to report the stop event to GDB. For
2821 example, if the user puts a breakpoint in the jump pad, it's
2822 because she wants to debug it. */
2823
2824 static int
2825 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
2826 {
2827 struct lwp_info *lwp = (struct lwp_info *) entry;
2828 struct thread_info *thread = get_lwp_thread (lwp);
2829
2830 gdb_assert (lwp->suspended == 0);
2831 gdb_assert (lwp->stopped);
2832
2833 /* Allow debugging the jump pad, gdb_collect, etc.. */
2834 return (supports_fast_tracepoints ()
2835 && in_process_agent_loaded ()
2836 && (gdb_breakpoint_here (lwp->stop_pc)
2837 || lwp->stopped_by_watchpoint
2838 || thread->last_resume_kind == resume_step)
2839 && linux_fast_tracepoint_collecting (lwp, NULL));
2840 }
2841
2842 static void
2843 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
2844 {
2845 struct lwp_info *lwp = (struct lwp_info *) entry;
2846 struct thread_info *thread = get_lwp_thread (lwp);
2847 int *wstat;
2848
2849 gdb_assert (lwp->suspended == 0);
2850 gdb_assert (lwp->stopped);
2851
2852 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
2853
2854 /* Allow debugging the jump pad, gdb_collect, etc. */
2855 if (!gdb_breakpoint_here (lwp->stop_pc)
2856 && !lwp->stopped_by_watchpoint
2857 && thread->last_resume_kind != resume_step
2858 && maybe_move_out_of_jump_pad (lwp, wstat))
2859 {
2860 if (debug_threads)
2861 fprintf (stderr,
2862 "LWP %ld needs stabilizing (in jump pad)\n",
2863 lwpid_of (lwp));
2864
2865 if (wstat)
2866 {
2867 lwp->status_pending_p = 0;
2868 enqueue_one_deferred_signal (lwp, wstat);
2869
2870 if (debug_threads)
2871 fprintf (stderr,
2872 "Signal %d for LWP %ld deferred "
2873 "(in jump pad)\n",
2874 WSTOPSIG (*wstat), lwpid_of (lwp));
2875 }
2876
2877 linux_resume_one_lwp (lwp, 0, 0, NULL);
2878 }
2879 else
2880 lwp->suspended++;
2881 }
2882
2883 static int
2884 lwp_running (struct inferior_list_entry *entry, void *data)
2885 {
2886 struct lwp_info *lwp = (struct lwp_info *) entry;
2887
2888 if (lwp->dead)
2889 return 0;
2890 if (lwp->stopped)
2891 return 0;
2892 return 1;
2893 }
2894
2895 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
2896 If SUSPEND, then also increase the suspend count of every LWP,
2897 except EXCEPT. */
2898
2899 static void
2900 stop_all_lwps (int suspend, struct lwp_info *except)
2901 {
2902 stopping_threads = 1;
2903
2904 if (suspend)
2905 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
2906 else
2907 find_inferior (&all_lwps, send_sigstop_callback, except);
2908 for_each_inferior (&all_lwps, wait_for_sigstop);
2909 stopping_threads = 0;
2910 }
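/* Editor's note: a sketch of the intended pairing of stop_all_lwps
   with unstop_all_lwps (defined further below), mirroring how this
   file brackets breakpoint removal -- e.g. the exit-jump-pad-bkpt
   handling in linux_wait_1. A SUSPEND-counted stop must be undone by
   an unsuspending unstop, or the `suspended' counts drift.
   Hypothetical wrapper; not compiled. */
#if 0
static void
with_all_lwps_paused (struct lwp_info *event_lwp)
{
  stop_all_lwps (1, event_lwp);   /* Stop and suspend all but EVENT_LWP.  */
  /* ... safely remove or insert breakpoints here ...  */
  unstop_all_lwps (1, event_lwp); /* Unsuspend and re-resume them.  */
}
#endif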
2911
2912 /* Resume execution of LWP.
2913 If STEP is nonzero, single-step it.
2914 If SIGNAL is nonzero, deliver that signal when resuming. */
2915
2916 static void
2917 linux_resume_one_lwp (struct lwp_info *lwp,
2918 int step, int signal, siginfo_t *info)
2919 {
2920 struct thread_info *saved_inferior;
2921 int fast_tp_collecting;
2922
2923 if (lwp->stopped == 0)
2924 return;
2925
2926 fast_tp_collecting = lwp->collecting_fast_tracepoint;
2927
2928 gdb_assert (!stabilizing_threads || fast_tp_collecting);
2929
2930 /* Cancel actions that rely on GDB not changing the PC (e.g., the
2931 user used the "jump" command, or "set $pc = foo"). */
2932 if (lwp->stop_pc != get_pc (lwp))
2933 {
2934 /* Collecting 'while-stepping' actions doesn't make sense
2935 anymore. */
2936 release_while_stepping_state_list (get_lwp_thread (lwp));
2937 }
2938
2939 /* If we have pending signals or status, and a new signal, enqueue the
2940 signal. Also enqueue the signal if we are waiting to reinsert a
2941 breakpoint; it will be picked up again below. */
2942 if (signal != 0
2943 && (lwp->status_pending_p
2944 || lwp->pending_signals != NULL
2945 || lwp->bp_reinsert != 0
2946 || fast_tp_collecting))
2947 {
2948 struct pending_signals *p_sig;
2949 p_sig = xmalloc (sizeof (*p_sig));
2950 p_sig->prev = lwp->pending_signals;
2951 p_sig->signal = signal;
2952 if (info == NULL)
2953 memset (&p_sig->info, 0, sizeof (siginfo_t));
2954 else
2955 memcpy (&p_sig->info, info, sizeof (siginfo_t));
2956 lwp->pending_signals = p_sig;
2957 }
2958
2959 if (lwp->status_pending_p)
2960 {
2961 if (debug_threads)
2962 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
2963 " has pending status\n",
2964 lwpid_of (lwp), step ? "step" : "continue", signal,
2965 lwp->stop_expected ? "expected" : "not expected");
2966 return;
2967 }
2968
2969 saved_inferior = current_inferior;
2970 current_inferior = get_lwp_thread (lwp);
2971
2972 if (debug_threads)
2973 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
2974 lwpid_of (lwp), step ? "step" : "continue", signal,
2975 lwp->stop_expected ? "expected" : "not expected");
2976
2977 /* This bit needs some thinking about. If we get a signal that
2978 we must report while a single-step reinsert is still pending,
2979 we often end up resuming the thread. It might be better to
2980 (ew) allow a stack of pending events; then we could be sure that
2981 the reinsert happened right away and not lose any signals.
2982
2983 Making this stack would also shrink the window in which breakpoints are
2984 uninserted (see comment in linux_wait_for_lwp) but not enough for
2985 complete correctness, so it won't solve that problem. It may be
2986 worthwhile just to solve this one, however. */
2987 if (lwp->bp_reinsert != 0)
2988 {
2989 if (debug_threads)
2990 fprintf (stderr, " pending reinsert at 0x%s\n",
2991 paddress (lwp->bp_reinsert));
2992
2993 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
2994 {
2995 if (fast_tp_collecting == 0)
2996 {
2997 if (step == 0)
2998 fprintf (stderr, "BAD - reinserting but not stepping.\n");
2999 if (lwp->suspended)
3000 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3001 lwp->suspended);
3002 }
3003
3004 step = 1;
3005 }
3006
3007 /* Postpone any pending signal. It was enqueued above. */
3008 signal = 0;
3009 }
3010
3011 if (fast_tp_collecting == 1)
3012 {
3013 if (debug_threads)
3014 fprintf (stderr, "\
3015 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3016 lwpid_of (lwp));
3017
3018 /* Postpone any pending signal. It was enqueued above. */
3019 signal = 0;
3020 }
3021 else if (fast_tp_collecting == 2)
3022 {
3023 if (debug_threads)
3024 fprintf (stderr, "\
3025 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3026 lwpid_of (lwp));
3027
3028 if (can_hardware_single_step ())
3029 step = 1;
3030 else
3031 fatal ("moving out of jump pad single-stepping"
3032 " not implemented on this target");
3033
3034 /* Postpone any pending signal. It was enqueued above. */
3035 signal = 0;
3036 }
3037
3038 /* If we have while-stepping actions in this thread, set it stepping.
3039 If we have a signal to deliver, it may or may not be set to
3040 SIG_IGN, we don't know. Assume so, and allow collecting
3041 while-stepping into a signal handler. A possible smart thing to
3042 do would be to set an internal breakpoint at the signal return
3043 address, continue, and carry on catching this while-stepping
3044 action only when that breakpoint is hit. A future
3045 enhancement. */
3046 if (get_lwp_thread (lwp)->while_stepping != NULL
3047 && can_hardware_single_step ())
3048 {
3049 if (debug_threads)
3050 fprintf (stderr,
3051 "lwp %ld has a while-stepping action -> forcing step.\n",
3052 lwpid_of (lwp));
3053 step = 1;
3054 }
3055
3056 if (debug_threads && the_low_target.get_pc != NULL)
3057 {
3058 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3059 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3060 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3061 }
3062
3063 /* If we have pending signals, consume one unless we are trying to
3064 reinsert a breakpoint or we're trying to finish a fast tracepoint
3065 collect. */
3066 if (lwp->pending_signals != NULL
3067 && lwp->bp_reinsert == 0
3068 && fast_tp_collecting == 0)
3069 {
3070 struct pending_signals **p_sig;
3071
3072 p_sig = &lwp->pending_signals;
3073 while ((*p_sig)->prev != NULL)
3074 p_sig = &(*p_sig)->prev;
3075
3076 signal = (*p_sig)->signal;
3077 if ((*p_sig)->info.si_signo != 0)
3078 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3079
3080 free (*p_sig);
3081 *p_sig = NULL;
3082 }
3083
3084 if (the_low_target.prepare_to_resume != NULL)
3085 the_low_target.prepare_to_resume (lwp);
3086
3087 regcache_invalidate_one ((struct inferior_list_entry *)
3088 get_lwp_thread (lwp));
3089 errno = 0;
3090 lwp->stopped = 0;
3091 lwp->stopped_by_watchpoint = 0;
3092 lwp->stepping = step;
3093 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3094 /* Coerce to a uintptr_t first to avoid potential gcc warning
3095 of coercing an 8 byte integer to a 4 byte pointer. */
3096 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3097
3098 current_inferior = saved_inferior;
3099 if (errno)
3100 {
3101 /* ESRCH from ptrace either means that the thread was already
3102 running (an error) or that it is gone (a race condition). If
3103 it's gone, we will get a notification the next time we wait,
3104 so we can ignore the error. We could differentiate these
3105 two, but it's tricky without waiting; the thread still exists
3106 as a zombie, so sending it signal 0 would succeed. So just
3107 ignore ESRCH. */
3108 if (errno == ESRCH)
3109 return;
3110
3111 perror_with_name ("ptrace");
3112 }
3113 }
3114
3115 struct thread_resume_array
3116 {
3117 struct thread_resume *resume;
3118 size_t n;
3119 };
3120
3121 /* This function is called once per thread. We look up the thread
3122 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3123 resume request.
3124
3125 This algorithm is O(threads * resume elements), but the number of
3126 resume elements is small (and will remain small at least until GDB
3127 supports thread suspension). */
3128 static int
3129 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3130 {
3131 struct lwp_info *lwp;
3132 struct thread_info *thread;
3133 int ndx;
3134 struct thread_resume_array *r;
3135
3136 thread = (struct thread_info *) entry;
3137 lwp = get_thread_lwp (thread);
3138 r = arg;
3139
3140 for (ndx = 0; ndx < r->n; ndx++)
3141 {
3142 ptid_t ptid = r->resume[ndx].thread;
3143 if (ptid_equal (ptid, minus_one_ptid)
3144 || ptid_equal (ptid, entry->id)
3145 || (ptid_is_pid (ptid)
3146 && (ptid_get_pid (ptid) == pid_of (lwp)))
3147 || (ptid_get_lwp (ptid) == -1
3148 && (ptid_get_pid (ptid) == pid_of (lwp))))
3149 {
3150 if (r->resume[ndx].kind == resume_stop
3151 && thread->last_resume_kind == resume_stop)
3152 {
3153 if (debug_threads)
3154 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3155 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3156 ? "stopped"
3157 : "stopping",
3158 lwpid_of (lwp));
3159
3160 continue;
3161 }
3162
3163 lwp->resume = &r->resume[ndx];
3164 thread->last_resume_kind = lwp->resume->kind;
3165
3166 /* If we had a deferred signal to report, dequeue one now.
3167 This can happen if LWP gets more than one signal while
3168 trying to get out of a jump pad. */
3169 if (lwp->stopped
3170 && !lwp->status_pending_p
3171 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3172 {
3173 lwp->status_pending_p = 1;
3174
3175 if (debug_threads)
3176 fprintf (stderr,
3177 "Dequeueing deferred signal %d for LWP %ld, "
3178 "leaving status pending.\n",
3179 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3180 }
3181
3182 return 0;
3183 }
3184 }
3185
3186 /* No resume action for this thread. */
3187 lwp->resume = NULL;
3188
3189 return 0;
3190 }
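/* Editor's note: a sketch of the kind of resume array the callback
   above matches threads against. Entries are scanned in order and the
   first match wins, so specific ptids should precede wildcards; this
   mirrors a vCont such as "vCont;s:<tid>;c" -- step one thread,
   continue the rest. Hypothetical helper; not compiled. */
#if 0
static void
example_step_one_continue_rest (ptid_t stepping_thread)
{
  struct thread_resume resume[2];

  resume[0].thread = stepping_thread; /* Specific entry first.  */
  resume[0].kind = resume_step;
  resume[0].sig = 0;

  resume[1].thread = minus_one_ptid;  /* Wildcard: everyone else.  */
  resume[1].kind = resume_continue;
  resume[1].sig = 0;

  linux_resume (resume, 2);
}
#endif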
3191
3192
3193 /* Set *FLAG_P if this lwp has an interesting status pending. */
3194 static int
3195 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3196 {
3197 struct lwp_info *lwp = (struct lwp_info *) entry;
3198
3199 /* LWPs which will not be resumed are not interesting, because
3200 we might not wait for them next time through linux_wait. */
3201 if (lwp->resume == NULL)
3202 return 0;
3203
3204 if (lwp->status_pending_p)
3205 * (int *) flag_p = 1;
3206
3207 return 0;
3208 }
3209
3210 /* Return 1 if this lwp that GDB wants running is stopped at an
3211 internal breakpoint that we need to step over. It assumes that any
3212 required STOP_PC adjustment has already been propagated to the
3213 inferior's regcache. */
3214
3215 static int
3216 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3217 {
3218 struct lwp_info *lwp = (struct lwp_info *) entry;
3219 struct thread_info *thread;
3220 struct thread_info *saved_inferior;
3221 CORE_ADDR pc;
3222
3223 /* LWPs which will not be resumed are not interesting, because we
3224 might not wait for them next time through linux_wait. */
3225
3226 if (!lwp->stopped)
3227 {
3228 if (debug_threads)
3229 fprintf (stderr,
3230 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3231 lwpid_of (lwp));
3232 return 0;
3233 }
3234
3235 thread = get_lwp_thread (lwp);
3236
3237 if (thread->last_resume_kind == resume_stop)
3238 {
3239 if (debug_threads)
3240 fprintf (stderr,
3241 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3242 lwpid_of (lwp));
3243 return 0;
3244 }
3245
3246 gdb_assert (lwp->suspended >= 0);
3247
3248 if (lwp->suspended)
3249 {
3250 if (debug_threads)
3251 fprintf (stderr,
3252 "Need step over [LWP %ld]? Ignoring, suspended\n",
3253 lwpid_of (lwp));
3254 return 0;
3255 }
3256
3257 if (!lwp->need_step_over)
3258 {
3259 if (debug_threads)
3260 fprintf (stderr,
3261 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3262 }
3263
3264 if (lwp->status_pending_p)
3265 {
3266 if (debug_threads)
3267 fprintf (stderr,
3268 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3269 lwpid_of (lwp));
3270 return 0;
3271 }
3272
3273 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3274 or we have. */
3275 pc = get_pc (lwp);
3276
3277 /* If the PC has changed since we stopped, then don't do anything,
3278 and let the breakpoint/tracepoint be hit. This happens if, for
3279 instance, GDB handled the decr_pc_after_break subtraction itself,
3280 GDB is OOL stepping this thread, or the user has issued a "jump"
3281 command, or poked thread's registers herself. */
3282 if (pc != lwp->stop_pc)
3283 {
3284 if (debug_threads)
3285 fprintf (stderr,
3286 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3287 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3288 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3289
3290 lwp->need_step_over = 0;
3291 return 0;
3292 }
3293
3294 saved_inferior = current_inferior;
3295 current_inferior = thread;
3296
3297 /* We can only step over breakpoints we know about. */
3298 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3299 {
3300 /* Don't step over a breakpoint that GDB expects to hit
3301 though. If the condition is being evaluated on the target's side
3302 and it evaluates to false, step over this breakpoint as well. */
3303 if (gdb_breakpoint_here (pc)
3304 && gdb_condition_true_at_breakpoint (pc))
3305 {
3306 if (debug_threads)
3307 fprintf (stderr,
3308 "Need step over [LWP %ld]? yes, but found"
3309 " GDB breakpoint at 0x%s; skipping step over\n",
3310 lwpid_of (lwp), paddress (pc));
3311
3312 current_inferior = saved_inferior;
3313 return 0;
3314 }
3315 else
3316 {
3317 if (debug_threads)
3318 fprintf (stderr,
3319 "Need step over [LWP %ld]? yes, "
3320 "found breakpoint at 0x%s\n",
3321 lwpid_of (lwp), paddress (pc));
3322
3323 /* We've found an lwp that needs stepping over --- return 1 so
3324 that find_inferior stops looking. */
3325 current_inferior = saved_inferior;
3326
3327 /* If the step over is cancelled, this is set again. */
3328 lwp->need_step_over = 0;
3329 return 1;
3330 }
3331 }
3332
3333 current_inferior = saved_inferior;
3334
3335 if (debug_threads)
3336 fprintf (stderr,
3337 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3338 lwpid_of (lwp), paddress (pc));
3339
3340 return 0;
3341 }
3342
3343 /* Start a step-over operation on LWP. When LWP is stopped at a
3344 breakpoint, to make progress we need to move the breakpoint out
3345 of the way. If we let other threads run while we do that, they may
3346 pass by the breakpoint location and miss hitting it. To avoid
3347 that, a step-over momentarily stops all threads while LWP is
3348 single-stepped with the breakpoint temporarily uninserted from
3349 the inferior. When the single-step finishes, we reinsert the
3350 breakpoint, and let all threads that are supposed to be running,
3351 run again.
3352
3353 On targets that don't support hardware single-step, we don't
3354 currently support full software single-stepping. Instead, we only
3355 support stepping over the thread event breakpoint, by asking the
3356 low target where to place a reinsert breakpoint. Since this
3357 routine assumes the breakpoint being stepped over is a thread event
3358 breakpoint, the low target usually takes the return address of the
3359 current function as a good enough place to set the reinsert
3360 breakpoint. */
3360
3361 static int
3362 start_step_over (struct lwp_info *lwp)
3363 {
3364 struct thread_info *saved_inferior;
3365 CORE_ADDR pc;
3366 int step;
3367
3368 if (debug_threads)
3369 fprintf (stderr,
3370 "Starting step-over on LWP %ld. Stopping all threads\n",
3371 lwpid_of (lwp));
3372
3373 stop_all_lwps (1, lwp);
3374 gdb_assert (lwp->suspended == 0);
3375
3376 if (debug_threads)
3377 fprintf (stderr, "Done stopping all threads for step-over.\n");
3378
3379 /* Note, we should always reach here with an already adjusted PC,
3380 either by GDB (if we're resuming due to GDB's request), or by our
3381 caller, if we just finished handling an internal breakpoint GDB
3382 shouldn't care about. */
3383 pc = get_pc (lwp);
3384
3385 saved_inferior = current_inferior;
3386 current_inferior = get_lwp_thread (lwp);
3387
3388 lwp->bp_reinsert = pc;
3389 uninsert_breakpoints_at (pc);
3390 uninsert_fast_tracepoint_jumps_at (pc);
3391
3392 if (can_hardware_single_step ())
3393 {
3394 step = 1;
3395 }
3396 else
3397 {
3398 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3399 set_reinsert_breakpoint (raddr);
3400 step = 0;
3401 }
3402
3403 current_inferior = saved_inferior;
3404
3405 linux_resume_one_lwp (lwp, step, 0, NULL);
3406
3407 /* Require next event from this LWP. */
3408 step_over_bkpt = lwp->head.id;
3409 return 1;
3410 }
3411
3412 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3413 start_step_over, if still there, and delete any reinsert
3414 breakpoints we've set, on targets without hardware single-step. */
3415
3416 static int
3417 finish_step_over (struct lwp_info *lwp)
3418 {
3419 if (lwp->bp_reinsert != 0)
3420 {
3421 if (debug_threads)
3422 fprintf (stderr, "Finished step over.\n");
3423
3424 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3425 may be no breakpoint to reinsert there by now. */
3426 reinsert_breakpoints_at (lwp->bp_reinsert);
3427 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3428
3429 lwp->bp_reinsert = 0;
3430
3431 /* Delete any software-single-step reinsert breakpoints. No
3432 longer needed. We don't have to worry about other threads
3433 hitting this trap, and later not being able to explain it,
3434 because we were stepping over a breakpoint, and we hold all
3435 threads but LWP stopped while doing that. */
3436 if (!can_hardware_single_step ())
3437 delete_reinsert_breakpoints ();
3438
3439 step_over_bkpt = null_ptid;
3440 return 1;
3441 }
3442 else
3443 return 0;
3444 }
3445
3446 /* This function is called once per thread. We check the thread's resume
3447 request, which will tell us whether to resume, step, or leave the thread
3448 stopped; and what signal, if any, it should be sent.
3449
3450 For threads which we aren't explicitly told otherwise, we preserve
3451 the stepping flag; this is used for stepping over gdbserver-placed
3452 breakpoints.
3453
3454 If pending_flags was set in any thread, we queue any needed
3455 signals, since we won't actually resume. We already have a pending
3456 event to report, so we don't need to preserve any step requests;
3457 they should be re-issued if necessary. */
3458
3459 static int
3460 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3461 {
3462 struct lwp_info *lwp;
3463 struct thread_info *thread;
3464 int step;
3465 int leave_all_stopped = * (int *) arg;
3466 int leave_pending;
3467
3468 thread = (struct thread_info *) entry;
3469 lwp = get_thread_lwp (thread);
3470
3471 if (lwp->resume == NULL)
3472 return 0;
3473
3474 if (lwp->resume->kind == resume_stop)
3475 {
3476 if (debug_threads)
3477 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3478
3479 if (!lwp->stopped)
3480 {
3481 if (debug_threads)
3482 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3483
3484 /* Stop the thread, and wait for the event asynchronously,
3485 through the event loop. */
3486 send_sigstop (lwp);
3487 }
3488 else
3489 {
3490 if (debug_threads)
3491 fprintf (stderr, "already stopped LWP %ld\n",
3492 lwpid_of (lwp));
3493
3494 /* The LWP may have been stopped in an internal event that
3495 was not meant to be notified back to GDB (e.g., gdbserver
3496 breakpoint), so we should be reporting a stop event in
3497 this case too. */
3498
3499 /* If the thread already has a pending SIGSTOP, this is a
3500 no-op. Otherwise, something later will presumably resume
3501 the thread and this will cause it to cancel any pending
3502 operation, due to last_resume_kind == resume_stop. If
3503 the thread already has a pending status to report, we
3504 will still report it the next time we wait - see
3505 status_pending_p_callback. */
3506
3507 /* If we already have a pending signal to report, then
3508 there's no need to queue a SIGSTOP, as this means we're
3509 midway through moving the LWP out of the jumppad, and we
3510 will report the pending signal as soon as that is
3511 finished. */
3512 if (lwp->pending_signals_to_report == NULL)
3513 send_sigstop (lwp);
3514 }
3515
3516 /* For stop requests, we're done. */
3517 lwp->resume = NULL;
3518 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3519 return 0;
3520 }
3521
3522 /* If this thread, which is about to be resumed, has a pending status,
3523 then don't resume any threads - we can just report the pending
3524 status. Make sure to queue any signals that would otherwise be
3525 sent. In all-stop mode, we make this decision based on whether *any*
3526 thread has a pending status. If there's a thread that needs the
3527 step-over-breakpoint dance, then don't resume any other thread
3528 but that particular one. */
3529 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3530
3531 if (!leave_pending)
3532 {
3533 if (debug_threads)
3534 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3535
3536 step = (lwp->resume->kind == resume_step);
3537 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3538 }
3539 else
3540 {
3541 if (debug_threads)
3542 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3543
3544 /* If we have a new signal, enqueue the signal. */
3545 if (lwp->resume->sig != 0)
3546 {
3547 struct pending_signals *p_sig;
3548 p_sig = xmalloc (sizeof (*p_sig));
3549 p_sig->prev = lwp->pending_signals;
3550 p_sig->signal = lwp->resume->sig;
3551 memset (&p_sig->info, 0, sizeof (siginfo_t));
3552
3553 /* If this is the same signal we were previously stopped by,
3554 make sure to queue its siginfo. We can ignore the return
3555 value of ptrace; if it fails, we'll skip
3556 PTRACE_SETSIGINFO. */
3557 if (WIFSTOPPED (lwp->last_status)
3558 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3559 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3560
3561 lwp->pending_signals = p_sig;
3562 }
3563 }
3564
3565 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3566 lwp->resume = NULL;
3567 return 0;
3568 }
3569
3570 static void
3571 linux_resume (struct thread_resume *resume_info, size_t n)
3572 {
3573 struct thread_resume_array array = { resume_info, n };
3574 struct lwp_info *need_step_over = NULL;
3575 int any_pending;
3576 int leave_all_stopped;
3577
3578 find_inferior (&all_threads, linux_set_resume_request, &array);
3579
3580 /* If there is a thread which would otherwise be resumed, which has
3581 a pending status, then don't resume any threads - we can just
3582 report the pending status. Make sure to queue any signals that
3583 would otherwise be sent. In non-stop mode, we'll apply this
3584 logic to each thread individually. We consume all pending events
3585 before considering starting a step-over (in all-stop). */
3586 any_pending = 0;
3587 if (!non_stop)
3588 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3589
3590 /* If there is a thread which would otherwise be resumed, which is
3591 stopped at a breakpoint that needs stepping over, then don't
3592 resume any threads - have it step over the breakpoint with all
3593 other threads stopped, then resume all threads again. Make sure
3594 to queue any signals that would otherwise be delivered or
3595 queued. */
3596 if (!any_pending && supports_breakpoints ())
3597 need_step_over
3598 = (struct lwp_info *) find_inferior (&all_lwps,
3599 need_step_over_p, NULL);
3600
3601 leave_all_stopped = (need_step_over != NULL || any_pending);
3602
3603 if (debug_threads)
3604 {
3605 if (need_step_over != NULL)
3606 fprintf (stderr, "Not resuming all, need step over\n");
3607 else if (any_pending)
3608 fprintf (stderr,
3609 "Not resuming, all-stop and found "
3610 "an LWP with pending status\n");
3611 else
3612 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3613 }
3614
3615 /* Even if we're leaving threads stopped, queue all signals we'd
3616 otherwise deliver. */
3617 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3618
3619 if (need_step_over)
3620 start_step_over (need_step_over);
3621 }
3622
3623 /* This function is called once per thread. We check the thread's
3624 last resume request, which will tell us whether to resume, step, or
3625 leave the thread stopped. Any signal the client requested to be
3626 delivered has already been enqueued at this point.
3627
3628 If any thread that GDB wants running is stopped at an internal
3629 breakpoint that needs stepping over, we start a step-over operation
3630 on that particular thread, and leave all others stopped. */
3631
3632 static int
3633 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3634 {
3635 struct lwp_info *lwp = (struct lwp_info *) entry;
3636 struct thread_info *thread;
3637 int step;
3638
3639 if (lwp == except)
3640 return 0;
3641
3642 if (debug_threads)
3643 fprintf (stderr,
3644 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3645
3646 if (!lwp->stopped)
3647 {
3648 if (debug_threads)
3649 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3650 return 0;
3651 }
3652
3653 thread = get_lwp_thread (lwp);
3654
3655 if (thread->last_resume_kind == resume_stop
3656 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3657 {
3658 if (debug_threads)
3659 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3660 lwpid_of (lwp));
3661 return 0;
3662 }
3663
3664 if (lwp->status_pending_p)
3665 {
3666 if (debug_threads)
3667 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3668 lwpid_of (lwp));
3669 return 0;
3670 }
3671
3672 gdb_assert (lwp->suspended >= 0);
3673
3674 if (lwp->suspended)
3675 {
3676 if (debug_threads)
3677 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3678 return 0;
3679 }
3680
3681 if (thread->last_resume_kind == resume_stop
3682 && lwp->pending_signals_to_report == NULL
3683 && lwp->collecting_fast_tracepoint == 0)
3684 {
3685 /* We haven't reported this LWP as stopped yet (otherwise, the
3686 last_status.kind check above would catch it, and we wouldn't
3687 reach here). This LWP may have been momentarily paused by a
3688 stop_all_lwps call while handling, for example, another LWP's
3689 step-over. In that case, the pending expected SIGSTOP signal
3690 that was queued at vCont;t handling time will have already
3691 been consumed by wait_for_sigstop, and so we need to requeue
3692 another one here. Note that if the LWP already has a SIGSTOP
3693 pending, this is a no-op. */
3694
3695 if (debug_threads)
3696 fprintf (stderr,
3697 "Client wants LWP %ld to stop. "
3698 "Making sure it has a SIGSTOP pending\n",
3699 lwpid_of (lwp));
3700
3701 send_sigstop (lwp);
3702 }
3703
3704 step = thread->last_resume_kind == resume_step;
3705 linux_resume_one_lwp (lwp, step, 0, NULL);
3706 return 0;
3707 }
3708
3709 static int
3710 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3711 {
3712 struct lwp_info *lwp = (struct lwp_info *) entry;
3713
3714 if (lwp == except)
3715 return 0;
3716
3717 lwp->suspended--;
3718 gdb_assert (lwp->suspended >= 0);
3719
3720 return proceed_one_lwp (entry, except);
3721 }
3722
3723 /* When we finish a step-over, set threads running again. If there's
3724 another thread that may need a step-over, now's the time to start
3725 it. Eventually, we'll move all threads past their breakpoints. */
3726
3727 static void
3728 proceed_all_lwps (void)
3729 {
3730 struct lwp_info *need_step_over;
3731
3732 /* If there is a thread which would otherwise be resumed, which is
3733 stopped at a breakpoint that needs stepping over, then don't
3734 resume any threads - have it step over the breakpoint with all
3735 other threads stopped, then resume all threads again. */
3736
3737 if (supports_breakpoints ())
3738 {
3739 need_step_over
3740 = (struct lwp_info *) find_inferior (&all_lwps,
3741 need_step_over_p, NULL);
3742
3743 if (need_step_over != NULL)
3744 {
3745 if (debug_threads)
3746 fprintf (stderr, "proceed_all_lwps: found "
3747 "thread %ld needing a step-over\n",
3748 lwpid_of (need_step_over));
3749
3750 start_step_over (need_step_over);
3751 return;
3752 }
3753 }
3754
3755 if (debug_threads)
3756 fprintf (stderr, "Proceeding, no step-over needed\n");
3757
3758 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3759 }
3760
3761 /* Stopped LWPs that the client wanted running, and that have no
3762 pending status, are set to run again, except for EXCEPT if it is
3763 not NULL.  This undoes a stop_all_lwps call.  */
3764
3765 static void
3766 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3767 {
3768 if (debug_threads)
3769 {
3770 if (except)
3771 fprintf (stderr,
3772 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3773 else
3774 fprintf (stderr,
3775 "unstopping all lwps\n");
3776 }
3777
3778 if (unsuspend)
3779 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3780 else
3781 find_inferior (&all_lwps, proceed_one_lwp, except);
3782 }
3783
3784
3785 #ifdef HAVE_LINUX_REGSETS
3786
3787 #define use_linux_regsets 1
3788
3789 static int
3790 regsets_fetch_inferior_registers (struct regcache *regcache)
3791 {
3792 struct regset_info *regset;
3793 int saw_general_regs = 0;
3794 int pid;
3795 struct iovec iov;
3796
3797 regset = target_regsets;
3798
3799 pid = lwpid_of (get_thread_lwp (current_inferior));
3800 while (regset->size >= 0)
3801 {
3802 void *buf, *data;
3803 int nt_type, res;
3804
3805 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3806 {
3807 regset ++;
3808 continue;
3809 }
3810
3811 buf = xmalloc (regset->size);
3812
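/* Regsets with a nonzero NT_* note type transfer through a struct
   iovec, as PTRACE_GETREGSET-style requests expect; older requests
   take the raw buffer directly.  */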
3813 nt_type = regset->nt_type;
3814 if (nt_type)
3815 {
3816 iov.iov_base = buf;
3817 iov.iov_len = regset->size;
3818 data = (void *) &iov;
3819 }
3820 else
3821 data = buf;
3822
3823 #ifndef __sparc__
3824 res = ptrace (regset->get_request, pid, nt_type, data);
3825 #else
3826 res = ptrace (regset->get_request, pid, data, nt_type);
3827 #endif
3828 if (res < 0)
3829 {
3830 if (errno == EIO)
3831 {
3832 /* If we get EIO on a regset, do not try it again for
3833 this process. */
3834 disabled_regsets[regset - target_regsets] = 1;
3835 free (buf);
3836 continue;
3837 }
3838 else
3839 {
3840 char s[256];
3841 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
3842 pid);
3843 perror (s);
3844 }
3845 }
3846 else if (regset->type == GENERAL_REGS)
3847 saw_general_regs = 1;
3848 regset->store_function (regcache, buf);
3849 regset ++;
3850 free (buf);
3851 }
3852 if (saw_general_regs)
3853 return 0;
3854 else
3855 return 1;
3856 }
3857
3858 static int
3859 regsets_store_inferior_registers (struct regcache *regcache)
3860 {
3861 struct regset_info *regset;
3862 int saw_general_regs = 0;
3863 int pid;
3864 struct iovec iov;
3865
3866 regset = target_regsets;
3867
3868 pid = lwpid_of (get_thread_lwp (current_inferior));
3869 while (regset->size >= 0)
3870 {
3871 void *buf, *data;
3872 int nt_type, res;
3873
3874 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3875 {
3876 regset ++;
3877 continue;
3878 }
3879
3880 buf = xmalloc (regset->size);
3881
3882 /* First fill the buffer with the current register set contents,
3883 in case there are any items in the kernel's regset that are
3884 not in gdbserver's regcache. */
3885
3886 nt_type = regset->nt_type;
3887 if (nt_type)
3888 {
3889 iov.iov_base = buf;
3890 iov.iov_len = regset->size;
3891 data = (void *) &iov;
3892 }
3893 else
3894 data = buf;
3895
3896 #ifndef __sparc__
3897 res = ptrace (regset->get_request, pid, nt_type, data);
3898 #else
3899 res = ptrace (regset->get_request, pid, data, nt_type);
3900 #endif
3901
3902 if (res == 0)
3903 {
3904 /* Then overlay our cached registers on that. */
3905 regset->fill_function (regcache, buf);
3906
3907 /* Only now do we write the register set. */
3908 #ifndef __sparc__
3909 res = ptrace (regset->set_request, pid, nt_type, data);
3910 #else
3911 res = ptrace (regset->set_request, pid, data, nt_type);
3912 #endif
3913 }
3914
3915 if (res < 0)
3916 {
3917 if (errno == EIO)
3918 {
3919 /* If we get EIO on a regset, do not try it again for
3920 this process. */
3921 disabled_regsets[regset - target_regsets] = 1;
3922 free (buf);
3923 continue;
3924 }
3925 else if (errno == ESRCH)
3926 {
3927 /* At this point, ESRCH should mean the process is
3928 already gone, in which case we simply ignore attempts
3929 to change its registers. See also the related
3930 comment in linux_resume_one_lwp. */
3931 free (buf);
3932 return 0;
3933 }
3934 else
3935 {
3936 perror ("Warning: ptrace(regsets_store_inferior_registers)");
3937 }
3938 }
3939 else if (regset->type == GENERAL_REGS)
3940 saw_general_regs = 1;
3941 regset ++;
3942 free (buf);
3943 }
3944 if (saw_general_regs)
3945 return 0;
3946 else
3947 return 1;
3948 }
3949
3950 #else /* !HAVE_LINUX_REGSETS */
3951
3952 #define use_linux_regsets 0
3953 #define regsets_fetch_inferior_registers(regcache) 1
3954 #define regsets_store_inferior_registers(regcache) 1
3955
3956 #endif
3957
3958 /* Return 1 if register REGNO is supported by one of the regset ptrace
3959 calls or 0 if it has to be transferred individually. */
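/* For example, with the bitmap packed eight registers per byte, REGNO
   10 tests bit (10 % 8) == 2 of byte (10 / 8) == 1.  A NULL bitmap
   means every register is covered by some regset.  */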
3960
3961 static int
3962 linux_register_in_regsets (int regno)
3963 {
3964 unsigned char mask = 1 << (regno % 8);
3965 size_t index = regno / 8;
3966
3967 return (use_linux_regsets
3968 && (the_low_target.regset_bitmap == NULL
3969 || (the_low_target.regset_bitmap[index] & mask) != 0));
3970 }
3971
3972 #ifdef HAVE_LINUX_USRREGS
3973
3974 int
3975 register_addr (int regnum)
3976 {
3977 int addr;
3978
3979 if (regnum < 0 || regnum >= the_low_target.num_regs)
3980 error ("Invalid register number %d.", regnum);
3981
3982 addr = the_low_target.regmap[regnum];
3983
3984 return addr;
3985 }
3986
3987 /* Fetch one register. */
3988 static void
3989 fetch_register (struct regcache *regcache, int regno)
3990 {
3991 CORE_ADDR regaddr;
3992 int i, size;
3993 char *buf;
3994 int pid;
3995
3996 if (regno >= the_low_target.num_regs)
3997 return;
3998 if ((*the_low_target.cannot_fetch_register) (regno))
3999 return;
4000
4001 regaddr = register_addr (regno);
4002 if (regaddr == -1)
4003 return;
4004
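/* Round the transfer size up to a whole number of ptrace words; for
   example, a 10-byte register with an 8-byte PTRACE_XFER_TYPE is
   transferred as two 8-byte words.  */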
4005 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4006 & -sizeof (PTRACE_XFER_TYPE));
4007 buf = alloca (size);
4008
4009 pid = lwpid_of (get_thread_lwp (current_inferior));
4010 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4011 {
4012 errno = 0;
4013 *(PTRACE_XFER_TYPE *) (buf + i) =
4014 ptrace (PTRACE_PEEKUSER, pid,
4015 /* Coerce to a uintptr_t first to avoid potential gcc warning
4016 of coercing an 8 byte integer to a 4 byte pointer. */
4017 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4018 regaddr += sizeof (PTRACE_XFER_TYPE);
4019 if (errno != 0)
4020 error ("reading register %d: %s", regno, strerror (errno));
4021 }
4022
4023 if (the_low_target.supply_ptrace_register)
4024 the_low_target.supply_ptrace_register (regcache, regno, buf);
4025 else
4026 supply_register (regcache, regno, buf);
4027 }
4028
4029 /* Store one register. */
4030 static void
4031 store_register (struct regcache *regcache, int regno)
4032 {
4033 CORE_ADDR regaddr;
4034 int i, size;
4035 char *buf;
4036 int pid;
4037
4038 if (regno >= the_low_target.num_regs)
4039 return;
4040 if ((*the_low_target.cannot_store_register) (regno))
4041 return;
4042
4043 regaddr = register_addr (regno);
4044 if (regaddr == -1)
4045 return;
4046
4047 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4048 & -sizeof (PTRACE_XFER_TYPE));
4049 buf = alloca (size);
4050 memset (buf, 0, size);
4051
4052 if (the_low_target.collect_ptrace_register)
4053 the_low_target.collect_ptrace_register (regcache, regno, buf);
4054 else
4055 collect_register (regcache, regno, buf);
4056
4057 pid = lwpid_of (get_thread_lwp (current_inferior));
4058 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4059 {
4060 errno = 0;
4061 ptrace (PTRACE_POKEUSER, pid,
4062 /* Coerce to a uintptr_t first to avoid potential gcc warning
4063 about coercing an 8 byte integer to a 4 byte pointer. */
4064 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4065 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4066 if (errno != 0)
4067 {
4068 /* At this point, ESRCH should mean the process is
4069 already gone, in which case we simply ignore attempts
4070 to change its registers. See also the related
4071 comment in linux_resume_one_lwp. */
4072 if (errno == ESRCH)
4073 return;
4074
4075 if ((*the_low_target.cannot_store_register) (regno) == 0)
4076 error ("writing register %d: %s", regno, strerror (errno));
4077 }
4078 regaddr += sizeof (PTRACE_XFER_TYPE);
4079 }
4080 }
4081
4082 /* Fetch all registers, or just one, from the child process.
4083 If REGNO is -1, do this for all registers, skipping any that are
4084 assumed to have been retrieved by regsets_fetch_inferior_registers,
4085 unless ALL is non-zero.
4086 Otherwise, REGNO specifies which register (so we can save time). */
4087 static void
4088 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4089 {
4090 if (regno == -1)
4091 {
4092 for (regno = 0; regno < the_low_target.num_regs; regno++)
4093 if (all || !linux_register_in_regsets (regno))
4094 fetch_register (regcache, regno);
4095 }
4096 else
4097 fetch_register (regcache, regno);
4098 }
4099
4100 /* Store our register values back into the inferior.
4101 If REGNO is -1, do this for all registers, skipping any that are
4102 assumed to have been saved by regsets_store_inferior_registers,
4103 unless ALL is non-zero.
4104 Otherwise, REGNO specifies which register (so we can save time). */
4105 static void
4106 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4107 {
4108 if (regno == -1)
4109 {
4110 for (regno = 0; regno < the_low_target.num_regs; regno++)
4111 if (all || !linux_register_in_regsets (regno))
4112 store_register (regcache, regno);
4113 }
4114 else
4115 store_register (regcache, regno);
4116 }
4117
4118 #else /* !HAVE_LINUX_USRREGS */
4119
4120 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4121 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4122
4123 #endif
4124
4125
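/* Fetch registers via regsets where possible, falling back to
   PTRACE_PEEKUSER transfers.  regsets_fetch_inferior_registers returns
   nonzero when the regsets did not cover the general registers; in
   that case ALL is set and the usr path fetches everything.  */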
4126 void
4127 linux_fetch_registers (struct regcache *regcache, int regno)
4128 {
4129 int use_regsets;
4130 int all = 0;
4131
4132 if (regno == -1)
4133 {
4134 all = regsets_fetch_inferior_registers (regcache);
4135 usr_fetch_inferior_registers (regcache, regno, all);
4136 }
4137 else
4138 {
4139 use_regsets = linux_register_in_regsets (regno);
4140 if (use_regsets)
4141 all = regsets_fetch_inferior_registers (regcache);
4142 if (!use_regsets || all)
4143 usr_fetch_inferior_registers (regcache, regno, 1);
4144 }
4145 }
4146
4147 void
4148 linux_store_registers (struct regcache *regcache, int regno)
4149 {
4150 int use_regsets;
4151 int all = 0;
4152
4153 if (regno == -1)
4154 {
4155 all = regsets_store_inferior_registers (regcache);
4156 usr_store_inferior_registers (regcache, regno, all);
4157 }
4158 else
4159 {
4160 use_regsets = linux_register_in_regsets (regno);
4161 if (use_regsets)
4162 all = regsets_store_inferior_registers (regcache);
4163 if (!use_regsets || all)
4164 usr_store_inferior_registers (regcache, regno, 1);
4165 }
4166 }
4167
4168
4169 /* Copy LEN bytes from inferior's memory starting at MEMADDR to
4170 debugger memory starting at MYADDR.  Return 0 on success or errno.  */
4171
4172 static int
4173 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4174 {
4175 register int i;
4176 /* Round starting address down to longword boundary. */
4177 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4178 /* Round ending address up; get number of longwords that makes. */
4179 register int count
4180 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4181 / sizeof (PTRACE_XFER_TYPE);
4182 /* Allocate buffer of that many longwords. */
4183 register PTRACE_XFER_TYPE *buffer
4184 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4185 int fd;
4186 char filename[64];
4187 int pid = lwpid_of (get_thread_lwp (current_inferior));
4188
4189 /* Try using /proc. Don't bother for one word. */
4190 if (len >= 3 * sizeof (long))
4191 {
4192 /* We could keep this file open and cache it - possibly one per
4193 thread. That requires some juggling, but is even faster. */
4194 sprintf (filename, "/proc/%d/mem", pid);
4195 fd = open (filename, O_RDONLY | O_LARGEFILE);
4196 if (fd == -1)
4197 goto no_proc;
4198
4199 /* If pread64 is available, use it. It's faster if the kernel
4200 supports it (only one syscall), and it's 64-bit safe even on
4201 32-bit platforms (for instance, SPARC debugging a SPARC64
4202 application). */
4203 #ifdef HAVE_PREAD64
4204 if (pread64 (fd, myaddr, len, memaddr) != len)
4205 #else
4206 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4207 #endif
4208 {
4209 close (fd);
4210 goto no_proc;
4211 }
4212
4213 close (fd);
4214 return 0;
4215 }
4216
4217 no_proc:
4218 /* Read all the longwords */
4219 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4220 {
4221 errno = 0;
4222 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4223 about coercing an 8 byte integer to a 4 byte pointer. */
4224 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4225 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4226 if (errno)
4227 return errno;
4228 }
4229
4230 /* Copy appropriate bytes out of the buffer. */
4231 memcpy (myaddr,
4232 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4233 len);
4234
4235 return 0;
4236 }
4237
4238 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4239 memory at MEMADDR. On failure (cannot write to the inferior)
4240 returns the value of errno. */
4241
4242 static int
4243 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4244 {
4245 register int i;
4246 /* Round starting address down to longword boundary. */
4247 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4248 /* Round ending address up; get number of longwords that makes. */
4249 register int count
4250 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4251 / sizeof (PTRACE_XFER_TYPE);
4252
4253 /* Allocate buffer of that many longwords. */
4254 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4255 alloca (count * sizeof (PTRACE_XFER_TYPE));
4256
4257 int pid = lwpid_of (get_thread_lwp (current_inferior));
4258
4259 if (debug_threads)
4260 {
4261 /* Dump up to four bytes. */
4262 unsigned int val = * (unsigned int *) myaddr;
4263 if (len == 1)
4264 val = val & 0xff;
4265 else if (len == 2)
4266 val = val & 0xffff;
4267 else if (len == 3)
4268 val = val & 0xffffff;
4269 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4270 val, (long)memaddr);
4271 }
4272
4273 /* Fill start and end extra bytes of buffer with existing memory data. */
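/* For example, a 3-byte write starting one byte past a word boundary
   touches a single PTRACE_XFER_TYPE word: we peek that word, overlay
   the 3 bytes at the right offset, and poke the whole word back.  */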
4274
4275 errno = 0;
4276 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4277 about coercing an 8 byte integer to a 4 byte pointer. */
4278 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4279 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4280 if (errno)
4281 return errno;
4282
4283 if (count > 1)
4284 {
4285 errno = 0;
4286 buffer[count - 1]
4287 = ptrace (PTRACE_PEEKTEXT, pid,
4288 /* Coerce to a uintptr_t first to avoid potential gcc warning
4289 about coercing an 8 byte integer to a 4 byte pointer. */
4290 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4291 * sizeof (PTRACE_XFER_TYPE)),
4292 0);
4293 if (errno)
4294 return errno;
4295 }
4296
4297 /* Copy data to be written over corresponding part of buffer. */
4298
4299 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4300 myaddr, len);
4301
4302 /* Write the entire buffer. */
4303
4304 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4305 {
4306 errno = 0;
4307 ptrace (PTRACE_POKETEXT, pid,
4308 /* Coerce to a uintptr_t first to avoid potential gcc warning
4309 about coercing an 8 byte integer to a 4 byte pointer. */
4310 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4311 (PTRACE_ARG4_TYPE) buffer[i]);
4312 if (errno)
4313 return errno;
4314 }
4315
4316 return 0;
4317 }
4318
4319 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4320 static int linux_supports_tracefork_flag;
4321
4322 static void
4323 linux_enable_event_reporting (int pid)
4324 {
4325 if (!linux_supports_tracefork_flag)
4326 return;
4327
4328 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4329 }
4330
4331 /* Helper functions for linux_test_for_tracefork, called via fork or clone ().  */
4332
4333 static int
4334 linux_tracefork_grandchild (void *arg)
4335 {
4336 _exit (0);
4337 }
4338
4339 #define STACK_SIZE 4096
4340
4341 static int
4342 linux_tracefork_child (void *arg)
4343 {
4344 ptrace (PTRACE_TRACEME, 0, 0, 0);
4345 kill (getpid (), SIGSTOP);
4346
4347 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4348
4349 if (fork () == 0)
4350 linux_tracefork_grandchild (NULL);
4351
4352 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4353
4354 #ifdef __ia64__
4355 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4356 CLONE_VM | SIGCHLD, NULL);
4357 #else
4358 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4359 CLONE_VM | SIGCHLD, NULL);
4360 #endif
4361
4362 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4363
4364 _exit (0);
4365 }
4366
4367 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4368 sure that we can enable the option, and that it had the desired
4369 effect. */
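/* The test proceeds in four steps: fork (or clone) a child that traces
   itself and stops; set PTRACE_O_TRACEFORK on it; let it fork a
   grandchild; and check that waitpid then reports a stop with
   PTRACE_EVENT_FORK in bits 16 and up of the status word.  */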
4370
4371 static void
4372 linux_test_for_tracefork (void)
4373 {
4374 int child_pid, ret, status;
4375 long second_pid;
4376 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4377 char *stack = xmalloc (STACK_SIZE * 4);
4378 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4379
4380 linux_supports_tracefork_flag = 0;
4381
4382 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4383
4384 child_pid = fork ();
4385 if (child_pid == 0)
4386 linux_tracefork_child (NULL);
4387
4388 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4389
4390 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4391 #ifdef __ia64__
4392 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4393 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4394 #else /* !__ia64__ */
4395 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4396 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4397 #endif /* !__ia64__ */
4398
4399 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4400
4401 if (child_pid == -1)
4402 perror_with_name ("clone");
4403
4404 ret = my_waitpid (child_pid, &status, 0);
4405 if (ret == -1)
4406 perror_with_name ("waitpid");
4407 else if (ret != child_pid)
4408 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4409 if (! WIFSTOPPED (status))
4410 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4411
4412 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4413 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4414 if (ret != 0)
4415 {
4416 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4417 if (ret != 0)
4418 {
4419 warning ("linux_test_for_tracefork: failed to kill child");
4420 return;
4421 }
4422
4423 ret = my_waitpid (child_pid, &status, 0);
4424 if (ret != child_pid)
4425 warning ("linux_test_for_tracefork: failed to wait for killed child");
4426 else if (!WIFSIGNALED (status))
4427 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4428 "killed child", status);
4429
4430 return;
4431 }
4432
4433 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4434 if (ret != 0)
4435 warning ("linux_test_for_tracefork: failed to resume child");
4436
4437 ret = my_waitpid (child_pid, &status, 0);
4438
4439 if (ret == child_pid && WIFSTOPPED (status)
4440 && status >> 16 == PTRACE_EVENT_FORK)
4441 {
4442 second_pid = 0;
4443 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4444 if (ret == 0 && second_pid != 0)
4445 {
4446 int second_status;
4447
4448 linux_supports_tracefork_flag = 1;
4449 my_waitpid (second_pid, &second_status, 0);
4450 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4451 if (ret != 0)
4452 warning ("linux_test_for_tracefork: failed to kill second child");
4453 my_waitpid (second_pid, &status, 0);
4454 }
4455 }
4456 else
4457 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4458 "(%d, status 0x%x)", ret, status);
4459
4460 do
4461 {
4462 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4463 if (ret != 0)
4464 warning ("linux_test_for_tracefork: failed to kill child");
4465 my_waitpid (child_pid, &status, 0);
4466 }
4467 while (WIFSTOPPED (status));
4468
4469 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4470 free (stack);
4471 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4472 }
4473
4474
4475 static void
4476 linux_look_up_symbols (void)
4477 {
4478 #ifdef USE_THREAD_DB
4479 struct process_info *proc = current_process ();
4480
4481 if (proc->private->thread_db != NULL)
4482 return;
4483
4484 /* If the kernel supports tracing forks then it also supports tracing
4485 clones, and then we don't need to use the magic thread event breakpoint
4486 to learn about threads. */
4487 thread_db_init (!linux_supports_tracefork_flag);
4488 #endif
4489 }
4490
4491 static void
4492 linux_request_interrupt (void)
4493 {
4494 extern unsigned long signal_pid;
4495
4496 if (!ptid_equal (cont_thread, null_ptid)
4497 && !ptid_equal (cont_thread, minus_one_ptid))
4498 {
4499 struct lwp_info *lwp;
4500 int lwpid;
4501
4502 lwp = get_thread_lwp (current_inferior);
4503 lwpid = lwpid_of (lwp);
4504 kill_lwp (lwpid, SIGINT);
4505 }
4506 else
4507 kill_lwp (signal_pid, SIGINT);
4508 }
4509
4510 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4511 to debugger memory starting at MYADDR. */
4512
4513 static int
4514 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4515 {
4516 char filename[PATH_MAX];
4517 int fd, n;
4518 int pid = lwpid_of (get_thread_lwp (current_inferior));
4519
4520 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4521
4522 fd = open (filename, O_RDONLY);
4523 if (fd < 0)
4524 return -1;
4525
4526 if (offset != (CORE_ADDR) 0
4527 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4528 n = -1;
4529 else
4530 n = read (fd, myaddr, len);
4531
4532 close (fd);
4533
4534 return n;
4535 }
4536
4537 /* These breakpoint- and watchpoint-related wrapper functions simply
4538 forward the call if the target has registered a corresponding
4539 function.  */
4540
4541 static int
4542 linux_insert_point (char type, CORE_ADDR addr, int len)
4543 {
4544 if (the_low_target.insert_point != NULL)
4545 return the_low_target.insert_point (type, addr, len);
4546 else
4547 /* Unsupported (see target.h). */
4548 return 1;
4549 }
4550
4551 static int
4552 linux_remove_point (char type, CORE_ADDR addr, int len)
4553 {
4554 if (the_low_target.remove_point != NULL)
4555 return the_low_target.remove_point (type, addr, len);
4556 else
4557 /* Unsupported (see target.h). */
4558 return 1;
4559 }
4560
4561 static int
4562 linux_stopped_by_watchpoint (void)
4563 {
4564 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4565
4566 return lwp->stopped_by_watchpoint;
4567 }
4568
4569 static CORE_ADDR
4570 linux_stopped_data_address (void)
4571 {
4572 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4573
4574 return lwp->stopped_data_address;
4575 }
4576
4577 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4578 #if defined(__mcoldfire__)
4579 /* These should really be defined in the kernel's ptrace.h header. */
4580 #define PT_TEXT_ADDR 49*4
4581 #define PT_DATA_ADDR 50*4
4582 #define PT_TEXT_END_ADDR 51*4
4583 #elif defined(BFIN)
4584 #define PT_TEXT_ADDR 220
4585 #define PT_TEXT_END_ADDR 224
4586 #define PT_DATA_ADDR 228
4587 #elif defined(__TMS320C6X__)
4588 #define PT_TEXT_ADDR (0x10000*4)
4589 #define PT_DATA_ADDR (0x10004*4)
4590 #define PT_TEXT_END_ADDR (0x10008*4)
4591 #endif
4592
4593 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4594 to tell gdb about. */
4595
4596 static int
4597 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4598 {
4599 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4600 unsigned long text, text_end, data;
4601 int pid = lwpid_of (get_thread_lwp (current_inferior));
4602
4603 errno = 0;
4604
4605 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4606 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4607 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4608
4609 if (errno == 0)
4610 {
4611 /* Both text and data offsets produced at compile-time (and so
4612 used by gdb) are relative to the beginning of the program,
4613 with the data segment immediately following the text segment.
4614 However, the actual runtime layout in memory may put the data
4615 somewhere else, so when we send gdb a data base-address, we
4616 use the real data base address and subtract the compile-time
4617 data base-address from it (which is just the length of the
4618 text segment). BSS immediately follows data in both
4619 cases. */
4620 *text_p = text;
4621 *data_p = data - (text_end - text);
4622
4623 return 1;
4624 }
4625 #endif
4626 return 0;
4627 }
4628 #endif
4629
4630 static int
4631 linux_qxfer_osdata (const char *annex,
4632 unsigned char *readbuf, unsigned const char *writebuf,
4633 CORE_ADDR offset, int len)
4634 {
4635 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4636 }
4637
4638 /* Convert a native/host siginfo object into/from the siginfo in the
4639 layout of the inferior's architecture.  */
4640
4641 static void
4642 siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
4643 {
4644 int done = 0;
4645
4646 if (the_low_target.siginfo_fixup != NULL)
4647 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4648
4649 /* If there was no callback, or the callback didn't do anything,
4650 then just do a straight memcpy. */
4651 if (!done)
4652 {
4653 if (direction == 1)
4654 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4655 else
4656 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4657 }
4658 }
4659
4660 static int
4661 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4662 unsigned const char *writebuf, CORE_ADDR offset, int len)
4663 {
4664 int pid;
4665 struct siginfo siginfo;
4666 char inf_siginfo[sizeof (struct siginfo)];
4667
4668 if (current_inferior == NULL)
4669 return -1;
4670
4671 pid = lwpid_of (get_thread_lwp (current_inferior));
4672
4673 if (debug_threads)
4674 fprintf (stderr, "%s siginfo for lwp %d.\n",
4675 readbuf != NULL ? "Reading" : "Writing",
4676 pid);
4677
4678 if (offset >= sizeof (siginfo))
4679 return -1;
4680
4681 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4682 return -1;
4683
4684 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4685 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4686 inferior with a 64-bit GDBSERVER should look the same as debugging it
4687 with a 32-bit GDBSERVER, we need to convert it. */
4688 siginfo_fixup (&siginfo, inf_siginfo, 0);
4689
4690 if (offset + len > sizeof (siginfo))
4691 len = sizeof (siginfo) - offset;
4692
4693 if (readbuf != NULL)
4694 memcpy (readbuf, inf_siginfo + offset, len);
4695 else
4696 {
4697 memcpy (inf_siginfo + offset, writebuf, len);
4698
4699 /* Convert back to ptrace layout before flushing it out. */
4700 siginfo_fixup (&siginfo, inf_siginfo, 1);
4701
4702 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4703 return -1;
4704 }
4705
4706 return len;
4707 }
4708
4709 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4710 it notifies us when children change state; and it is the handler
4711 for the sigsuspend in my_waitpid.  */
4712
4713 static void
4714 sigchld_handler (int signo)
4715 {
4716 int old_errno = errno;
4717
4718 if (debug_threads)
4719 {
4720 do
4721 {
4722 /* fprintf is not async-signal-safe, so call write
4723 directly. */
4724 if (write (2, "sigchld_handler\n",
4725 sizeof ("sigchld_handler\n") - 1) < 0)
4726 break; /* just ignore */
4727 } while (0);
4728 }
4729
4730 if (target_is_async_p ())
4731 async_file_mark (); /* trigger a linux_wait */
4732
4733 errno = old_errno;
4734 }
4735
4736 static int
4737 linux_supports_non_stop (void)
4738 {
4739 return 1;
4740 }
4741
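/* Enable or disable target async mode.  Async mode is implemented with
   a self-pipe: sigchld_handler calls async_file_mark to write to the
   event pipe, and the event loop watches the read end, waking up
   linux_wait.  Returns the previous setting.  */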
4742 static int
4743 linux_async (int enable)
4744 {
4745 int previous = (linux_event_pipe[0] != -1);
4746
4747 if (debug_threads)
4748 fprintf (stderr, "linux_async (%d), previous=%d\n",
4749 enable, previous);
4750
4751 if (previous != enable)
4752 {
4753 sigset_t mask;
4754 sigemptyset (&mask);
4755 sigaddset (&mask, SIGCHLD);
4756
4757 sigprocmask (SIG_BLOCK, &mask, NULL);
4758
4759 if (enable)
4760 {
4761 if (pipe (linux_event_pipe) == -1)
4762 fatal ("creating event pipe failed.");
4763
4764 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4765 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4766
4767 /* Register the event loop handler. */
4768 add_file_handler (linux_event_pipe[0],
4769 handle_target_event, NULL);
4770
4771 /* Always trigger a linux_wait. */
4772 async_file_mark ();
4773 }
4774 else
4775 {
4776 delete_file_handler (linux_event_pipe[0]);
4777
4778 close (linux_event_pipe[0]);
4779 close (linux_event_pipe[1]);
4780 linux_event_pipe[0] = -1;
4781 linux_event_pipe[1] = -1;
4782 }
4783
4784 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4785 }
4786
4787 return previous;
4788 }
4789
4790 static int
4791 linux_start_non_stop (int nonstop)
4792 {
4793 /* Register or unregister from event-loop accordingly. */
4794 linux_async (nonstop);
4795 return 0;
4796 }
4797
4798 static int
4799 linux_supports_multi_process (void)
4800 {
4801 return 1;
4802 }
4803
4804 static int
4805 linux_supports_disable_randomization (void)
4806 {
4807 #ifdef HAVE_PERSONALITY
4808 return 1;
4809 #else
4810 return 0;
4811 #endif
4812 }
4813
4814 /* Enumerate spufs IDs for process PID. */
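/* An SPU context appears as an open file descriptor in /proc/PID/fd
   that refers to a directory on a filesystem with SPUFS_MAGIC.  Each
   such descriptor number is emitted as a 4-byte ID, honoring the
   OFFSET/LEN window requested by the caller.  */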
4815 static int
4816 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
4817 {
4818 int pos = 0;
4819 int written = 0;
4820 char path[128];
4821 DIR *dir;
4822 struct dirent *entry;
4823
4824 sprintf (path, "/proc/%ld/fd", pid);
4825 dir = opendir (path);
4826 if (!dir)
4827 return -1;
4828
4829 rewinddir (dir);
4830 while ((entry = readdir (dir)) != NULL)
4831 {
4832 struct stat st;
4833 struct statfs stfs;
4834 int fd;
4835
4836 fd = atoi (entry->d_name);
4837 if (!fd)
4838 continue;
4839
4840 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
4841 if (stat (path, &st) != 0)
4842 continue;
4843 if (!S_ISDIR (st.st_mode))
4844 continue;
4845
4846 if (statfs (path, &stfs) != 0)
4847 continue;
4848 if (stfs.f_type != SPUFS_MAGIC)
4849 continue;
4850
4851 if (pos >= offset && pos + 4 <= offset + len)
4852 {
4853 *(unsigned int *)(buf + pos - offset) = fd;
4854 written += 4;
4855 }
4856 pos += 4;
4857 }
4858
4859 closedir (dir);
4860 return written;
4861 }
4862
4863 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
4864 object type, using the /proc file system. */
4865 static int
4866 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
4867 unsigned const char *writebuf,
4868 CORE_ADDR offset, int len)
4869 {
4870 long pid = lwpid_of (get_thread_lwp (current_inferior));
4871 char buf[128];
4872 int fd = 0;
4873 int ret = 0;
4874
4875 if (!writebuf && !readbuf)
4876 return -1;
4877
4878 if (!*annex)
4879 {
4880 if (!readbuf)
4881 return -1;
4882 else
4883 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4884 }
4885
4886 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
4887 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4888 if (fd < 0)
4889 return -1;
4890
4891 if (offset != 0
4892 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4893 {
4894 close (fd);
4895 return 0;
4896 }
4897
4898 if (writebuf)
4899 ret = write (fd, writebuf, (size_t) len);
4900 else
4901 ret = read (fd, readbuf, (size_t) len);
4902
4903 close (fd);
4904 return ret;
4905 }
4906
4907 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
4908 struct target_loadseg
4909 {
4910 /* Core address to which the segment is mapped. */
4911 Elf32_Addr addr;
4912 /* VMA recorded in the program header. */
4913 Elf32_Addr p_vaddr;
4914 /* Size of this segment in memory. */
4915 Elf32_Word p_memsz;
4916 };
4917
4918 # if defined PT_GETDSBT
4919 struct target_loadmap
4920 {
4921 /* Protocol version number, must be zero. */
4922 Elf32_Word version;
4923 /* Pointer to the DSBT table, its size, and the DSBT index. */
4924 unsigned *dsbt_table;
4925 unsigned dsbt_size, dsbt_index;
4926 /* Number of segments in this map. */
4927 Elf32_Word nsegs;
4928 /* The actual memory map. */
4929 struct target_loadseg segs[/*nsegs*/];
4930 };
4931 # define LINUX_LOADMAP PT_GETDSBT
4932 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
4933 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
4934 # else
4935 struct target_loadmap
4936 {
4937 /* Protocol version number, must be zero. */
4938 Elf32_Half version;
4939 /* Number of segments in this map. */
4940 Elf32_Half nsegs;
4941 /* The actual memory map. */
4942 struct target_loadseg segs[/*nsegs*/];
4943 };
4944 # define LINUX_LOADMAP PTRACE_GETFDPIC
4945 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
4946 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
4947 # endif
4948
4949 static int
4950 linux_read_loadmap (const char *annex, CORE_ADDR offset,
4951 unsigned char *myaddr, unsigned int len)
4952 {
4953 int pid = lwpid_of (get_thread_lwp (current_inferior));
4954 int addr = -1;
4955 struct target_loadmap *data = NULL;
4956 unsigned int actual_length, copy_length;
4957
4958 if (strcmp (annex, "exec") == 0)
4959 addr = (int) LINUX_LOADMAP_EXEC;
4960 else if (strcmp (annex, "interp") == 0)
4961 addr = (int) LINUX_LOADMAP_INTERP;
4962 else
4963 return -1;
4964
4965 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
4966 return -1;
4967
4968 if (data == NULL)
4969 return -1;
4970
4971 actual_length = sizeof (struct target_loadmap)
4972 + sizeof (struct target_loadseg) * data->nsegs;
4973
4974 if (offset > actual_length)
4975 return -1;
4976
4977 copy_length = actual_length - offset < len ? actual_length - offset : len;
4978 memcpy (myaddr, (char *) data + offset, copy_length);
4979 return copy_length;
4980 }
4981 #else
4982 # define linux_read_loadmap NULL
4983 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
4984
4985 static void
4986 linux_process_qsupported (const char *query)
4987 {
4988 if (the_low_target.process_qsupported != NULL)
4989 the_low_target.process_qsupported (query);
4990 }
4991
4992 static int
4993 linux_supports_tracepoints (void)
4994 {
4995 if (the_low_target.supports_tracepoints == NULL)
4996 return 0;
4997
4998 return (*the_low_target.supports_tracepoints) ();
4999 }
5000
5001 static CORE_ADDR
5002 linux_read_pc (struct regcache *regcache)
5003 {
5004 if (the_low_target.get_pc == NULL)
5005 return 0;
5006
5007 return (*the_low_target.get_pc) (regcache);
5008 }
5009
5010 static void
5011 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5012 {
5013 gdb_assert (the_low_target.set_pc != NULL);
5014
5015 (*the_low_target.set_pc) (regcache, pc);
5016 }
5017
5018 static int
5019 linux_thread_stopped (struct thread_info *thread)
5020 {
5021 return get_thread_lwp (thread)->stopped;
5022 }
5023
5024 /* This exposes stop-all-threads functionality to other modules. */
5025
5026 static void
5027 linux_pause_all (int freeze)
5028 {
5029 stop_all_lwps (freeze, NULL);
5030 }
5031
5032 /* This exposes unstop-all-threads functionality to other gdbserver
5033 modules. */
5034
5035 static void
5036 linux_unpause_all (int unfreeze)
5037 {
5038 unstop_all_lwps (unfreeze, NULL);
5039 }
5040
5041 static int
5042 linux_prepare_to_access_memory (void)
5043 {
5044 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5045 running LWP. */
5046 if (non_stop)
5047 linux_pause_all (1);
5048 return 0;
5049 }
5050
5051 static void
5052 linux_done_accessing_memory (void)
5053 {
5054 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5055 running LWP. */
5056 if (non_stop)
5057 linux_unpause_all (1);
5058 }
5059
5060 static int
5061 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5062 CORE_ADDR collector,
5063 CORE_ADDR lockaddr,
5064 ULONGEST orig_size,
5065 CORE_ADDR *jump_entry,
5066 CORE_ADDR *trampoline,
5067 ULONGEST *trampoline_size,
5068 unsigned char *jjump_pad_insn,
5069 ULONGEST *jjump_pad_insn_size,
5070 CORE_ADDR *adjusted_insn_addr,
5071 CORE_ADDR *adjusted_insn_addr_end,
5072 char *err)
5073 {
5074 return (*the_low_target.install_fast_tracepoint_jump_pad)
5075 (tpoint, tpaddr, collector, lockaddr, orig_size,
5076 jump_entry, trampoline, trampoline_size,
5077 jjump_pad_insn, jjump_pad_insn_size,
5078 adjusted_insn_addr, adjusted_insn_addr_end,
5079 err);
5080 }
5081
5082 static struct emit_ops *
5083 linux_emit_ops (void)
5084 {
5085 if (the_low_target.emit_ops != NULL)
5086 return (*the_low_target.emit_ops) ();
5087 else
5088 return NULL;
5089 }
5090
5091 static int
5092 linux_get_min_fast_tracepoint_insn_len (void)
5093 {
5094 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5095 }
5096
5097 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
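/* The auxiliary vector is a sequence of (a_type, value) pairs; scan it
   for AT_PHDR and AT_PHNUM, which give the address and count of the
   program headers in the inferior.  */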
5098
5099 static int
5100 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5101 CORE_ADDR *phdr_memaddr, int *num_phdr)
5102 {
5103 char filename[PATH_MAX];
5104 int fd;
5105 const int auxv_size = is_elf64
5106 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5107 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5108
5109 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5110
5111 fd = open (filename, O_RDONLY);
5112 if (fd < 0)
5113 return 1;
5114
5115 *phdr_memaddr = 0;
5116 *num_phdr = 0;
5117 while (read (fd, buf, auxv_size) == auxv_size
5118 && (*phdr_memaddr == 0 || *num_phdr == 0))
5119 {
5120 if (is_elf64)
5121 {
5122 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5123
5124 switch (aux->a_type)
5125 {
5126 case AT_PHDR:
5127 *phdr_memaddr = aux->a_un.a_val;
5128 break;
5129 case AT_PHNUM:
5130 *num_phdr = aux->a_un.a_val;
5131 break;
5132 }
5133 }
5134 else
5135 {
5136 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5137
5138 switch (aux->a_type)
5139 {
5140 case AT_PHDR:
5141 *phdr_memaddr = aux->a_un.a_val;
5142 break;
5143 case AT_PHNUM:
5144 *num_phdr = aux->a_un.a_val;
5145 break;
5146 }
5147 }
5148 }
5149
5150 close (fd);
5151
5152 if (*phdr_memaddr == 0 || *num_phdr == 0)
5153 {
5154 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5155 "phdr_memaddr = %ld, phdr_num = %d",
5156 (long) *phdr_memaddr, *num_phdr);
5157 return 2;
5158 }
5159
5160 return 0;
5161 }
5162
5163 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5164
5165 static CORE_ADDR
5166 get_dynamic (const int pid, const int is_elf64)
5167 {
5168 CORE_ADDR phdr_memaddr, relocation;
5169 int num_phdr, i;
5170 unsigned char *phdr_buf;
5171 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5172
5173 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5174 return 0;
5175
5176 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5177 phdr_buf = alloca (num_phdr * phdr_size);
5178
5179 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5180 return 0;
5181
5182 /* Compute relocation: it is expected to be 0 for "regular" executables,
5183 non-zero for PIE ones. */
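/* The load bias is the difference between where the program headers
   actually sit in memory (phdr_memaddr, from AT_PHDR) and the p_vaddr
   recorded for the PT_PHDR segment; adding it to PT_DYNAMIC's p_vaddr
   below yields the runtime address of _DYNAMIC.  */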
5184 relocation = -1;
5185 for (i = 0; relocation == -1 && i < num_phdr; i++)
5186 if (is_elf64)
5187 {
5188 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5189
5190 if (p->p_type == PT_PHDR)
5191 relocation = phdr_memaddr - p->p_vaddr;
5192 }
5193 else
5194 {
5195 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5196
5197 if (p->p_type == PT_PHDR)
5198 relocation = phdr_memaddr - p->p_vaddr;
5199 }
5200
5201 if (relocation == -1)
5202 {
5203 warning ("Unexpected missing PT_PHDR");
5204 return 0;
5205 }
5206
5207 for (i = 0; i < num_phdr; i++)
5208 {
5209 if (is_elf64)
5210 {
5211 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5212
5213 if (p->p_type == PT_DYNAMIC)
5214 return p->p_vaddr + relocation;
5215 }
5216 else
5217 {
5218 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5219
5220 if (p->p_type == PT_DYNAMIC)
5221 return p->p_vaddr + relocation;
5222 }
5223 }
5224
5225 return 0;
5226 }
5227
5228 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5229 can be 0 if the inferior does not yet have the library list initialized. */
5230
5231 static CORE_ADDR
5232 get_r_debug (const int pid, const int is_elf64)
5233 {
5234 CORE_ADDR dynamic_memaddr;
5235 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5236 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5237
5238 dynamic_memaddr = get_dynamic (pid, is_elf64);
5239 if (dynamic_memaddr == 0)
5240 return (CORE_ADDR) -1;
5241
5242 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5243 {
5244 if (is_elf64)
5245 {
5246 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5247
5248 if (dyn->d_tag == DT_DEBUG)
5249 return dyn->d_un.d_val;
5250
5251 if (dyn->d_tag == DT_NULL)
5252 break;
5253 }
5254 else
5255 {
5256 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5257
5258 if (dyn->d_tag == DT_DEBUG)
5259 return dyn->d_un.d_val;
5260
5261 if (dyn->d_tag == DT_NULL)
5262 break;
5263 }
5264
5265 dynamic_memaddr += dyn_size;
5266 }
5267
5268 return (CORE_ADDR) -1;
5269 }
5270
5271 /* Read one pointer from MEMADDR in the inferior. */
5272
5273 static int
5274 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5275 {
5276 *ptr = 0;
5277 return linux_read_memory (memaddr, (unsigned char *) ptr, ptr_size);
5278 }
5279
5280 struct link_map_offsets
5281 {
5282 /* Offset and size of r_debug.r_version. */
5283 int r_version_offset;
5284
5285 /* Offset and size of r_debug.r_map. */
5286 int r_map_offset;
5287
5288 /* Offset to l_addr field in struct link_map. */
5289 int l_addr_offset;
5290
5291 /* Offset to l_name field in struct link_map. */
5292 int l_name_offset;
5293
5294 /* Offset to l_ld field in struct link_map. */
5295 int l_ld_offset;
5296
5297 /* Offset to l_next field in struct link_map. */
5298 int l_next_offset;
5299
5300 /* Offset to l_prev field in struct link_map. */
5301 int l_prev_offset;
5302 };
5303
5304 /* Construct qXfer:libraries-svr4:read reply.  */
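/* The reply is built by walking the inferior's link_map list: starting
   from r_debug.r_map, each entry contributes one <library> element
   carrying its name, link-map address, load address, and the address
   of its dynamic section.  */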
5305
5306 static int
5307 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5308 unsigned const char *writebuf,
5309 CORE_ADDR offset, int len)
5310 {
5311 char *document;
5312 unsigned document_len;
5313 struct process_info_private *const priv = current_process ()->private;
5314 char filename[PATH_MAX];
5315 int pid, is_elf64;
5316
5317 static const struct link_map_offsets lmo_32bit_offsets =
5318 {
5319 0, /* r_version offset. */
5320 4, /* r_debug.r_map offset. */
5321 0, /* l_addr offset in link_map. */
5322 4, /* l_name offset in link_map. */
5323 8, /* l_ld offset in link_map. */
5324 12, /* l_next offset in link_map. */
5325 16 /* l_prev offset in link_map. */
5326 };
5327
5328 static const struct link_map_offsets lmo_64bit_offsets =
5329 {
5330 0, /* r_version offset. */
5331 8, /* r_debug.r_map offset. */
5332 0, /* l_addr offset in link_map. */
5333 8, /* l_name offset in link_map. */
5334 16, /* l_ld offset in link_map. */
5335 24, /* l_next offset in link_map. */
5336 32 /* l_prev offset in link_map. */
5337 };
5338 const struct link_map_offsets *lmo;
5339
5340 if (writebuf != NULL)
5341 return -2;
5342 if (readbuf == NULL)
5343 return -1;
5344
5345 pid = lwpid_of (get_thread_lwp (current_inferior));
5346 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5347 is_elf64 = elf_64_file_p (filename);
5348 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5349
5350 if (priv->r_debug == 0)
5351 priv->r_debug = get_r_debug (pid, is_elf64);
5352
5353 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5354 {
5355 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5356 }
5357 else
5358 {
5359 int allocated = 1024;
5360 char *p;
5361 const int ptr_size = is_elf64 ? 8 : 4;
5362 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5363 int r_version, header_done = 0;
5364
5365 document = xmalloc (allocated);
5366 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5367 p = document + strlen (document);
5368
5369 r_version = 0;
5370 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5371 (unsigned char *) &r_version,
5372 sizeof (r_version)) != 0
5373 || r_version != 1)
5374 {
5375 warning ("unexpected r_debug version %d", r_version);
5376 goto done;
5377 }
5378
5379 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5380 &lm_addr, ptr_size) != 0)
5381 {
5382 warning ("unable to read r_map from 0x%lx",
5383 (long) priv->r_debug + lmo->r_map_offset);
5384 goto done;
5385 }
5386
5387 lm_prev = 0;
5388 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5389 &l_name, ptr_size) == 0
5390 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5391 &l_addr, ptr_size) == 0
5392 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5393 &l_ld, ptr_size) == 0
5394 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5395 &l_prev, ptr_size) == 0
5396 && read_one_ptr (lm_addr + lmo->l_next_offset,
5397 &l_next, ptr_size) == 0)
5398 {
5399 unsigned char libname[PATH_MAX];
5400
5401 if (lm_prev != l_prev)
5402 {
5403 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5404 (long) lm_prev, (long) l_prev);
5405 break;
5406 }
5407
5408 /* Not checking for error because reading may stop before
5409 we've got PATH_MAX worth of characters. */
5410 libname[0] = '\0';
5411 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5412 libname[sizeof (libname) - 1] = '\0';
5413 if (libname[0] != '\0')
5414 {
5415 /* 6x the size for xml_escape_text below. */
5416 size_t len = 6 * strlen ((char *) libname);
5417 char *name;
5418
5419 if (!header_done)
5420 {
5421 /* Terminate `<library-list-svr4'. */
5422 *p++ = '>';
5423 header_done = 1;
5424 }
5425
5426 while (allocated < p - document + len + 200)
5427 {
5428 /* Expand to guarantee sufficient storage. */
5429 uintptr_t document_len = p - document;
5430
5431 document = xrealloc (document, 2 * allocated);
5432 allocated *= 2;
5433 p = document + document_len;
5434 }
5435
5436 name = xml_escape_text ((char *) libname);
5437 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5438 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5439 name, (unsigned long) lm_addr,
5440 (unsigned long) l_addr, (unsigned long) l_ld);
5441 free (name);
5442 }
5443 else if (lm_prev == 0)
5444 {
5445 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5446 p = p + strlen (p);
5447 }
5448
5449 if (l_next == 0)
5450 break;
5451
5452 lm_prev = lm_addr;
5453 lm_addr = l_next;
5454 }
5455 done:
5456 strcpy (p, "</library-list-svr4>");
5457 }
5458
5459 document_len = strlen (document);
5460 if (offset < document_len)
5461 document_len -= offset;
5462 else
5463 document_len = 0;
5464 if (len > document_len)
5465 len = document_len;
5466
5467 memcpy (readbuf, document + offset, len);
5468 xfree (document);
5469
5470 return len;
5471 }
5472
5473 static struct target_ops linux_target_ops = {
5474 linux_create_inferior,
5475 linux_attach,
5476 linux_kill,
5477 linux_detach,
5478 linux_mourn,
5479 linux_join,
5480 linux_thread_alive,
5481 linux_resume,
5482 linux_wait,
5483 linux_fetch_registers,
5484 linux_store_registers,
5485 linux_prepare_to_access_memory,
5486 linux_done_accessing_memory,
5487 linux_read_memory,
5488 linux_write_memory,
5489 linux_look_up_symbols,
5490 linux_request_interrupt,
5491 linux_read_auxv,
5492 linux_insert_point,
5493 linux_remove_point,
5494 linux_stopped_by_watchpoint,
5495 linux_stopped_data_address,
5496 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5497 linux_read_offsets,
5498 #else
5499 NULL,
5500 #endif
5501 #ifdef USE_THREAD_DB
5502 thread_db_get_tls_address,
5503 #else
5504 NULL,
5505 #endif
5506 linux_qxfer_spu,
5507 hostio_last_error_from_errno,
5508 linux_qxfer_osdata,
5509 linux_xfer_siginfo,
5510 linux_supports_non_stop,
5511 linux_async,
5512 linux_start_non_stop,
5513 linux_supports_multi_process,
5514 #ifdef USE_THREAD_DB
5515 thread_db_handle_monitor_command,
5516 #else
5517 NULL,
5518 #endif
5519 linux_common_core_of_thread,
5520 linux_read_loadmap,
5521 linux_process_qsupported,
5522 linux_supports_tracepoints,
5523 linux_read_pc,
5524 linux_write_pc,
5525 linux_thread_stopped,
5526 NULL,
5527 linux_pause_all,
5528 linux_unpause_all,
5529 linux_cancel_breakpoints,
5530 linux_stabilize_threads,
5531 linux_install_fast_tracepoint_jump_pad,
5532 linux_emit_ops,
5533 linux_supports_disable_randomization,
5534 linux_get_min_fast_tracepoint_insn_len,
5535 linux_qxfer_libraries_svr4,
5536 };
5537
5538 static void
5539 linux_init_signals (void)
5540 {
5541 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5542 to find what the cancel signal actually is. */
5543 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5544 signal (__SIGRTMIN+1, SIG_IGN);
5545 #endif
5546 }
5547
5548 void
5549 initialize_low (void)
5550 {
5551 struct sigaction sigchld_action;
5552 memset (&sigchld_action, 0, sizeof (sigchld_action));
5553 set_target_ops (&linux_target_ops);
5554 set_breakpoint_data (the_low_target.breakpoint,
5555 the_low_target.breakpoint_len);
5556 linux_init_signals ();
5557 linux_test_for_tracefork ();
5558 #ifdef HAVE_LINUX_REGSETS
5559 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5560 ;
5561 disabled_regsets = xmalloc (num_regsets);
5562 #endif
5563
5564 sigchld_action.sa_handler = sigchld_handler;
5565 sigemptyset (&sigchld_action.sa_mask);
5566 sigchld_action.sa_flags = SA_RESTART;
5567 sigaction (SIGCHLD, &sigchld_action, NULL);
5568 }