/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-1996, 1998-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "linux-osdata.h"
#include "agent.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

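/* Some C libraries lack W_STOPCODE; it builds a waitpid-style status
   word for a process stopped by signal SIG, i.e. the inverse of
   WSTOPSIG.  */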
#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

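/* Types used for the arguments and transfer value of the ptrace calls
   throughout this file.  */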
#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes a
   32-bit ELF file, and -1 if it is not ELF at all.  Store the machine
   type in *MACHINE (EM_NONE when the file is not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[MAXPATHLEN];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}

static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
	 LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
	{
	  /* Since all signals are blocked, there's no need to check
	     for EINTR here.  */
	  ret = waitpid (pid, status, flags);
	  out_errno = errno;

	  if (ret == -1 && out_errno != ECHILD)
	    break;
	  else if (ret > 0)
	    break;

	  if (flags & __WCLONE)
	    {
	      /* We've tried both flavors now.  If WNOHANG is set,
		 there's nothing else to do, just bail out.  */
	      if (wnohang)
		break;

	      if (debug_threads)
		fprintf (stderr, "blocking\n");

	      /* Block waiting for signals.  */
	      sigsuspend (&wake_mask);
	    }

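	  /* Alternate between checking for non-clone and clone
	     children; without __WALL, a single waitpid flavor can
	     only see one of the two kinds.  */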
	  flags ^= __WCLONE;
	}

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
	ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
	     pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

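/* Note: the extended event code is carried in bits 16 and up of the
   wait status, alongside the SIGTRAP that reports it; see the
   PTRACE_EVENT_* constants.  */
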
static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
	{
	  if (stopping_threads)
	    new_lwp->stop_pc = get_stop_pc (new_lwp);
	  else
	    linux_resume_one_lwp (new_lwp, 0, 0, NULL);
	}
      else
	{
	  new_lwp->stop_expected = 1;

	  if (stopping_threads)
	    {
	      new_lwp->stop_pc = get_stop_pc (new_lwp);
	      new_lwp->status_pending_p = 1;
	      new_lwp->status_pending = status;
	    }
	  else
	    /* Pass the signal on.  This is what GDB does - except
	       shouldn't we really report it instead?  */
	    linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
	}

      /* Always resume the current thread.  If we are stopping
	 threads, it will have a pending SIGSTOP; we may as well
	 collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

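  /* A nonzero high half of the wait status means this SIGTRAP is
     really an extended ptrace event (e.g. a clone notification), for
     which no breakpoint PC adjustment applies.  */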
  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
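      /* personality (0xffffffff) is a query: it returns the current
	 persona without changing it.  */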
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning ("Error disabling address space randomization: %s",
		 strerror (errno));
    }
#endif

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    /* Errors ignored.  */;
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning ("Error restoring address space randomization: %s",
		 strerror (errno));
    }
#endif

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      struct buffer buffer;

      if (!initial)
	{
	  /* If we fail to attach to an LWP, just warn.  */
	  fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
		   strerror (errno), errno);
	  fflush (stderr);
	  return;
	}

      /* If we fail to attach to a process, report an error.  */
      buffer_init (&buffer);
      linux_ptrace_attach_warnings (lwpid, &buffer);
      buffer_grow_str0 (&buffer, "");
      error ("%sCannot attach to lwp %ld: %s (%d)", buffer_finish (&buffer),
	     lwpid, strerror (errno), errno);
    }

  if (initial)
    /* If lwp is the tgid, we handle adding existing threads later.
       Otherwise we just add lwp without bothering about any other
       threads.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
	 safe, since we're always called in the context of the same
	 process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	fprintf (stderr,
		 "Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

int
linux_attach (unsigned long pid)
{
  /* Attach to PID.  We will check for other threads
     soon.  */
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  if (linux_proc_get_tgid (pid) == pid)
    {
      DIR *dir;
      char pathname[128];

      sprintf (pathname, "/proc/%ld/task", pid);

      dir = opendir (pathname);

      if (!dir)
	{
	  fprintf (stderr, "Could not open /proc/%ld/task.\n", pid);
	  fflush (stderr);
	}
      else
	{
	  /* At this point we attached to the tgid.  Scan the task for
	     existing threads.  */
	  unsigned long lwp;
	  int new_threads_found;
	  int iterations = 0;
	  struct dirent *dp;

	  while (iterations < 2)
	    {
	      new_threads_found = 0;
	      /* Add all the other threads.  While we go through the
		 threads, new threads may be spawned.  Cycle through
		 the list of threads until we have done two iterations without
		 finding new threads.  */
	      while ((dp = readdir (dir)) != NULL)
		{
		  /* Fetch one lwp.  */
		  lwp = strtoul (dp->d_name, NULL, 10);

		  /* Is this a new thread?  */
		  if (lwp
		      && find_thread_ptid (ptid_build (pid, lwp, 0)) == NULL)
		    {
		      linux_attach_lwp_1 (lwp, 0);
		      new_threads_found++;

		      if (debug_threads)
			fprintf (stderr, "\
Found and attached to new lwp %ld\n", lwp);
		    }
		}

	      if (!new_threads_found)
		iterations++;
	      else
		iterations = 0;

	      rewinddir (dir);
	    }
	  closedir (dir);
	}
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

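/* Return non-zero if THREAD is the only remaining thread of its
   process, i.e. no other entry in all_threads shares its pid.  */
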
static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  int pid = lwpid_of (lwp);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill (pid, SIGKILL);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: kill (SIGKILL) %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");

  errno = 0;
  ptrace (PTRACE_KILL, pid, 0, 0);
  if (debug_threads)
    fprintf (stderr,
	     "LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
	     target_pid_to_str (ptid_of (lwp)),
	     errno ? strerror (errno) : "OK");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
	fprintf (stderr, "lkop: is last of process %s\n",
		 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: cannot find lwp for pid: %d\n",
		 pid);
    }
  else
    {
      if (debug_threads)
	fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
		 lwpid_of (lwp), pid);

      do
	{
	  linux_kill_one_lwp (lwp);

	  /* Make sure it died.  The loop is most likely unnecessary.  */
	  lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
	} while (lwpid > 0 && WIFSTOPPED (wstat));
    }

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum target_signal signo = TARGET_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == TARGET_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s hasn't stopped: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had stopped with extended "
		 "status: no pending signal\n",
		 target_pid_to_str (ptid_of (lp)));
      return 0;
    }

  signo = target_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, but it is in nopass state\n",
		 target_pid_to_str (ptid_of (lp)),
		 target_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == TARGET_SIGNAL_TRAP || signo == TARGET_SIGNAL_INT))
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s had signal %s, "
		 "but we don't know if we should pass it.  Default to not.\n",
		 target_pid_to_str (ptid_of (lp)),
		 target_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	fprintf (stderr,
		 "GPS: lwp %s has pending signal %s: delivering it.\n",
		 target_pid_to_str (ptid_of (lp)),
		 target_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Sending SIGCONT to %s\n",
		 target_pid_to_str (ptid_of (lwp)));

      kill_lwp (lwpid_of (lwp), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
			   get_lwp_thread (lwp));

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (lwp)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

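/* Wait for process PID to exit, ignoring intermediate stops; this
   backs the target's `join' operation.  */
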
static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

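/* Callback for find_inferior.  Return non-zero if ENTRY's LWP id
   matches the LWP id of DATA's ptid, falling back to the pid when the
   ptid carries no LWP field.  */
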
static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;			/* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);	/* this lwp only */

  options |= __WALL;

 retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

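  /* Signals 32 and 33 are reserved for the threading library's
     internal use (e.g. LinuxThreads restart/cancel), so don't clutter
     the debug log with them.  */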
  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
	  || (WSTOPSIG (*wstatp) != 32
	      && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_to_pid_list (&stopped_pids, ret, *wstatp);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
	{
	  child->stopped_by_watchpoint = 0;
	}
      else
	{
	  struct thread_info *saved_inferior;

	  saved_inferior = current_inferior;
	  current_inferior = get_lwp_thread (child);

	  child->stopped_by_watchpoint
	    = the_low_target.stopped_by_watchpoint ();

	  if (child->stopped_by_watchpoint)
	    {
	      if (the_low_target.stopped_data_address != NULL)
		child->stopped_data_address
		  = the_low_target.stopped_data_address ();
	      else
		child->stopped_data_address = 0;
	    }

	  current_inferior = saved_inferior;
	}
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
		 lwpid_of (lwp));

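      /* R is 0 if the LWP is not in the jump pad at all; r == 1 means
	 it stopped before executing the relocated original
	 instruction (see the handling below), and, presumably, 2 that
	 it stopped somewhere past it, as reported by
	 fast_tracepoint_collecting.  */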
      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
			 lwpid_of (lwp));
	      current_inferior = saved_inferior;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
		}

	      regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Cancelling fast exit-jump-pad: removing bkpt. "
			 "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);
	      cancel_breakpoints ();

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
	     lwpid_of (lwp));

  current_inferior = saved_inferior;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	fprintf (stderr,
		 "   Already queued %d\n",
		 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		fprintf (stderr,
			 "Not requeuing already queued non-RT signal %d"
			 " for LWP %ld\n",
			 sig->signal,
			 lwpid_of (lwp));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

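      /* The list is kept newest-first, so walk down to the oldest
	 pending signal and report that one first.  */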
1706 p_sig = &lwp->pending_signals_to_report;
1707 while ((*p_sig)->prev != NULL)
1708 p_sig = &(*p_sig)->prev;
1709
1710 *wstat = W_STOPCODE ((*p_sig)->signal);
1711 if ((*p_sig)->info.si_signo != 0)
1712 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1713 free (*p_sig);
1714 *p_sig = NULL;
1715
1716 if (debug_threads)
1717 fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
1718 WSTOPSIG (*wstat), lwpid_of (lwp));
1719
1720 if (debug_threads)
1721 {
1722 struct pending_signals *sig;
1723
1724 for (sig = lwp->pending_signals_to_report;
1725 sig != NULL;
1726 sig = sig->prev)
1727 fprintf (stderr,
1728 " Still queued %d\n",
1729 sig->signal);
1730
1731 fprintf (stderr, " (no more queued signals)\n");
1732 }
1733
1734 return 1;
1735 }
1736
1737 return 0;
1738 }
1739
1740 /* Arrange for a breakpoint to be hit again later. We don't keep the
1741 SIGTRAP status and don't forward the SIGTRAP signal to the LWP. We
1742 will handle the current event, eventually we will resume this LWP,
1743 and this breakpoint will trap again. */
1744
1745 static int
1746 cancel_breakpoint (struct lwp_info *lwp)
1747 {
1748 struct thread_info *saved_inferior;
1749
1750 /* There's nothing to do if we don't support breakpoints. */
1751 if (!supports_breakpoints ())
1752 return 0;
1753
1754 /* breakpoint_at reads from current inferior. */
1755 saved_inferior = current_inferior;
1756 current_inferior = get_lwp_thread (lwp);
1757
1758 if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
1759 {
1760 if (debug_threads)
1761 fprintf (stderr,
1762 "CB: Push back breakpoint for %s\n",
1763 target_pid_to_str (ptid_of (lwp)));
1764
1765 /* Back up the PC if necessary. */
1766 if (the_low_target.decr_pc_after_break)
1767 {
1768 struct regcache *regcache
1769 = get_thread_regcache (current_inferior, 1);
1770 (*the_low_target.set_pc) (regcache, lwp->stop_pc);
1771 }
1772
1773 current_inferior = saved_inferior;
1774 return 1;
1775 }
1776 else
1777 {
1778 if (debug_threads)
1779 fprintf (stderr,
1780 "CB: No breakpoint found at %s for [%s]\n",
1781 paddress (lwp->stop_pc),
1782 target_pid_to_str (ptid_of (lwp)));
1783 }
1784
1785 current_inferior = saved_inferior;
1786 return 0;
1787 }
1788
1789 /* When the event-loop is doing a step-over, this points at the thread
1790 being stepped. */
1791 ptid_t step_over_bkpt;
1792
1793 /* Wait for an event from child PID. If PID is -1, wait for any
1794 child. Store the stop status through the status pointer WSTAT.
1795 OPTIONS is passed to the waitpid call. Return 0 if no child stop
1796 event was found and OPTIONS contains WNOHANG. Return the PID of
1797 the stopped child otherwise. */
1798
1799 static int
1800 linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1801 {
1802 struct lwp_info *event_child, *requested_child;
1803 ptid_t wait_ptid;
1804
1805 event_child = NULL;
1806 requested_child = NULL;
1807
1808 /* Check for a lwp with a pending status. */
1809
1810 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
1811 {
1812 event_child = (struct lwp_info *)
1813 find_inferior (&all_lwps, status_pending_p_callback, &ptid);
1814 if (debug_threads && event_child)
1815 fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1816 }
1817 else
1818 {
1819 requested_child = find_lwp_pid (ptid);
1820
1821 if (!stopping_threads
1822 && requested_child->status_pending_p
1823 && requested_child->collecting_fast_tracepoint)
1824 {
1825 enqueue_one_deferred_signal (requested_child,
1826 &requested_child->status_pending);
1827 requested_child->status_pending_p = 0;
1828 requested_child->status_pending = 0;
1829 linux_resume_one_lwp (requested_child, 0, 0, NULL);
1830 }
1831
1832 if (requested_child->suspended
1833 && requested_child->status_pending_p)
1834 fatal ("requesting an event out of a suspended child?");
1835
1836 if (requested_child->status_pending_p)
1837 event_child = requested_child;
1838 }
1839
1840 if (event_child != NULL)
1841 {
1842 if (debug_threads)
1843 fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1844 lwpid_of (event_child), event_child->status_pending);
1845 *wstat = event_child->status_pending;
1846 event_child->status_pending_p = 0;
1847 event_child->status_pending = 0;
1848 current_inferior = get_lwp_thread (event_child);
1849 return lwpid_of (event_child);
1850 }
1851
1852 if (ptid_is_pid (ptid))
1853 {
1854 /* A request to wait for a specific tgid. This is not possible
1855 with waitpid, so instead, we wait for any child, and leave
1856 children we're not interested in right now with a pending
1857 status to report later. */
1858 wait_ptid = minus_one_ptid;
1859 }
1860 else
1861 wait_ptid = ptid;
1862
1863 /* We only enter this loop if no process has a pending wait status. Thus
1864 any action taken in response to a wait status inside this loop is
1865 responding as soon as we detect the status, not after any pending
1866 events. */
1867 while (1)
1868 {
1869 event_child = linux_wait_for_lwp (wait_ptid, wstat, options);
1870
1871 if ((options & WNOHANG) && event_child == NULL)
1872 {
1873 if (debug_threads)
1874 fprintf (stderr, "WNOHANG set, no event found\n");
1875 return 0;
1876 }
1877
1878 if (event_child == NULL)
1879 error ("event from unknown child");
1880
1881 if (ptid_is_pid (ptid)
1882 && ptid_get_pid (ptid) != ptid_get_pid (ptid_of (event_child)))
1883 {
1884 if (! WIFSTOPPED (*wstat))
1885 mark_lwp_dead (event_child, *wstat);
1886 else
1887 {
1888 event_child->status_pending_p = 1;
1889 event_child->status_pending = *wstat;
1890 }
1891 continue;
1892 }
1893
1894 current_inferior = get_lwp_thread (event_child);
1895
1896 /* Check for thread exit. */
1897 if (! WIFSTOPPED (*wstat))
1898 {
1899 if (debug_threads)
1900 fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1901
1902 /* If the last thread is exiting, just return. */
1903 if (last_thread_of_process_p (current_inferior))
1904 {
1905 if (debug_threads)
1906 fprintf (stderr, "LWP %ld is last lwp of process\n",
1907 lwpid_of (event_child));
1908 return lwpid_of (event_child);
1909 }
1910
1911 if (!non_stop)
1912 {
1913 current_inferior = (struct thread_info *) all_threads.head;
1914 if (debug_threads)
1915 fprintf (stderr, "Current inferior is now %ld\n",
1916 lwpid_of (get_thread_lwp (current_inferior)));
1917 }
1918 else
1919 {
1920 current_inferior = NULL;
1921 if (debug_threads)
1922 fprintf (stderr, "Current inferior is now <NULL>\n");
1923 }
1924
1925 /* If we were waiting for this particular child to do something...
1926 well, it did something. */
1927 if (requested_child != NULL)
1928 {
1929 int lwpid = lwpid_of (event_child);
1930
1931 /* Cancel the step-over operation --- the thread that
1932 started it is gone. */
1933 if (finish_step_over (event_child))
1934 unstop_all_lwps (1, event_child);
1935 delete_lwp (event_child);
1936 return lwpid;
1937 }
1938
1939 delete_lwp (event_child);
1940
1941 /* Wait for a more interesting event. */
1942 continue;
1943 }
1944
1945 if (event_child->must_set_ptrace_flags)
1946 {
1947 linux_enable_event_reporting (lwpid_of (event_child));
1948 event_child->must_set_ptrace_flags = 0;
1949 }
1950
1951 if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1952 && *wstat >> 16 != 0)
1953 {
1954 handle_extended_wait (event_child, *wstat);
1955 continue;
1956 }
1957
1958 if (WIFSTOPPED (*wstat)
1959 && WSTOPSIG (*wstat) == SIGSTOP
1960 && event_child->stop_expected)
1961 {
1962 int should_stop;
1963
1964 if (debug_threads)
1965 fprintf (stderr, "Expected stop.\n");
1966 event_child->stop_expected = 0;
1967
1968 should_stop = (current_inferior->last_resume_kind == resume_stop
1969 || stopping_threads);
1970
1971 if (!should_stop)
1972 {
1973 linux_resume_one_lwp (event_child,
1974 event_child->stepping, 0, NULL);
1975 continue;
1976 }
1977 }
1978
1979 return lwpid_of (event_child);
1980 }
1981
1982 /* NOTREACHED */
1983 return 0;
1984 }
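/* Illustrative sketch (editor's addition, not from the original
   source): what the "*wstat >> 16 != 0" test above detects.  For a
   child traced with the PTRACE_O_TRACE* options, the kernel encodes
   the PTRACE_EVENT_* code in bits 16 and up of the wait status of a
   SIGTRAP stop.  Disabled sketch, not project code.  */
#if 0
static int
ptrace_event_of_status_example (int wstat)
{
  /* Nonzero for extended events, e.g. a clone that created a new
     LWP; zero for an ordinary SIGTRAP.  */
  return wstat >> 16;
}
#endif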
1985
1986 /* Count the LWP's that have had events. */
1987
1988 static int
1989 count_events_callback (struct inferior_list_entry *entry, void *data)
1990 {
1991 struct lwp_info *lp = (struct lwp_info *) entry;
1992 struct thread_info *thread = get_lwp_thread (lp);
1993 int *count = data;
1994
1995 gdb_assert (count != NULL);
1996
1997 /* Count only resumed LWPs that have a SIGTRAP event pending that
1998 should be reported to GDB. */
1999 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2000 && thread->last_resume_kind != resume_stop
2001 && lp->status_pending_p
2002 && WIFSTOPPED (lp->status_pending)
2003 && WSTOPSIG (lp->status_pending) == SIGTRAP
2004 && !breakpoint_inserted_here (lp->stop_pc))
2005 (*count)++;
2006
2007 return 0;
2008 }
2009
2010 /* Select the LWP (if any) that is currently being single-stepped. */
2011
2012 static int
2013 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2014 {
2015 struct lwp_info *lp = (struct lwp_info *) entry;
2016 struct thread_info *thread = get_lwp_thread (lp);
2017
2018 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2019 && thread->last_resume_kind == resume_step
2020 && lp->status_pending_p)
2021 return 1;
2022 else
2023 return 0;
2024 }
2025
2026 /* Select the Nth LWP that has had a SIGTRAP event that should be
2027 reported to GDB. */
2028
2029 static int
2030 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2031 {
2032 struct lwp_info *lp = (struct lwp_info *) entry;
2033 struct thread_info *thread = get_lwp_thread (lp);
2034 int *selector = data;
2035
2036 gdb_assert (selector != NULL);
2037
2038 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2039 if (thread->last_resume_kind != resume_stop
2040 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2041 && lp->status_pending_p
2042 && WIFSTOPPED (lp->status_pending)
2043 && WSTOPSIG (lp->status_pending) == SIGTRAP
2044 && !breakpoint_inserted_here (lp->stop_pc))
2045 if ((*selector)-- == 0)
2046 return 1;
2047
2048 return 0;
2049 }
2050
2051 static int
2052 cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
2053 {
2054 struct lwp_info *lp = (struct lwp_info *) entry;
2055 struct thread_info *thread = get_lwp_thread (lp);
2056 struct lwp_info *event_lp = data;
2057
2058 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2059 if (lp == event_lp)
2060 return 0;
2061
2062 /* If a LWP other than the LWP that we're reporting an event for has
2063 hit a GDB breakpoint (as opposed to some random trap signal),
2064 then just arrange for it to hit it again later. We don't keep
2065 the SIGTRAP status and don't forward the SIGTRAP signal to the
2066 LWP. We will handle the current event, eventually we will resume
2067 all LWPs, and this one will get its breakpoint trap again.
2068
2069 If we do not do this, then we run the risk that the user will
2070 delete or disable the breakpoint, but the LWP will have already
2071 tripped on it. */
2072
2073 if (thread->last_resume_kind != resume_stop
2074 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2075 && lp->status_pending_p
2076 && WIFSTOPPED (lp->status_pending)
2077 && WSTOPSIG (lp->status_pending) == SIGTRAP
2078 && !lp->stepping
2079 && !lp->stopped_by_watchpoint
2080 && cancel_breakpoint (lp))
2081 /* Throw away the SIGTRAP. */
2082 lp->status_pending_p = 0;
2083
2084 return 0;
2085 }
2086
2087 static void
2088 linux_cancel_breakpoints (void)
2089 {
2090 find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
2091 }
2092
2093 /* Select one LWP out of those that have events pending. */
2094
2095 static void
2096 select_event_lwp (struct lwp_info **orig_lp)
2097 {
2098 int num_events = 0;
2099 int random_selector;
2100 struct lwp_info *event_lp;
2101
2102 /* Give preference to any LWP that is being single-stepped. */
2103 event_lp
2104 = (struct lwp_info *) find_inferior (&all_lwps,
2105 select_singlestep_lwp_callback, NULL);
2106 if (event_lp != NULL)
2107 {
2108 if (debug_threads)
2109 fprintf (stderr,
2110 "SEL: Select single-step %s\n",
2111 target_pid_to_str (ptid_of (event_lp)));
2112 }
2113 else
2114 {
2115 /* No single-stepping LWP. Select one at random, out of those
2116 which have had SIGTRAP events. */
2117
2118 /* First see how many SIGTRAP events we have. */
2119 find_inferior (&all_lwps, count_events_callback, &num_events);
2120
2121 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2122 random_selector = (int)
2123 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2124
2125 if (debug_threads && num_events > 1)
2126 fprintf (stderr,
2127 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2128 num_events, random_selector);
2129
2130 event_lp = (struct lwp_info *) find_inferior (&all_lwps,
2131 select_event_lwp_callback,
2132 &random_selector);
2133 }
2134
2135 if (event_lp != NULL)
2136 {
2137 /* Switch the event LWP. */
2138 *orig_lp = event_lp;
2139 }
2140 }
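/* Illustrative sketch (editor's addition, not from the original
   source): the proportional random pick used by select_event_lwp
   above, isolated as a standalone helper.  Scaling rand () into
   [0, NUM_EVENTS) gives every pending event a (near-)equal chance
   of being selected, which is what prevents starvation.  Disabled
   sketch, not project code.  */
#if 0
#include <stdlib.h>	/* rand, RAND_MAX; compiled out here.  */

/* Return a near-uniformly distributed index in [0, num_events).  */
static int
pick_event_index (int num_events)
{
  /* rand () / (RAND_MAX + 1.0) lies in [0, 1), so multiplying by
     NUM_EVENTS and truncating yields an index below NUM_EVENTS.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif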
2141
2142 /* Decrement the suspend count of an LWP. */
2143
2144 static int
2145 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2146 {
2147 struct lwp_info *lwp = (struct lwp_info *) entry;
2148
2149 /* Ignore EXCEPT. */
2150 if (lwp == except)
2151 return 0;
2152
2153 lwp->suspended--;
2154
2155 gdb_assert (lwp->suspended >= 0);
2156 return 0;
2157 }
2158
2159 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2160 non-NULL. */
2161
2162 static void
2163 unsuspend_all_lwps (struct lwp_info *except)
2164 {
2165 find_inferior (&all_lwps, unsuspend_one_lwp, except);
2166 }
2167
2168 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2169 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2170 void *data);
2171 static int lwp_running (struct inferior_list_entry *entry, void *data);
2172 static ptid_t linux_wait_1 (ptid_t ptid,
2173 struct target_waitstatus *ourstatus,
2174 int target_options);
2175
2176 /* Stabilize threads (move out of jump pads).
2177
2178 If a thread is midway collecting a fast tracepoint, we need to
2179 finish the collection and move it out of the jump pad before
2180 reporting the signal.
2181
2182 This avoids recursion while collecting (when a signal arrives
2183 midway, and the signal handler itself collects), which would trash
2184 the trace buffer. In case the user set a breakpoint in a signal
2185 handler, this avoids the backtrace showing the jump pad, etc.
2186 Most importantly, there are certain things we can't do safely if
2187 threads are stopped in a jump pad (or in its callees). For
2188 example:
2189
2190 - starting a new trace run. A thread still collecting the
2191 previous run could trash the trace buffer when resumed. The trace
2192 buffer control structures would have been reset but the thread had
2193 no way to tell. The thread could even be midway through memcpy'ing
2194 to the buffer, which would mean that when resumed, it would clobber
2195 the trace buffer that had been set up for a new run.
2196
2197 - we can't rewrite/reuse the jump pads for new tracepoints
2198 safely. Say you do tstart while a thread is stopped midway through
2199 collecting. When the thread is later resumed, it finishes the
2200 collection, and returns to the jump pad, to execute the original
2201 instruction that was under the tracepoint jump at the time the
2202 older run had been started. If the jump pad had since been
2203 rewritten for something else in the new run, the thread would now
2204 execute the wrong / random instructions. */
2205
2206 static void
2207 linux_stabilize_threads (void)
2208 {
2209 struct thread_info *save_inferior;
2210 struct lwp_info *lwp_stuck;
2211
2212 lwp_stuck
2213 = (struct lwp_info *) find_inferior (&all_lwps,
2214 stuck_in_jump_pad_callback, NULL);
2215 if (lwp_stuck != NULL)
2216 {
2217 if (debug_threads)
2218 fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
2219 lwpid_of (lwp_stuck));
2220 return;
2221 }
2222
2223 save_inferior = current_inferior;
2224
2225 stabilizing_threads = 1;
2226
2227 /* Kick 'em all. */
2228 for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);
2229
2230 /* Loop until all are stopped out of the jump pads. */
2231 while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
2232 {
2233 struct target_waitstatus ourstatus;
2234 struct lwp_info *lwp;
2235 int wstat;
2236
2237 /* Note that we go through the full wait event loop. While
2238 moving threads out of the jump pad, we need to be able to step
2239 over internal breakpoints and such. */
2240 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2241
2242 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2243 {
2244 lwp = get_thread_lwp (current_inferior);
2245
2246 /* Lock it. */
2247 lwp->suspended++;
2248
2249 if (ourstatus.value.sig != TARGET_SIGNAL_0
2250 || current_inferior->last_resume_kind == resume_stop)
2251 {
2252 wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
2253 enqueue_one_deferred_signal (lwp, &wstat);
2254 }
2255 }
2256 }
2257
2258 find_inferior (&all_lwps, unsuspend_one_lwp, NULL);
2259
2260 stabilizing_threads = 0;
2261
2262 current_inferior = save_inferior;
2263
2264 if (debug_threads)
2265 {
2266 lwp_stuck
2267 = (struct lwp_info *) find_inferior (&all_lwps,
2268 stuck_in_jump_pad_callback, NULL);
2269 if (lwp_stuck != NULL)
2270 fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
2271 lwpid_of (lwp_stuck));
2272 }
2273 }
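/* Illustrative sketch (editor's addition, not from the original
   source): what the W_STOPCODE call in linux_stabilize_threads
   builds.  It re-encodes a signal as the wait status waitpid would
   have produced for a stop, so the usual WIFSTOPPED/WSTOPSIG
   accessors round-trip it.  Disabled sketch, not project code.  */
#if 0
#include <assert.h>
#include <sys/wait.h>

static void
w_stopcode_roundtrip_example (int sig)
{
  int wstat = W_STOPCODE (sig);	/* (sig << 8) | 0x7f */
  assert (WIFSTOPPED (wstat));
  assert (WSTOPSIG (wstat) == sig);
}
#endif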
2274
2275 /* Wait for process, returns status. */
2276
2277 static ptid_t
2278 linux_wait_1 (ptid_t ptid,
2279 struct target_waitstatus *ourstatus, int target_options)
2280 {
2281 int w;
2282 struct lwp_info *event_child;
2283 int options;
2284 int pid;
2285 int step_over_finished;
2286 int bp_explains_trap;
2287 int maybe_internal_trap;
2288 int report_to_gdb;
2289 int trace_event;
2290
2291 /* Translate generic target options into linux options. */
2292 options = __WALL;
2293 if (target_options & TARGET_WNOHANG)
2294 options |= WNOHANG;
2295
2296 retry:
2297 bp_explains_trap = 0;
2298 trace_event = 0;
2299 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2300
2301 /* If we were only supposed to resume one thread, only wait for
2302 that thread - if it's still alive. If it died, however - which
2303 can happen if we're coming from the thread death case below -
2304 then we need to make sure we restart the other threads. We could
2305 pick a thread at random or restart all; restarting all is less
2306 arbitrary. */
2307 if (!non_stop
2308 && !ptid_equal (cont_thread, null_ptid)
2309 && !ptid_equal (cont_thread, minus_one_ptid))
2310 {
2311 struct thread_info *thread;
2312
2313 thread = (struct thread_info *) find_inferior_id (&all_threads,
2314 cont_thread);
2315
2316 /* No stepping, no signal - unless one is pending already, of course. */
2317 if (thread == NULL)
2318 {
2319 struct thread_resume resume_info;
2320 resume_info.thread = minus_one_ptid;
2321 resume_info.kind = resume_continue;
2322 resume_info.sig = 0;
2323 linux_resume (&resume_info, 1);
2324 }
2325 else
2326 ptid = cont_thread;
2327 }
2328
2329 if (ptid_equal (step_over_bkpt, null_ptid))
2330 pid = linux_wait_for_event (ptid, &w, options);
2331 else
2332 {
2333 if (debug_threads)
2334 fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
2335 target_pid_to_str (step_over_bkpt));
2336 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2337 }
2338
2339 if (pid == 0) /* only if TARGET_WNOHANG */
2340 return null_ptid;
2341
2342 event_child = get_thread_lwp (current_inferior);
2343
2344 /* If we are waiting for a particular child, and it exited,
2345 linux_wait_for_event will return its exit status. Similarly if
2346 the last child exited. If this is not the last child, however,
2347 do not report it as exited until there is a 'thread exited' response
2348 available in the remote protocol. Instead, just wait for another event.
2349 This should be safe, because if the thread crashed we will already
2350 have reported the termination signal to GDB; that should stop any
2351 in-progress stepping operations, etc.
2352
2353 Report the exit status of the last thread to exit. This matches
2354 LinuxThreads' behavior. */
2355
2356 if (last_thread_of_process_p (current_inferior))
2357 {
2358 if (WIFEXITED (w) || WIFSIGNALED (w))
2359 {
2360 if (WIFEXITED (w))
2361 {
2362 ourstatus->kind = TARGET_WAITKIND_EXITED;
2363 ourstatus->value.integer = WEXITSTATUS (w);
2364
2365 if (debug_threads)
2366 fprintf (stderr,
2367 "\nChild exited with retcode = %x \n",
2368 WEXITSTATUS (w));
2369 }
2370 else
2371 {
2372 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2373 ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
2374
2375 if (debug_threads)
2376 fprintf (stderr,
2377 "\nChild terminated with signal = %x \n",
2378 WTERMSIG (w));
2379
2380 }
2381
2382 return ptid_of (event_child);
2383 }
2384 }
2385 else
2386 {
2387 if (!WIFSTOPPED (w))
2388 goto retry;
2389 }
2390
2391 /* If this event was not handled before, and is not a SIGTRAP, we
2392 report it. SIGILL and SIGSEGV are also treated as traps in case
2393 a breakpoint is inserted at the current PC. If this target does
2394 not support internal breakpoints at all, we also report the
2395 SIGTRAP without further processing; it's of no concern to us. */
2396 maybe_internal_trap
2397 = (supports_breakpoints ()
2398 && (WSTOPSIG (w) == SIGTRAP
2399 || ((WSTOPSIG (w) == SIGILL
2400 || WSTOPSIG (w) == SIGSEGV)
2401 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2402
2403 if (maybe_internal_trap)
2404 {
2405 /* Handle anything that requires bookkeeping before deciding to
2406 report the event or continue waiting. */
2407
2408 /* First check if we can explain the SIGTRAP with an internal
2409 breakpoint, or if we should possibly report the event to GDB.
2410 Do this before anything that may remove or insert a
2411 breakpoint. */
2412 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2413
2414 /* We have a SIGTRAP, possibly a step-over dance has just
2415 finished. If so, tweak the state machine accordingly,
2416 reinsert breakpoints and delete any reinsert (software
2417 single-step) breakpoints. */
2418 step_over_finished = finish_step_over (event_child);
2419
2420 /* Now invoke the callbacks of any internal breakpoints there. */
2421 check_breakpoints (event_child->stop_pc);
2422
2423 /* Handle tracepoint data collecting. This may overflow the
2424 trace buffer, and cause a tracing stop, removing
2425 breakpoints. */
2426 trace_event = handle_tracepoints (event_child);
2427
2428 if (bp_explains_trap)
2429 {
2430 /* If we stepped or ran into an internal breakpoint, we've
2431 already handled it. So next time we resume (from this
2432 PC), we should step over it. */
2433 if (debug_threads)
2434 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2435
2436 if (breakpoint_here (event_child->stop_pc))
2437 event_child->need_step_over = 1;
2438 }
2439 }
2440 else
2441 {
2442 /* We have some other signal, possibly a step-over dance was in
2443 progress, and it should be cancelled too. */
2444 step_over_finished = finish_step_over (event_child);
2445 }
2446
2447 /* We have all the data we need. Either report the event to GDB, or
2448 resume threads and keep waiting for more. */
2449
2450 /* If we're collecting a fast tracepoint, finish the collection and
2451 move out of the jump pad before delivering a signal. See
2452 linux_stabilize_threads. */
2453
2454 if (WIFSTOPPED (w)
2455 && WSTOPSIG (w) != SIGTRAP
2456 && supports_fast_tracepoints ()
2457 && agent_loaded_p ())
2458 {
2459 if (debug_threads)
2460 fprintf (stderr,
2461 "Got signal %d for LWP %ld. Check if we need "
2462 "to defer or adjust it.\n",
2463 WSTOPSIG (w), lwpid_of (event_child));
2464
2465 /* Allow debugging the jump pad itself. */
2466 if (current_inferior->last_resume_kind != resume_step
2467 && maybe_move_out_of_jump_pad (event_child, &w))
2468 {
2469 enqueue_one_deferred_signal (event_child, &w);
2470
2471 if (debug_threads)
2472 fprintf (stderr,
2473 "Signal %d for LWP %ld deferred (in jump pad)\n",
2474 WSTOPSIG (w), lwpid_of (event_child));
2475
2476 linux_resume_one_lwp (event_child, 0, 0, NULL);
2477 goto retry;
2478 }
2479 }
2480
2481 if (event_child->collecting_fast_tracepoint)
2482 {
2483 if (debug_threads)
2484 fprintf (stderr, "\
2485 LWP %ld was trying to move out of the jump pad (%d). \
2486 Check if we're already there.\n",
2487 lwpid_of (event_child),
2488 event_child->collecting_fast_tracepoint);
2489
2490 trace_event = 1;
2491
2492 event_child->collecting_fast_tracepoint
2493 = linux_fast_tracepoint_collecting (event_child, NULL);
2494
2495 if (event_child->collecting_fast_tracepoint != 1)
2496 {
2497 /* No longer need this breakpoint. */
2498 if (event_child->exit_jump_pad_bkpt != NULL)
2499 {
2500 if (debug_threads)
2501 fprintf (stderr,
2502 "No longer need exit-jump-pad bkpt; removing it."
2503 "stopping all threads momentarily.\n");
2504
2505 /* Other running threads could hit this breakpoint.
2506 We don't handle moribund locations like GDB does,
2507 instead we always pause all threads when removing
2508 breakpoints, so that any step-over or
2509 decr_pc_after_break adjustment is always taken
2510 care of while the breakpoint is still
2511 inserted. */
2512 stop_all_lwps (1, event_child);
2513 cancel_breakpoints ();
2514
2515 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2516 event_child->exit_jump_pad_bkpt = NULL;
2517
2518 unstop_all_lwps (1, event_child);
2519
2520 gdb_assert (event_child->suspended >= 0);
2521 }
2522 }
2523
2524 if (event_child->collecting_fast_tracepoint == 0)
2525 {
2526 if (debug_threads)
2527 fprintf (stderr,
2528 "fast tracepoint finished "
2529 "collecting successfully.\n");
2530
2531 /* We may have a deferred signal to report. */
2532 if (dequeue_one_deferred_signal (event_child, &w))
2533 {
2534 if (debug_threads)
2535 fprintf (stderr, "dequeued one signal.\n");
2536 }
2537 else
2538 {
2539 if (debug_threads)
2540 fprintf (stderr, "no deferred signals.\n");
2541
2542 if (stabilizing_threads)
2543 {
2544 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2545 ourstatus->value.sig = TARGET_SIGNAL_0;
2546 return ptid_of (event_child);
2547 }
2548 }
2549 }
2550 }
2551
2552 /* Check whether GDB would be interested in this event. */
2553
2554 /* If GDB is not interested in this signal, don't stop other
2555 threads, and don't report it to GDB. Just resume the inferior
2556 right away. We do this for threading-related signals as well as
2557 any that GDB specifically requested we ignore. But never ignore
2558 SIGSTOP if we sent it ourselves, and do not ignore signals when
2559 stepping - they may require special handling to skip the signal
2560 handler. */
2561 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2562 thread library? */
2563 if (WIFSTOPPED (w)
2564 && current_inferior->last_resume_kind != resume_step
2565 && (
2566 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2567 (current_process ()->private->thread_db != NULL
2568 && (WSTOPSIG (w) == __SIGRTMIN
2569 || WSTOPSIG (w) == __SIGRTMIN + 1))
2570 ||
2571 #endif
2572 (pass_signals[target_signal_from_host (WSTOPSIG (w))]
2573 && !(WSTOPSIG (w) == SIGSTOP
2574 && current_inferior->last_resume_kind == resume_stop))))
2575 {
2576 siginfo_t info, *info_p;
2577
2578 if (debug_threads)
2579 fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
2580 WSTOPSIG (w), lwpid_of (event_child));
2581
2582 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
2583 info_p = &info;
2584 else
2585 info_p = NULL;
2586 linux_resume_one_lwp (event_child, event_child->stepping,
2587 WSTOPSIG (w), info_p);
2588 goto retry;
2589 }
2590
2591 /* If GDB wanted this thread to single step, we always want to
2592 report the SIGTRAP, and let GDB handle it. Watchpoints should
2593 always be reported. So should signals we can't explain. A
2594 SIGTRAP we can't explain could be a GDB breakpoint --- we may or
2595 may not support Z0 breakpoints. If we do, we'll be able to handle
2596 GDB breakpoints on top of internal breakpoints, by handling the
2597 internal breakpoint and still reporting the event to GDB. If we
2598 don't, we're out of luck, and GDB won't see the breakpoint hit. */
2599 report_to_gdb = (!maybe_internal_trap
2600 || current_inferior->last_resume_kind == resume_step
2601 || event_child->stopped_by_watchpoint
2602 || (!step_over_finished
2603 && !bp_explains_trap && !trace_event)
2604 || (gdb_breakpoint_here (event_child->stop_pc)
2605 && gdb_condition_true_at_breakpoint (event_child->stop_pc)));
2606
2607 /* We found no reason GDB would want us to stop. We either hit one
2608 of our own breakpoints, or finished an internal step GDB
2609 shouldn't know about. */
2610 if (!report_to_gdb)
2611 {
2612 if (debug_threads)
2613 {
2614 if (bp_explains_trap)
2615 fprintf (stderr, "Hit a gdbserver breakpoint.\n");
2616 if (step_over_finished)
2617 fprintf (stderr, "Step-over finished.\n");
2618 if (trace_event)
2619 fprintf (stderr, "Tracepoint event.\n");
2620 }
2621
2622 /* We're not reporting this breakpoint to GDB, so apply the
2623 decr_pc_after_break adjustment to the inferior's regcache
2624 ourselves. */
2625
2626 if (the_low_target.set_pc != NULL)
2627 {
2628 struct regcache *regcache
2629 = get_thread_regcache (get_lwp_thread (event_child), 1);
2630 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2631 }
2632
2633 /* We may have finished stepping over a breakpoint. If so,
2634 we've stopped and suspended all LWPs momentarily except the
2635 stepping one. This is where we resume them all again. We're
2636 going to keep waiting, so use proceed, which handles stepping
2637 over the next breakpoint. */
2638 if (debug_threads)
2639 fprintf (stderr, "proceeding all threads.\n");
2640
2641 if (step_over_finished)
2642 unsuspend_all_lwps (event_child);
2643
2644 proceed_all_lwps ();
2645 goto retry;
2646 }
2647
2648 if (debug_threads)
2649 {
2650 if (current_inferior->last_resume_kind == resume_step)
2651 fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
2652 if (event_child->stopped_by_watchpoint)
2653 fprintf (stderr, "Stopped by watchpoint.\n");
2654 if (gdb_breakpoint_here (event_child->stop_pc))
2655 fprintf (stderr, "Stopped by GDB breakpoint.\n");
2656 fprintf (stderr, "Hit a non-gdbserver trap event.\n");
2658 }
2659
2660 /* Alright, we're going to report a stop. */
2661
2662 if (!non_stop && !stabilizing_threads)
2663 {
2664 /* In all-stop, stop all threads. */
2665 stop_all_lwps (0, NULL);
2666
2667 /* If we're not waiting for a specific LWP, choose an event LWP
2668 from among those that have had events. Giving equal priority
2669 to all LWPs that have had events helps prevent
2670 starvation. */
2671 if (ptid_equal (ptid, minus_one_ptid))
2672 {
2673 event_child->status_pending_p = 1;
2674 event_child->status_pending = w;
2675
2676 select_event_lwp (&event_child);
2677
2678 event_child->status_pending_p = 0;
2679 w = event_child->status_pending;
2680 }
2681
2682 /* Now that we've selected our final event LWP, cancel any
2683 breakpoints in other LWPs that have hit a GDB breakpoint.
2684 See the comment in cancel_breakpoints_callback to find out
2685 why. */
2686 find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);
2687
2688 /* If we were doing a step-over, all other threads but the stepping one
2689 had been paused in start_step_over, with their suspend counts
2690 incremented. We don't want to do a full unstop/unpause, because we're
2691 in all-stop mode (so we want threads stopped), but we still need to
2692 unsuspend the other threads, to decrement their `suspended' count
2693 back. */
2694 if (step_over_finished)
2695 unsuspend_all_lwps (event_child);
2696
2697 /* Stabilize threads (move out of jump pads). */
2698 stabilize_threads ();
2699 }
2700 else
2701 {
2702 /* If we just finished a step-over, then all threads had been
2703 momentarily paused. In all-stop, that's fine, we want
2704 threads stopped by now anyway. In non-stop, we need to
2705 re-resume threads that GDB wanted to be running. */
2706 if (step_over_finished)
2707 unstop_all_lwps (1, event_child);
2708 }
2709
2710 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2711
2712 if (current_inferior->last_resume_kind == resume_stop
2713 && WSTOPSIG (w) == SIGSTOP)
2714 {
2715 /* This thread was requested to stop by GDB with vCont;t, and it
2716 stopped cleanly, so report it as stopped with SIG0. The use of
2717 SIGSTOP is an implementation detail. */
2718 ourstatus->value.sig = TARGET_SIGNAL_0;
2719 }
2720 else if (current_inferior->last_resume_kind == resume_stop
2721 && WSTOPSIG (w) != SIGSTOP)
2722 {
2723 /* This thread was requested to stop by GDB with vCont;t, but it
2724 stopped for some other reason. */
2725 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2726 }
2727 else
2728 {
2729 ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
2730 }
2731
2732 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2733
2734 if (debug_threads)
2735 fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
2736 target_pid_to_str (ptid_of (event_child)),
2737 ourstatus->kind,
2738 ourstatus->value.sig);
2739
2740 return ptid_of (event_child);
2741 }
2742
2743 /* Get rid of any pending event in the pipe. */
2744 static void
2745 async_file_flush (void)
2746 {
2747 int ret;
2748 char buf;
2749
2750 do
2751 ret = read (linux_event_pipe[0], &buf, 1);
2752 while (ret >= 0 || (ret == -1 && errno == EINTR));
2753 }
2754
2755 /* Put something in the pipe, so the event loop wakes up. */
2756 static void
2757 async_file_mark (void)
2758 {
2759 int ret;
2760
2761 async_file_flush ();
2762
2763 do
2764 ret = write (linux_event_pipe[1], "+", 1);
2765 while (ret == 0 || (ret == -1 && errno == EINTR));
2766
2767 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2768 be awakened anyway. */
2769 }
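/* Illustrative sketch (editor's addition, not from the original
   source): the self-pipe pattern that async_file_flush and
   async_file_mark implement.  Marking writes one byte so a
   select/poll-based event loop sees the read end become readable;
   flushing drains it.  The helper below shows the non-blocking setup
   such a pipe needs so the flush loop terminates with EAGAIN instead
   of blocking; EXAMPLE_EVENT_PIPE is a hypothetical stand-in, not
   the real pipe.  Disabled sketch.  */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_event_pipe[2];

static int
setup_event_pipe_example (void)
{
  if (pipe (example_event_pipe) != 0)
    return -1;
  /* Non-blocking on both ends: reads return -1/EAGAIN when the pipe
     is empty, writes return -1/EAGAIN when it is full, so neither
     helper can hang.  */
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  return 0;
}
#endif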
2770
2771 static ptid_t
2772 linux_wait (ptid_t ptid,
2773 struct target_waitstatus *ourstatus, int target_options)
2774 {
2775 ptid_t event_ptid;
2776
2777 if (debug_threads)
2778 fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
2779
2780 /* Flush the async file first. */
2781 if (target_is_async_p ())
2782 async_file_flush ();
2783
2784 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
2785
2786 /* If at least one stop was reported, there may be more. A single
2787 SIGCHLD can signal more than one child stop. */
2788 if (target_is_async_p ()
2789 && (target_options & TARGET_WNOHANG) != 0
2790 && !ptid_equal (event_ptid, null_ptid))
2791 async_file_mark ();
2792
2793 return event_ptid;
2794 }
2795
2796 /* Send a signal to an LWP. */
2797
2798 static int
2799 kill_lwp (unsigned long lwpid, int signo)
2800 {
2801 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2802 fails, then we are not using nptl threads and we should be using kill. */
2803
2804 #ifdef __NR_tkill
2805 {
2806 static int tkill_failed;
2807
2808 if (!tkill_failed)
2809 {
2810 int ret;
2811
2812 errno = 0;
2813 ret = syscall (__NR_tkill, lwpid, signo);
2814 if (errno != ENOSYS)
2815 return ret;
2816 tkill_failed = 1;
2817 }
2818 }
2819 #endif
2820
2821 return kill (lwpid, signo);
2822 }
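/* Illustrative sketch (editor's addition, not from the original
   source): why kill_lwp prefers the tkill syscall.  kill (PID, SIG)
   targets a whole thread group and may be handled by any thread in
   it, while tkill delivers the signal to one specific LWP, which is
   what stopping an individual thread requires.  Disabled sketch; the
   lwpid below is whatever LWP the caller wants to stop.  */
#if 0
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static void
signal_one_thread_example (unsigned long lwpid)
{
  /* Directed at exactly this LWP, unlike a plain kill ().  */
  syscall (__NR_tkill, lwpid, SIGSTOP);
}
#endif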
2823
2824 void
2825 linux_stop_lwp (struct lwp_info *lwp)
2826 {
2827 send_sigstop (lwp);
2828 }
2829
2830 static void
2831 send_sigstop (struct lwp_info *lwp)
2832 {
2833 int pid;
2834
2835 pid = lwpid_of (lwp);
2836
2837 /* If we already have a pending stop signal for this process, don't
2838 send another. */
2839 if (lwp->stop_expected)
2840 {
2841 if (debug_threads)
2842 fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
2843
2844 return;
2845 }
2846
2847 if (debug_threads)
2848 fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
2849
2850 lwp->stop_expected = 1;
2851 kill_lwp (pid, SIGSTOP);
2852 }
2853
2854 static int
2855 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
2856 {
2857 struct lwp_info *lwp = (struct lwp_info *) entry;
2858
2859 /* Ignore EXCEPT. */
2860 if (lwp == except)
2861 return 0;
2862
2863 if (lwp->stopped)
2864 return 0;
2865
2866 send_sigstop (lwp);
2867 return 0;
2868 }
2869
2870 /* Increment the suspend count of an LWP, and stop it, if not stopped
2871 yet. */
2872 static int
2873 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
2874 void *except)
2875 {
2876 struct lwp_info *lwp = (struct lwp_info *) entry;
2877
2878 /* Ignore EXCEPT. */
2879 if (lwp == except)
2880 return 0;
2881
2882 lwp->suspended++;
2883
2884 return send_sigstop_callback (entry, except);
2885 }
2886
2887 static void
2888 mark_lwp_dead (struct lwp_info *lwp, int wstat)
2889 {
2890 /* It's dead, really. */
2891 lwp->dead = 1;
2892
2893 /* Store the exit status for later. */
2894 lwp->status_pending_p = 1;
2895 lwp->status_pending = wstat;
2896
2897 /* Prevent trying to stop it. */
2898 lwp->stopped = 1;
2899
2900 /* No further stops are expected from a dead lwp. */
2901 lwp->stop_expected = 0;
2902 }
2903
2904 static void
2905 wait_for_sigstop (struct inferior_list_entry *entry)
2906 {
2907 struct lwp_info *lwp = (struct lwp_info *) entry;
2908 struct thread_info *saved_inferior;
2909 int wstat;
2910 ptid_t saved_tid;
2911 ptid_t ptid;
2912 int pid;
2913
2914 if (lwp->stopped)
2915 {
2916 if (debug_threads)
2917 fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
2918 lwpid_of (lwp));
2919 return;
2920 }
2921
2922 saved_inferior = current_inferior;
2923 if (saved_inferior != NULL)
2924 saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
2925 else
2926 saved_tid = null_ptid; /* avoid bogus unused warning */
2927
2928 ptid = lwp->head.id;
2929
2930 if (debug_threads)
2931 fprintf (stderr, "wait_for_sigstop: pulling one event\n");
2932
2933 pid = linux_wait_for_event (ptid, &wstat, __WALL);
2934
2935 /* If we stopped with a non-SIGSTOP signal, save it for later
2936 and record the pending SIGSTOP. If the process exited, just
2937 return. */
2938 if (WIFSTOPPED (wstat))
2939 {
2940 if (debug_threads)
2941 fprintf (stderr, "LWP %ld stopped with signal %d\n",
2942 lwpid_of (lwp), WSTOPSIG (wstat));
2943
2944 if (WSTOPSIG (wstat) != SIGSTOP)
2945 {
2946 if (debug_threads)
2947 fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
2948 lwpid_of (lwp), wstat);
2949
2950 lwp->status_pending_p = 1;
2951 lwp->status_pending = wstat;
2952 }
2953 }
2954 else
2955 {
2956 if (debug_threads)
2957 fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);
2958
2959 lwp = find_lwp_pid (pid_to_ptid (pid));
2960 if (lwp)
2961 {
2962 /* Leave this status pending for the next time we're able to
2963 report it. In the mean time, we'll report this lwp as
2964 dead to GDB, so GDB doesn't try to read registers and
2965 memory from it. This can only happen if this was the
2966 last thread of the process; otherwise, PID is removed
2967 from the thread tables before linux_wait_for_event
2968 returns. */
2969 mark_lwp_dead (lwp, wstat);
2970 }
2971 }
2972
2973 if (saved_inferior == NULL || linux_thread_alive (saved_tid))
2974 current_inferior = saved_inferior;
2975 else
2976 {
2977 if (debug_threads)
2978 fprintf (stderr, "Previously current thread died.\n");
2979
2980 if (non_stop)
2981 {
2982 /* We can't change the current inferior behind GDB's back,
2983 otherwise, a subsequent command may apply to the wrong
2984 process. */
2985 current_inferior = NULL;
2986 }
2987 else
2988 {
2989 /* Set a valid thread as current. */
2990 set_desired_inferior (0);
2991 }
2992 }
2993 }
2994
2995 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
2996 move it out, because we need to report the stop event to GDB. For
2997 example, if the user puts a breakpoint in the jump pad, it's
2998 because she wants to debug it. */
2999
3000 static int
3001 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3002 {
3003 struct lwp_info *lwp = (struct lwp_info *) entry;
3004 struct thread_info *thread = get_lwp_thread (lwp);
3005
3006 gdb_assert (lwp->suspended == 0);
3007 gdb_assert (lwp->stopped);
3008
3009 /* Allow debugging the jump pad, gdb_collect, etc. */
3010 return (supports_fast_tracepoints ()
3011 && agent_loaded_p ()
3012 && (gdb_breakpoint_here (lwp->stop_pc)
3013 || lwp->stopped_by_watchpoint
3014 || thread->last_resume_kind == resume_step)
3015 && linux_fast_tracepoint_collecting (lwp, NULL));
3016 }
3017
3018 static void
3019 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3020 {
3021 struct lwp_info *lwp = (struct lwp_info *) entry;
3022 struct thread_info *thread = get_lwp_thread (lwp);
3023 int *wstat;
3024
3025 gdb_assert (lwp->suspended == 0);
3026 gdb_assert (lwp->stopped);
3027
3028 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3029
3030 /* Allow debugging the jump pad, gdb_collect, etc. */
3031 if (!gdb_breakpoint_here (lwp->stop_pc)
3032 && !lwp->stopped_by_watchpoint
3033 && thread->last_resume_kind != resume_step
3034 && maybe_move_out_of_jump_pad (lwp, wstat))
3035 {
3036 if (debug_threads)
3037 fprintf (stderr,
3038 "LWP %ld needs stabilizing (in jump pad)\n",
3039 lwpid_of (lwp));
3040
3041 if (wstat)
3042 {
3043 lwp->status_pending_p = 0;
3044 enqueue_one_deferred_signal (lwp, wstat);
3045
3046 if (debug_threads)
3047 fprintf (stderr,
3048 "Signal %d for LWP %ld deferred "
3049 "(in jump pad)\n",
3050 WSTOPSIG (*wstat), lwpid_of (lwp));
3051 }
3052
3053 linux_resume_one_lwp (lwp, 0, 0, NULL);
3054 }
3055 else
3056 lwp->suspended++;
3057 }
3058
3059 static int
3060 lwp_running (struct inferior_list_entry *entry, void *data)
3061 {
3062 struct lwp_info *lwp = (struct lwp_info *) entry;
3063
3064 if (lwp->dead)
3065 return 0;
3066 if (lwp->stopped)
3067 return 0;
3068 return 1;
3069 }
3070
3071 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3072 If SUSPEND, then also increase the suspend count of every LWP,
3073 except EXCEPT. */
3074
3075 static void
3076 stop_all_lwps (int suspend, struct lwp_info *except)
3077 {
3078 stopping_threads = 1;
3079
3080 if (suspend)
3081 find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
3082 else
3083 find_inferior (&all_lwps, send_sigstop_callback, except);
3084 for_each_inferior (&all_lwps, wait_for_sigstop);
3085 stopping_threads = 0;
3086 }
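/* Illustrative sketch (editor's addition, not from the original
   source): how the SUSPEND argument of stop_all_lwps pairs with the
   unstop/unsuspend calls used elsewhere in this file.  A suspending
   stop must eventually be matched, with the same EXCEPT, so every
   LWP's suspend count returns to zero (see the gdb_assert in
   unsuspend_one_lwp).  Disabled sketch.  */
#if 0
static void
suspend_balanced_example (struct lwp_info *except)
{
  stop_all_lwps (1, except);	/* suspended++ on every other LWP.  */
  /* ... work that needs the other threads parked, e.g. removing a
     breakpoint they might otherwise run through ...  */
  unstop_all_lwps (1, except);	/* suspended-- and resume them.  */
}
#endif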
3087
3088 /* Resume execution of the inferior process.
3089 If STEP is nonzero, single-step it.
3090 If SIGNAL is nonzero, give it that signal. */
3091
3092 static void
3093 linux_resume_one_lwp (struct lwp_info *lwp,
3094 int step, int signal, siginfo_t *info)
3095 {
3096 struct thread_info *saved_inferior;
3097 int fast_tp_collecting;
3098
3099 if (lwp->stopped == 0)
3100 return;
3101
3102 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3103
3104 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3105
3106 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3107 user used the "jump" command, or "set $pc = foo"). */
3108 if (lwp->stop_pc != get_pc (lwp))
3109 {
3110 /* Collecting 'while-stepping' actions doesn't make sense
3111 anymore. */
3112 release_while_stepping_state_list (get_lwp_thread (lwp));
3113 }
3114
3115 /* If we have pending signals or status, and a new signal, enqueue the
3116 signal. Also enqueue the signal if we are waiting to reinsert a
3117 breakpoint; it will be picked up again below. */
3118 if (signal != 0
3119 && (lwp->status_pending_p
3120 || lwp->pending_signals != NULL
3121 || lwp->bp_reinsert != 0
3122 || fast_tp_collecting))
3123 {
3124 struct pending_signals *p_sig;
3125 p_sig = xmalloc (sizeof (*p_sig));
3126 p_sig->prev = lwp->pending_signals;
3127 p_sig->signal = signal;
3128 if (info == NULL)
3129 memset (&p_sig->info, 0, sizeof (siginfo_t));
3130 else
3131 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3132 lwp->pending_signals = p_sig;
3133 }
3134
3135 if (lwp->status_pending_p)
3136 {
3137 if (debug_threads)
3138 fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
3139 " has pending status\n",
3140 lwpid_of (lwp), step ? "step" : "continue", signal,
3141 lwp->stop_expected ? "expected" : "not expected");
3142 return;
3143 }
3144
3145 saved_inferior = current_inferior;
3146 current_inferior = get_lwp_thread (lwp);
3147
3148 if (debug_threads)
3149 fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
3150 lwpid_of (lwp), step ? "step" : "continue", signal,
3151 lwp->stop_expected ? "expected" : "not expected");
3152
3153 /* This bit needs some thinking about. If we get a signal that
3154 we must report while a single-step reinsert is still pending,
3155 we often end up resuming the thread. It might be better to
3156 (ew) allow a stack of pending events; then we could be sure that
3157 the reinsert happened right away and not lose any signals.
3158
3159 Making this stack would also shrink the window in which breakpoints are
3160 uninserted (see comment in linux_wait_for_lwp) but not enough for
3161 complete correctness, so it won't solve that problem. It may be
3162 worthwhile just to solve this one, however. */
3163 if (lwp->bp_reinsert != 0)
3164 {
3165 if (debug_threads)
3166 fprintf (stderr, " pending reinsert at 0x%s\n",
3167 paddress (lwp->bp_reinsert));
3168
3169 if (lwp->bp_reinsert != 0 && can_hardware_single_step ())
3170 {
3171 if (fast_tp_collecting == 0)
3172 {
3173 if (step == 0)
3174 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3175 if (lwp->suspended)
3176 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3177 lwp->suspended);
3178 }
3179
3180 step = 1;
3181 }
3182
3183 /* Postpone any pending signal. It was enqueued above. */
3184 signal = 0;
3185 }
3186
3187 if (fast_tp_collecting == 1)
3188 {
3189 if (debug_threads)
3190 fprintf (stderr, "\
3191 lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
3192 lwpid_of (lwp));
3193
3194 /* Postpone any pending signal. It was enqueued above. */
3195 signal = 0;
3196 }
3197 else if (fast_tp_collecting == 2)
3198 {
3199 if (debug_threads)
3200 fprintf (stderr, "\
3201 lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
3202 lwpid_of (lwp));
3203
3204 if (can_hardware_single_step ())
3205 step = 1;
3206 else
3207 fatal ("moving out of jump pad single-stepping"
3208 " not implemented on this target");
3209
3210 /* Postpone any pending signal. It was enqueued above. */
3211 signal = 0;
3212 }
3213
3214 /* If we have while-stepping actions in this thread, set it stepping.
3215 If we have a signal to deliver, it may or may not be set to
3216 SIG_IGN, we don't know. Assume so, and allow collecting
3217 while-stepping into a signal handler. A possible smart thing to
3218 do would be to set an internal breakpoint at the signal return
3219 address, continue, and carry on catching this while-stepping
3220 action only when that breakpoint is hit. A future
3221 enhancement. */
3222 if (get_lwp_thread (lwp)->while_stepping != NULL
3223 && can_hardware_single_step ())
3224 {
3225 if (debug_threads)
3226 fprintf (stderr,
3227 "lwp %ld has a while-stepping action -> forcing step.\n",
3228 lwpid_of (lwp));
3229 step = 1;
3230 }
3231
3232 if (debug_threads && the_low_target.get_pc != NULL)
3233 {
3234 struct regcache *regcache = get_thread_regcache (current_inferior, 1);
3235 CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
3236 fprintf (stderr, " resuming from pc 0x%lx\n", (long) pc);
3237 }
3238
3239 /* If we have pending signals, consume one unless we are trying to
3240 reinsert a breakpoint or we're trying to finish a fast tracepoint
3241 collect. */
3242 if (lwp->pending_signals != NULL
3243 && lwp->bp_reinsert == 0
3244 && fast_tp_collecting == 0)
3245 {
3246 struct pending_signals **p_sig;
3247
3248 p_sig = &lwp->pending_signals;
3249 while ((*p_sig)->prev != NULL)
3250 p_sig = &(*p_sig)->prev;
3251
3252 signal = (*p_sig)->signal;
3253 if ((*p_sig)->info.si_signo != 0)
3254 ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
3255
3256 free (*p_sig);
3257 *p_sig = NULL;
3258 }
3259
3260 if (the_low_target.prepare_to_resume != NULL)
3261 the_low_target.prepare_to_resume (lwp);
3262
3263 regcache_invalidate_one ((struct inferior_list_entry *)
3264 get_lwp_thread (lwp));
3265 errno = 0;
3266 lwp->stopped = 0;
3267 lwp->stopped_by_watchpoint = 0;
3268 lwp->stepping = step;
3269 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
3270 /* Coerce to a uintptr_t first to avoid potential gcc warning
3271 of coercing an 8 byte integer to a 4 byte pointer. */
3272 (PTRACE_ARG4_TYPE) (uintptr_t) signal);
3273
3274 current_inferior = saved_inferior;
3275 if (errno)
3276 {
3277 /* ESRCH from ptrace either means that the thread was already
3278 running (an error) or that it is gone (a race condition). If
3279 it's gone, we will get a notification the next time we wait,
3280 so we can ignore the error. We could differentiate these
3281 two, but it's tricky without waiting; the thread still exists
3282 as a zombie, so sending it signal 0 would succeed. So just
3283 ignore ESRCH. */
3284 if (errno == ESRCH)
3285 return;
3286
3287 perror_with_name ("ptrace");
3288 }
3289 }
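/* Illustrative sketch (editor's addition, not from the original
   source): the pending-signals list discipline used in
   linux_resume_one_lwp.  New entries are pushed at the head, but the
   consumer walks PREV links to the tail, so signals are delivered in
   FIFO order.  Simplified stand-in type; assumes a non-empty list,
   like the code above.  Disabled sketch.  */
#if 0
#include <stdlib.h>

struct example_pending_sig
{
  struct example_pending_sig *prev;
  int signal;
};

static int
pop_oldest_signal_example (struct example_pending_sig **head)
{
  struct example_pending_sig **p = head;
  int sig;

  while ((*p)->prev != NULL)	/* Walk to the oldest entry.  */
    p = &(*p)->prev;
  sig = (*p)->signal;
  free (*p);
  *p = NULL;			/* Detach the consumed entry.  */
  return sig;
}
#endif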
3290
3291 struct thread_resume_array
3292 {
3293 struct thread_resume *resume;
3294 size_t n;
3295 };
3296
3297 /* This function is called once per thread. We look up the thread
3298 in RESUME_PTR, and mark the thread with a pointer to the appropriate
3299 resume request.
3300
3301 This algorithm is O(threads * resume elements), but resume elements
3302 is small (and will remain small at least until GDB supports thread
3303 suspension). */
3304 static int
3305 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3306 {
3307 struct lwp_info *lwp;
3308 struct thread_info *thread;
3309 int ndx;
3310 struct thread_resume_array *r;
3311
3312 thread = (struct thread_info *) entry;
3313 lwp = get_thread_lwp (thread);
3314 r = arg;
3315
3316 for (ndx = 0; ndx < r->n; ndx++)
3317 {
3318 ptid_t ptid = r->resume[ndx].thread;
3319 if (ptid_equal (ptid, minus_one_ptid)
3320 || ptid_equal (ptid, entry->id)
3321 || (ptid_is_pid (ptid)
3322 && (ptid_get_pid (ptid) == pid_of (lwp)))
3323 || (ptid_get_lwp (ptid) == -1
3324 && (ptid_get_pid (ptid) == pid_of (lwp))))
3325 {
3326 if (r->resume[ndx].kind == resume_stop
3327 && thread->last_resume_kind == resume_stop)
3328 {
3329 if (debug_threads)
3330 fprintf (stderr, "already %s LWP %ld at GDB's request\n",
3331 thread->last_status.kind == TARGET_WAITKIND_STOPPED
3332 ? "stopped"
3333 : "stopping",
3334 lwpid_of (lwp));
3335
3336 continue;
3337 }
3338
3339 lwp->resume = &r->resume[ndx];
3340 thread->last_resume_kind = lwp->resume->kind;
3341
3342 /* If we had a deferred signal to report, dequeue one now.
3343 This can happen if LWP gets more than one signal while
3344 trying to get out of a jump pad. */
3345 if (lwp->stopped
3346 && !lwp->status_pending_p
3347 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3348 {
3349 lwp->status_pending_p = 1;
3350
3351 if (debug_threads)
3352 fprintf (stderr,
3353 "Dequeueing deferred signal %d for LWP %ld, "
3354 "leaving status pending.\n",
3355 WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
3356 }
3357
3358 return 0;
3359 }
3360 }
3361
3362 /* No resume action for this thread. */
3363 lwp->resume = NULL;
3364
3365 return 0;
3366 }
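/* Illustrative sketch (editor's addition, not from the original
   source): the ptid matching rules linux_set_resume_request applies,
   pulled out as a standalone predicate.  A resume request matches a
   thread if it is the wildcard, names the thread exactly, or names
   the thread's whole process (either as a bare pid or as a pid with
   lwp -1).  Disabled sketch.  */
#if 0
static int
resume_request_matches_example (ptid_t request, ptid_t thread, int pid)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, thread)
	  || (ptid_is_pid (request)
	      && ptid_get_pid (request) == pid)
	  || (ptid_get_lwp (request) == -1
	      && ptid_get_pid (request) == pid));
}
#endif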
3367
3368
3369 /* Set *FLAG_P if this lwp has an interesting status pending. */
3370 static int
3371 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3372 {
3373 struct lwp_info *lwp = (struct lwp_info *) entry;
3374
3375 /* LWPs which will not be resumed are not interesting, because
3376 we might not wait for them next time through linux_wait. */
3377 if (lwp->resume == NULL)
3378 return 0;
3379
3380 if (lwp->status_pending_p)
3381 * (int *) flag_p = 1;
3382
3383 return 0;
3384 }
3385
3386 /* Return 1 if this lwp that GDB wants running is stopped at an
3387 internal breakpoint that we need to step over. It assumes that any
3388 required STOP_PC adjustment has already been propagated to the
3389 inferior's regcache. */
3390
3391 static int
3392 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3393 {
3394 struct lwp_info *lwp = (struct lwp_info *) entry;
3395 struct thread_info *thread;
3396 struct thread_info *saved_inferior;
3397 CORE_ADDR pc;
3398
3399 /* LWPs which will not be resumed are not interesting, because we
3400 might not wait for them next time through linux_wait. */
3401
3402 if (!lwp->stopped)
3403 {
3404 if (debug_threads)
3405 fprintf (stderr,
3406 "Need step over [LWP %ld]? Ignoring, not stopped\n",
3407 lwpid_of (lwp));
3408 return 0;
3409 }
3410
3411 thread = get_lwp_thread (lwp);
3412
3413 if (thread->last_resume_kind == resume_stop)
3414 {
3415 if (debug_threads)
3416 fprintf (stderr,
3417 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
3418 lwpid_of (lwp));
3419 return 0;
3420 }
3421
3422 gdb_assert (lwp->suspended >= 0);
3423
3424 if (lwp->suspended)
3425 {
3426 if (debug_threads)
3427 fprintf (stderr,
3428 "Need step over [LWP %ld]? Ignoring, suspended\n",
3429 lwpid_of (lwp));
3430 return 0;
3431 }
3432
3433 if (!lwp->need_step_over)
3434 {
3435 if (debug_threads)
3436 fprintf (stderr,
3437 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
3438 }
3439
3440 if (lwp->status_pending_p)
3441 {
3442 if (debug_threads)
3443 fprintf (stderr,
3444 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
3445 lwpid_of (lwp));
3446 return 0;
3447 }
3448
3449 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3450 or we have. */
3451 pc = get_pc (lwp);
3452
3453 /* If the PC has changed since we stopped, then don't do anything,
3454 and let the breakpoint/tracepoint be hit. This happens if, for
3455 instance, GDB handled the decr_pc_after_break subtraction itself,
3456 GDB is OOL stepping this thread, or the user has issued a "jump"
3457 command, or poked the thread's registers herself. */
3458 if (pc != lwp->stop_pc)
3459 {
3460 if (debug_threads)
3461 fprintf (stderr,
3462 "Need step over [LWP %ld]? Cancelling, PC was changed. "
3463 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3464 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));
3465
3466 lwp->need_step_over = 0;
3467 return 0;
3468 }
3469
3470 saved_inferior = current_inferior;
3471 current_inferior = thread;
3472
3473 /* We can only step over breakpoints we know about. */
3474 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3475 {
3476 /* Don't step over a breakpoint that GDB expects to hit
3477 though. If the condition is being evaluated on the target's side
3478 and it evaluates to false, step over this breakpoint as well. */
3479 if (gdb_breakpoint_here (pc)
3480 && gdb_condition_true_at_breakpoint (pc))
3481 {
3482 if (debug_threads)
3483 fprintf (stderr,
3484 "Need step over [LWP %ld]? yes, but found"
3485 " GDB breakpoint at 0x%s; skipping step over\n",
3486 lwpid_of (lwp), paddress (pc));
3487
3488 current_inferior = saved_inferior;
3489 return 0;
3490 }
3491 else
3492 {
3493 if (debug_threads)
3494 fprintf (stderr,
3495 "Need step over [LWP %ld]? yes, "
3496 "found breakpoint at 0x%s\n",
3497 lwpid_of (lwp), paddress (pc));
3498
3499 /* We've found an lwp that needs stepping over --- return 1 so
3500 that find_inferior stops looking. */
3501 current_inferior = saved_inferior;
3502
3503 /* If the step over is cancelled, this is set again. */
3504 lwp->need_step_over = 0;
3505 return 1;
3506 }
3507 }
3508
3509 current_inferior = saved_inferior;
3510
3511 if (debug_threads)
3512 fprintf (stderr,
3513 "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
3514 lwpid_of (lwp), paddress (pc));
3515
3516 return 0;
3517 }
3518
3519 /* Start a step-over operation on LWP. When LWP is stopped at a
3520 breakpoint, to make progress we need to move the breakpoint out
3521 of the way. If we let other threads run while we do that, they may
3522 pass by the breakpoint location and miss hitting it. To avoid
3523 that, a step-over momentarily stops all threads while LWP is
3524 single-stepped with the breakpoint temporarily uninserted from
3525 the inferior. When the single-step finishes, we reinsert the
3526 breakpoint, and let all threads that are supposed to be running
3527 run again.
3528
3529 On targets that don't support hardware single-step, we don't
3530 currently support full software single-stepping. Instead, we only
3531 support stepping over the thread event breakpoint, by asking the
3532 low target where to place a reinsert breakpoint. Since this
3533 routine assumes the breakpoint being stepped over is a thread event
3534 breakpoint, the return address of the current function is usually
3535 a good enough place to set the reinsert breakpoint. */
3536
3537 static int
3538 start_step_over (struct lwp_info *lwp)
3539 {
3540 struct thread_info *saved_inferior;
3541 CORE_ADDR pc;
3542 int step;
3543
3544 if (debug_threads)
3545 fprintf (stderr,
3546 "Starting step-over on LWP %ld. Stopping all threads\n",
3547 lwpid_of (lwp));
3548
3549 stop_all_lwps (1, lwp);
3550 gdb_assert (lwp->suspended == 0);
3551
3552 if (debug_threads)
3553 fprintf (stderr, "Done stopping all threads for step-over.\n");
3554
3555 /* Note, we should always reach here with an already adjusted PC,
3556 either by GDB (if we're resuming due to GDB's request), or by our
3557 caller, if we just finished handling an internal breakpoint GDB
3558 shouldn't care about. */
3559 pc = get_pc (lwp);
3560
3561 saved_inferior = current_inferior;
3562 current_inferior = get_lwp_thread (lwp);
3563
3564 lwp->bp_reinsert = pc;
3565 uninsert_breakpoints_at (pc);
3566 uninsert_fast_tracepoint_jumps_at (pc);
3567
3568 if (can_hardware_single_step ())
3569 {
3570 step = 1;
3571 }
3572 else
3573 {
3574 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3575 set_reinsert_breakpoint (raddr);
3576 step = 0;
3577 }
3578
3579 current_inferior = saved_inferior;
3580
3581 linux_resume_one_lwp (lwp, step, 0, NULL);
3582
3583 /* Require next event from this LWP. */
3584 step_over_bkpt = lwp->head.id;
3585 return 1;
3586 }
3587
3588 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3589 start_step_over, if still there, and delete any reinsert
3590 breakpoints we've set, on targets without hardware single-step. */
3591
3592 static int
3593 finish_step_over (struct lwp_info *lwp)
3594 {
3595 if (lwp->bp_reinsert != 0)
3596 {
3597 if (debug_threads)
3598 fprintf (stderr, "Finished step over.\n");
3599
3600 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3601 may be no breakpoint to reinsert there by now. */
3602 reinsert_breakpoints_at (lwp->bp_reinsert);
3603 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3604
3605 lwp->bp_reinsert = 0;
3606
3607 /* Delete any software-single-step reinsert breakpoints. No
3608 longer needed. We don't have to worry about other threads
3609 hitting this trap, and later not being able to explain it,
3610 because we were stepping over a breakpoint, and we hold all
3611 threads but LWP stopped while doing that. */
3612 if (!can_hardware_single_step ())
3613 delete_reinsert_breakpoints ();
3614
3615 step_over_bkpt = null_ptid;
3616 return 1;
3617 }
3618 else
3619 return 0;
3620 }
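/* Illustrative sketch (editor's addition, not from the original
   source): the two pieces of state that tie start_step_over and
   finish_step_over together.  BP_REINSERT records the address whose
   breakpoint is temporarily lifted; STEP_OVER_BKPT being non-null
   tells the wait loop to collect the next event from that LWP only.
   Both are set when a step-over starts and cleared when it
   finishes.  Disabled sketch.  */
#if 0
static int
step_over_in_flight_example (struct lwp_info *lwp)
{
  return (lwp->bp_reinsert != 0
	  && !ptid_equal (step_over_bkpt, null_ptid));
}
#endif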
3621
3622 /* This function is called once per thread. We check the thread's resume
3623 request, which will tell us whether to resume, step, or leave the thread
3624 stopped; and what signal, if any, it should be sent.
3625
3626 For threads which we aren't explicitly told otherwise, we preserve
3627 the stepping flag; this is used for stepping over gdbserver-placed
3628 breakpoints.
3629
3630 If pending_flags was set in any thread, we queue any needed
3631 signals, since we won't actually resume. We already have a pending
3632 event to report, so we don't need to preserve any step requests;
3633 they should be re-issued if necessary. */
3634
3635 static int
3636 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3637 {
3638 struct lwp_info *lwp;
3639 struct thread_info *thread;
3640 int step;
3641 int leave_all_stopped = * (int *) arg;
3642 int leave_pending;
3643
3644 thread = (struct thread_info *) entry;
3645 lwp = get_thread_lwp (thread);
3646
3647 if (lwp->resume == NULL)
3648 return 0;
3649
3650 if (lwp->resume->kind == resume_stop)
3651 {
3652 if (debug_threads)
3653 fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));
3654
3655 if (!lwp->stopped)
3656 {
3657 if (debug_threads)
3658 fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));
3659
3660 /* Stop the thread, and wait for the event asynchronously,
3661 through the event loop. */
3662 send_sigstop (lwp);
3663 }
3664 else
3665 {
3666 if (debug_threads)
3667 fprintf (stderr, "already stopped LWP %ld\n",
3668 lwpid_of (lwp));
3669
3670 /* The LWP may have been stopped in an internal event that
3671 was not meant to be notified back to GDB (e.g., gdbserver
3672 breakpoint), so we should be reporting a stop event in
3673 this case too. */
3674
3675 /* If the thread already has a pending SIGSTOP, this is a
3676 no-op. Otherwise, something later will presumably resume
3677 the thread and this will cause it to cancel any pending
3678 operation, due to last_resume_kind == resume_stop. If
3679 the thread already has a pending status to report, we
3680 will still report it the next time we wait - see
3681 status_pending_p_callback. */
3682
3683 /* If we already have a pending signal to report, then
3684 there's no need to queue a SIGSTOP, as this means we're
3685 midway through moving the LWP out of the jumppad, and we
3686 will report the pending signal as soon as that is
3687 finished. */
3688 if (lwp->pending_signals_to_report == NULL)
3689 send_sigstop (lwp);
3690 }
3691
3692 /* For stop requests, we're done. */
3693 lwp->resume = NULL;
3694 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3695 return 0;
3696 }
3697
3698 /* If this thread which is about to be resumed has a pending status,
3699 then don't resume any threads - we can just report the pending
3700 status. Make sure to queue any signals that would otherwise be
3701 sent. In all-stop mode, we make this decision based on whether
3702 *any* thread has a pending status. If there's a thread that needs the
3703 step-over-breakpoint dance, then don't resume any other thread
3704 but that particular one. */
3705 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3706
3707 if (!leave_pending)
3708 {
3709 if (debug_threads)
3710 fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
3711
3712 step = (lwp->resume->kind == resume_step);
3713 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3714 }
3715 else
3716 {
3717 if (debug_threads)
3718 fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
3719
3720 /* If we have a new signal, enqueue the signal. */
3721 if (lwp->resume->sig != 0)
3722 {
3723 struct pending_signals *p_sig;
3724 p_sig = xmalloc (sizeof (*p_sig));
3725 p_sig->prev = lwp->pending_signals;
3726 p_sig->signal = lwp->resume->sig;
3727 memset (&p_sig->info, 0, sizeof (siginfo_t));
3728
3729 /* If this is the same signal we were previously stopped by,
3730 make sure to queue its siginfo. We can ignore the return
3731 value of ptrace; if it fails, we'll skip
3732 PTRACE_SETSIGINFO. */
3733 if (WIFSTOPPED (lwp->last_status)
3734 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3735 ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
3736
3737 lwp->pending_signals = p_sig;
3738 }
3739 }
3740
3741 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3742 lwp->resume = NULL;
3743 return 0;
3744 }
3745
3746 static void
3747 linux_resume (struct thread_resume *resume_info, size_t n)
3748 {
3749 struct thread_resume_array array = { resume_info, n };
3750 struct lwp_info *need_step_over = NULL;
3751 int any_pending;
3752 int leave_all_stopped;
3753
3754 find_inferior (&all_threads, linux_set_resume_request, &array);
3755
3756 /* If there is a thread which would otherwise be resumed, which has
3757 a pending status, then don't resume any threads - we can just
3758 report the pending status. Make sure to queue any signals that
3759 would otherwise be sent. In non-stop mode, we'll apply this
3760 logic to each thread individually. We consume all pending events
3761 before considering starting a step-over (in all-stop). */
3762 any_pending = 0;
3763 if (!non_stop)
3764 find_inferior (&all_lwps, resume_status_pending_p, &any_pending);
3765
3766 /* If there is a thread which would otherwise be resumed, which is
3767 stopped at a breakpoint that needs stepping over, then don't
3768 resume any threads - have it step over the breakpoint with all
3769 other threads stopped, then resume all threads again. Make sure
3770 to queue any signals that would otherwise be delivered or
3771 queued. */
3772 if (!any_pending && supports_breakpoints ())
3773 need_step_over
3774 = (struct lwp_info *) find_inferior (&all_lwps,
3775 need_step_over_p, NULL);
3776
3777 leave_all_stopped = (need_step_over != NULL || any_pending);
3778
3779 if (debug_threads)
3780 {
3781 if (need_step_over != NULL)
3782 fprintf (stderr, "Not resuming all, need step over\n");
3783 else if (any_pending)
3784 fprintf (stderr,
3785 "Not resuming, all-stop and found "
3786 "an LWP with pending status\n");
3787 else
3788 fprintf (stderr, "Resuming, no pending status or step over needed\n");
3789 }
3790
3791 /* Even if we're leaving threads stopped, queue all signals we'd
3792 otherwise deliver. */
3793 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
3794
3795 if (need_step_over)
3796 start_step_over (need_step_over);
3797 }
3798
3799 /* This function is called once per thread. We check the thread's
3800 last resume request, which will tell us whether to resume, step, or
3801 leave the thread stopped. Any signal the client requested to be
3802 delivered has already been enqueued at this point.
3803
3804 If any thread that GDB wants running is stopped at an internal
3805 breakpoint that needs stepping over, we start a step-over operation
3806 on that particular thread, and leave all others stopped. */
3807
3808 static int
3809 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3810 {
3811 struct lwp_info *lwp = (struct lwp_info *) entry;
3812 struct thread_info *thread;
3813 int step;
3814
3815 if (lwp == except)
3816 return 0;
3817
3818 if (debug_threads)
3819 fprintf (stderr,
3820 "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));
3821
3822 if (!lwp->stopped)
3823 {
3824 if (debug_threads)
3825 fprintf (stderr, " LWP %ld already running\n", lwpid_of (lwp));
3826 return 0;
3827 }
3828
3829 thread = get_lwp_thread (lwp);
3830
3831 if (thread->last_resume_kind == resume_stop
3832 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
3833 {
3834 if (debug_threads)
3835 fprintf (stderr, " client wants LWP to remain %ld stopped\n",
3836 lwpid_of (lwp));
3837 return 0;
3838 }
3839
3840 if (lwp->status_pending_p)
3841 {
3842 if (debug_threads)
3843 fprintf (stderr, " LWP %ld has pending status, leaving stopped\n",
3844 lwpid_of (lwp));
3845 return 0;
3846 }
3847
3848 gdb_assert (lwp->suspended >= 0);
3849
3850 if (lwp->suspended)
3851 {
3852 if (debug_threads)
3853 fprintf (stderr, " LWP %ld is suspended\n", lwpid_of (lwp));
3854 return 0;
3855 }
3856
3857 if (thread->last_resume_kind == resume_stop
3858 && lwp->pending_signals_to_report == NULL
3859 && lwp->collecting_fast_tracepoint == 0)
3860 {
3861 /* We haven't reported this LWP as stopped yet (otherwise, the
3862 last_status.kind check above would catch it, and we wouldn't
3863 reach here). This LWP may have been momentarily paused by a
3864 stop_all_lwps call while handling, for example, another LWP's
3865 step-over. In that case, the pending expected SIGSTOP signal
3866 that was queued at vCont;t handling time will have already
3867 been consumed by wait_for_sigstop, and so we need to requeue
3868 another one here. Note that if the LWP already has a SIGSTOP
3869 pending, this is a no-op. */
3870
3871 if (debug_threads)
3872 fprintf (stderr,
3873 "Client wants LWP %ld to stop. "
3874 "Making sure it has a SIGSTOP pending\n",
3875 lwpid_of (lwp));
3876
3877 send_sigstop (lwp);
3878 }
3879
3880 step = thread->last_resume_kind == resume_step;
3881 linux_resume_one_lwp (lwp, step, 0, NULL);
3882 return 0;
3883 }
3884
3885 static int
3886 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
3887 {
3888 struct lwp_info *lwp = (struct lwp_info *) entry;
3889
3890 if (lwp == except)
3891 return 0;
3892
3893 lwp->suspended--;
3894 gdb_assert (lwp->suspended >= 0);
3895
3896 return proceed_one_lwp (entry, except);
3897 }
3898
3899 /* When we finish a step-over, set threads running again. If there's
3900 another thread that may need a step-over, now's the time to start
3901 it. Eventually, we'll move all threads past their breakpoints. */
3902
3903 static void
3904 proceed_all_lwps (void)
3905 {
3906 struct lwp_info *need_step_over;
3907
3908 /* If there is a thread which would otherwise be resumed, which is
3909 stopped at a breakpoint that needs stepping over, then don't
3910 resume any threads - have it step over the breakpoint with all
3911 other threads stopped, then resume all threads again. */
3912
3913 if (supports_breakpoints ())
3914 {
3915 need_step_over
3916 = (struct lwp_info *) find_inferior (&all_lwps,
3917 need_step_over_p, NULL);
3918
3919 if (need_step_over != NULL)
3920 {
3921 if (debug_threads)
3922 fprintf (stderr, "proceed_all_lwps: found "
3923 "thread %ld needing a step-over\n",
3924 lwpid_of (need_step_over));
3925
3926 start_step_over (need_step_over);
3927 return;
3928 }
3929 }
3930
3931 if (debug_threads)
3932 fprintf (stderr, "Proceeding, no step-over needed\n");
3933
3934 find_inferior (&all_lwps, proceed_one_lwp, NULL);
3935 }
3936
3937 /* Stopped LWPs that the client wanted running, and that have no
3938 pending status, are set running again, except for EXCEPT (if
3939 non-NULL). This undoes a stop_all_lwps call. */
3940
3941 static void
3942 unstop_all_lwps (int unsuspend, struct lwp_info *except)
3943 {
3944 if (debug_threads)
3945 {
3946 if (except)
3947 fprintf (stderr,
3948 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
3949 else
3950 fprintf (stderr,
3951 "unstopping all lwps\n");
3952 }
3953
3954 if (unsuspend)
3955 find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
3956 else
3957 find_inferior (&all_lwps, proceed_one_lwp, except);
3958 }
3959
3960
3961 #ifdef HAVE_LINUX_REGSETS
3962
3963 #define use_linux_regsets 1
3964
3965 static int
3966 regsets_fetch_inferior_registers (struct regcache *regcache)
3967 {
3968 struct regset_info *regset;
3969 int saw_general_regs = 0;
3970 int pid;
3971 struct iovec iov;
3972
3973 regset = target_regsets;
3974
3975 pid = lwpid_of (get_thread_lwp (current_inferior));
3976 while (regset->size >= 0)
3977 {
3978 void *buf, *data;
3979 int nt_type, res;
3980
3981 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
3982 {
3983 regset ++;
3984 continue;
3985 }
3986
3987 buf = xmalloc (regset->size);
3988
3989 nt_type = regset->nt_type;
3990 if (nt_type)
3991 {
3992 iov.iov_base = buf;
3993 iov.iov_len = regset->size;
3994 data = (void *) &iov;
3995 }
3996 else
3997 data = buf;
3998
3999 #ifndef __sparc__
4000 res = ptrace (regset->get_request, pid, nt_type, data);
4001 #else
4002 res = ptrace (regset->get_request, pid, data, nt_type);
4003 #endif
4004 if (res < 0)
4005 {
4006 if (errno == EIO)
4007 {
4008 /* If we get EIO on a regset, do not try it again for
4009 this process. */
4010 disabled_regsets[regset - target_regsets] = 1;
4011 free (buf);
4012 continue;
4013 }
4014 else
4015 {
4016 char s[256];
4017 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4018 pid);
4019 perror (s);
4020 }
4021 }
4022 else if (regset->type == GENERAL_REGS)
4023 saw_general_regs = 1;
4024 regset->store_function (regcache, buf);
4025 regset ++;
4026 free (buf);
4027 }
4028 if (saw_general_regs)
4029 return 0;
4030 else
4031 return 1;
4032 }
4033
4034 static int
4035 regsets_store_inferior_registers (struct regcache *regcache)
4036 {
4037 struct regset_info *regset;
4038 int saw_general_regs = 0;
4039 int pid;
4040 struct iovec iov;
4041
4042 regset = target_regsets;
4043
4044 pid = lwpid_of (get_thread_lwp (current_inferior));
4045 while (regset->size >= 0)
4046 {
4047 void *buf, *data;
4048 int nt_type, res;
4049
4050 if (regset->size == 0 || disabled_regsets[regset - target_regsets])
4051 {
4052 regset ++;
4053 continue;
4054 }
4055
4056 buf = xmalloc (regset->size);
4057
4058 /* First fill the buffer with the current register set contents,
4059 in case there are any items in the kernel's regset that are
4060 not in gdbserver's regcache. */
4061
4062 nt_type = regset->nt_type;
4063 if (nt_type)
4064 {
4065 iov.iov_base = buf;
4066 iov.iov_len = regset->size;
4067 data = (void *) &iov;
4068 }
4069 else
4070 data = buf;
4071
4072 #ifndef __sparc__
4073 res = ptrace (regset->get_request, pid, nt_type, data);
4074 #else
4075 res = ptrace (regset->get_request, pid, data, nt_type);
4076 #endif
4077
4078 if (res == 0)
4079 {
4080 /* Then overlay our cached registers on that. */
4081 regset->fill_function (regcache, buf);
4082
4083 /* Only now do we write the register set. */
4084 #ifndef __sparc__
4085 res = ptrace (regset->set_request, pid, nt_type, data);
4086 #else
4087 res = ptrace (regset->set_request, pid, data, nt_type);
4088 #endif
4089 }
4090
4091 if (res < 0)
4092 {
4093 if (errno == EIO)
4094 {
4095 /* If we get EIO on a regset, do not try it again for
4096 this process. */
4097 disabled_regsets[regset - target_regsets] = 1;
4098 free (buf);
4099 continue;
4100 }
4101 else if (errno == ESRCH)
4102 {
4103 /* At this point, ESRCH should mean the process is
4104 already gone, in which case we simply ignore attempts
4105 to change its registers. See also the related
4106 comment in linux_resume_one_lwp. */
4107 free (buf);
4108 return 0;
4109 }
4110 else
4111 {
4112 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4113 }
4114 }
4115 else if (regset->type == GENERAL_REGS)
4116 saw_general_regs = 1;
4117 regset ++;
4118 free (buf);
4119 }
4120 if (saw_general_regs)
4121 return 0;
4122 else
4123 return 1;
4124 }
4125
4126 #else /* !HAVE_LINUX_REGSETS */
4127
4128 #define use_linux_regsets 0
4129 #define regsets_fetch_inferior_registers(regcache) 1
4130 #define regsets_store_inferior_registers(regcache) 1
4131
4132 #endif
4133
4134 /* Return 1 if register REGNO is supported by one of the regset ptrace
4135 calls or 0 if it has to be transferred individually. */
4136
4137 static int
4138 linux_register_in_regsets (int regno)
4139 {
4140 unsigned char mask = 1 << (regno % 8);
4141 size_t index = regno / 8;
4142
4143 return (use_linux_regsets
4144 && (the_low_target.regset_bitmap == NULL
4145 || (the_low_target.regset_bitmap[index] & mask) != 0));
4146 }
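/* A worked example of the lookup above: for regno == 10, index ==
   10 / 8 == 1 and mask == 1 << (10 % 8) == 0x04, so the register is
   handled by the regset path whenever bit 2 of
   the_low_target.regset_bitmap[1] is set (or whenever no bitmap is
   provided at all). */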
4147
4148 #ifdef HAVE_LINUX_USRREGS
4149
4150 int
4151 register_addr (int regnum)
4152 {
4153 int addr;
4154
4155 if (regnum < 0 || regnum >= the_low_target.num_regs)
4156 error ("Invalid register number %d.", regnum);
4157
4158 addr = the_low_target.regmap[regnum];
4159
4160 return addr;
4161 }
4162
4163 /* Fetch one register. */
4164 static void
4165 fetch_register (struct regcache *regcache, int regno)
4166 {
4167 CORE_ADDR regaddr;
4168 int i, size;
4169 char *buf;
4170 int pid;
4171
4172 if (regno >= the_low_target.num_regs)
4173 return;
4174 if ((*the_low_target.cannot_fetch_register) (regno))
4175 return;
4176
4177 regaddr = register_addr (regno);
4178 if (regaddr == -1)
4179 return;
4180
4181 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4182 & -sizeof (PTRACE_XFER_TYPE));
4183 buf = alloca (size);
4184
4185 pid = lwpid_of (get_thread_lwp (current_inferior));
4186 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4187 {
4188 errno = 0;
4189 *(PTRACE_XFER_TYPE *) (buf + i) =
4190 ptrace (PTRACE_PEEKUSER, pid,
4191 /* Coerce to a uintptr_t first to avoid potential gcc warning
4192 about coercing an 8 byte integer to a 4 byte pointer. */
4193 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
4194 regaddr += sizeof (PTRACE_XFER_TYPE);
4195 if (errno != 0)
4196 error ("reading register %d: %s", regno, strerror (errno));
4197 }
4198
4199 if (the_low_target.supply_ptrace_register)
4200 the_low_target.supply_ptrace_register (regcache, regno, buf);
4201 else
4202 supply_register (regcache, regno, buf);
4203 }
4204
4205 /* Store one register. */
4206 static void
4207 store_register (struct regcache *regcache, int regno)
4208 {
4209 CORE_ADDR regaddr;
4210 int i, size;
4211 char *buf;
4212 int pid;
4213
4214 if (regno >= the_low_target.num_regs)
4215 return;
4216 if ((*the_low_target.cannot_store_register) (regno))
4217 return;
4218
4219 regaddr = register_addr (regno);
4220 if (regaddr == -1)
4221 return;
4222
4223 size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
4224 & -sizeof (PTRACE_XFER_TYPE));
4225 buf = alloca (size);
4226 memset (buf, 0, size);
4227
4228 if (the_low_target.collect_ptrace_register)
4229 the_low_target.collect_ptrace_register (regcache, regno, buf);
4230 else
4231 collect_register (regcache, regno, buf);
4232
4233 pid = lwpid_of (get_thread_lwp (current_inferior));
4234 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4235 {
4236 errno = 0;
4237 ptrace (PTRACE_POKEUSER, pid,
4238 /* Coerce to a uintptr_t first to avoid potential gcc warning
4239 about coercing an 8 byte integer to a 4 byte pointer. */
4240 (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
4241 (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
4242 if (errno != 0)
4243 {
4244 /* At this point, ESRCH should mean the process is
4245 already gone, in which case we simply ignore attempts
4246 to change its registers. See also the related
4247 comment in linux_resume_one_lwp. */
4248 if (errno == ESRCH)
4249 return;
4250
4251 if ((*the_low_target.cannot_store_register) (regno) == 0)
4252 error ("writing register %d: %s", regno, strerror (errno));
4253 }
4254 regaddr += sizeof (PTRACE_XFER_TYPE);
4255 }
4256 }
4257
4258 /* Fetch all registers, or just one, from the child process.
4259 If REGNO is -1, do this for all registers, skipping any that are
4260 assumed to have been retrieved by regsets_fetch_inferior_registers,
4261 unless ALL is non-zero.
4262 Otherwise, REGNO specifies which register (so we can save time). */
4263 static void
4264 usr_fetch_inferior_registers (struct regcache *regcache, int regno, int all)
4265 {
4266 if (regno == -1)
4267 {
4268 for (regno = 0; regno < the_low_target.num_regs; regno++)
4269 if (all || !linux_register_in_regsets (regno))
4270 fetch_register (regcache, regno);
4271 }
4272 else
4273 fetch_register (regcache, regno);
4274 }
4275
4276 /* Store our register values back into the inferior.
4277 If REGNO is -1, do this for all registers, skipping any that are
4278 assumed to have been saved by regsets_store_inferior_registers,
4279 unless ALL is non-zero.
4280 Otherwise, REGNO specifies which register (so we can save time). */
4281 static void
4282 usr_store_inferior_registers (struct regcache *regcache, int regno, int all)
4283 {
4284 if (regno == -1)
4285 {
4286 for (regno = 0; regno < the_low_target.num_regs; regno++)
4287 if (all || !linux_register_in_regsets (regno))
4288 store_register (regcache, regno);
4289 }
4290 else
4291 store_register (regcache, regno);
4292 }
4293
4294 #else /* !HAVE_LINUX_USRREGS */
4295
4296 #define usr_fetch_inferior_registers(regcache, regno, all) do {} while (0)
4297 #define usr_store_inferior_registers(regcache, regno, all) do {} while (0)
4298
4299 #endif
4300
4301
4302 void
4303 linux_fetch_registers (struct regcache *regcache, int regno)
4304 {
4305 int use_regsets;
4306 int all = 0;
4307
4308 if (regno == -1)
4309 {
4310 if (the_low_target.fetch_register != NULL)
4311 for (regno = 0; regno < the_low_target.num_regs; regno++)
4312 (*the_low_target.fetch_register) (regcache, regno);
4313
4314 all = regsets_fetch_inferior_registers (regcache);
4315 usr_fetch_inferior_registers (regcache, -1, all);
4316 }
4317 else
4318 {
4319 if (the_low_target.fetch_register != NULL
4320 && (*the_low_target.fetch_register) (regcache, regno))
4321 return;
4322
4323 use_regsets = linux_register_in_regsets (regno);
4324 if (use_regsets)
4325 all = regsets_fetch_inferior_registers (regcache);
4326 if (!use_regsets || all)
4327 usr_fetch_inferior_registers (regcache, regno, 1);
4328 }
4329 }
4330
4331 void
4332 linux_store_registers (struct regcache *regcache, int regno)
4333 {
4334 int use_regsets;
4335 int all = 0;
4336
4337 if (regno == -1)
4338 {
4339 all = regsets_store_inferior_registers (regcache);
4340 usr_store_inferior_registers (regcache, regno, all);
4341 }
4342 else
4343 {
4344 use_regsets = linux_register_in_regsets (regno);
4345 if (use_regsets)
4346 all = regsets_store_inferior_registers (regcache);
4347 if (!use_regsets || all)
4348 usr_store_inferior_registers (regcache, regno, 1);
4349 }
4350 }
4351
4352
4353 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4354 to debugger memory starting at MYADDR. */
4355
4356 static int
4357 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4358 {
4359 register int i;
4360 /* Round starting address down to longword boundary. */
4361 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4362 /* Round ending address up; get number of longwords that makes. */
4363 register int count
4364 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4365 / sizeof (PTRACE_XFER_TYPE);
4366 /* Allocate buffer of that many longwords. */
4367 register PTRACE_XFER_TYPE *buffer
4368 = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4369 int fd;
4370 char filename[64];
4371 int pid = lwpid_of (get_thread_lwp (current_inferior));
4372
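/* A worked example of the rounding above, assuming 4-byte words: for
   memaddr == 0x1003 and len == 6, addr rounds down to 0x1000 and
   count == ((0x1009 - 0x1000) + 3) / 4 == 3, so three whole words
   cover the requested byte range. */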
4373 /* Try using /proc. Don't bother for transfers of under three words. */
4374 if (len >= 3 * sizeof (long))
4375 {
4376 /* We could keep this file open and cache it - possibly one per
4377 thread. That requires some juggling, but is even faster. */
4378 sprintf (filename, "/proc/%d/mem", pid);
4379 fd = open (filename, O_RDONLY | O_LARGEFILE);
4380 if (fd == -1)
4381 goto no_proc;
4382
4383 /* If pread64 is available, use it. It's faster if the kernel
4384 supports it (only one syscall), and it's 64-bit safe even on
4385 32-bit platforms (for instance, SPARC debugging a SPARC64
4386 application). */
4387 #ifdef HAVE_PREAD64
4388 if (pread64 (fd, myaddr, len, memaddr) != len)
4389 #else
4390 if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
4391 #endif
4392 {
4393 close (fd);
4394 goto no_proc;
4395 }
4396
4397 close (fd);
4398 return 0;
4399 }
4400
4401 no_proc:
4402 /* Read all the longwords. */
4403 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4404 {
4405 errno = 0;
4406 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4407 about coercing an 8 byte integer to a 4 byte pointer. */
4408 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4409 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4410 if (errno)
4411 return errno;
4412 }
4413
4414 /* Copy appropriate bytes out of the buffer. */
4415 memcpy (myaddr,
4416 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4417 len);
4418
4419 return 0;
4420 }
4421
4422 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4423 memory at MEMADDR. On failure (cannot write to the inferior)
4424 returns the value of errno. */
4425
4426 static int
4427 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4428 {
4429 register int i;
4430 /* Round starting address down to longword boundary. */
4431 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4432 /* Round ending address up; get number of longwords that makes. */
4433 register int count
4434 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4435 / sizeof (PTRACE_XFER_TYPE);
4436
4437 /* Allocate buffer of that many longwords. */
4438 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4439 alloca (count * sizeof (PTRACE_XFER_TYPE));
4440
4441 int pid = lwpid_of (get_thread_lwp (current_inferior));
4442
4443 if (debug_threads)
4444 {
4445 /* Dump up to four bytes. */
4446 unsigned int val = * (unsigned int *) myaddr;
4447 if (len == 1)
4448 val = val & 0xff;
4449 else if (len == 2)
4450 val = val & 0xffff;
4451 else if (len == 3)
4452 val = val & 0xffffff;
4453 fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4454 val, (long)memaddr);
4455 }
4456
4457 /* Fill start and end extra bytes of buffer with existing memory data. */
4458
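/* For instance, writing two bytes starting one byte into a word: the
   containing word is peeked below, the two payload bytes are overlaid
   at offset 1 by the memcpy further down, and the whole word is poked
   back, preserving the neighboring bytes that were not part of the
   write. */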
4459 errno = 0;
4460 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4461 about coercing an 8 byte integer to a 4 byte pointer. */
4462 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4463 (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
4464 if (errno)
4465 return errno;
4466
4467 if (count > 1)
4468 {
4469 errno = 0;
4470 buffer[count - 1]
4471 = ptrace (PTRACE_PEEKTEXT, pid,
4472 /* Coerce to a uintptr_t first to avoid potential gcc warning
4473 about coercing an 8 byte integer to a 4 byte pointer. */
4474 (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
4475 * sizeof (PTRACE_XFER_TYPE)),
4476 0);
4477 if (errno)
4478 return errno;
4479 }
4480
4481 /* Copy data to be written over corresponding part of buffer. */
4482
4483 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4484 myaddr, len);
4485
4486 /* Write the entire buffer. */
4487
4488 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4489 {
4490 errno = 0;
4491 ptrace (PTRACE_POKETEXT, pid,
4492 /* Coerce to a uintptr_t first to avoid potential gcc warning
4493 about coercing an 8 byte integer to a 4 byte pointer. */
4494 (PTRACE_ARG3_TYPE) (uintptr_t) addr,
4495 (PTRACE_ARG4_TYPE) buffer[i]);
4496 if (errno)
4497 return errno;
4498 }
4499
4500 return 0;
4501 }
4502
4503 /* Non-zero if the kernel supports PTRACE_O_TRACEFORK. */
4504 static int linux_supports_tracefork_flag;
4505
4506 static void
4507 linux_enable_event_reporting (int pid)
4508 {
4509 if (!linux_supports_tracefork_flag)
4510 return;
4511
4512 ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
4513 }
4514
4515 /* Helper functions for linux_test_for_tracefork, called via clone (). */
4516
4517 static int
4518 linux_tracefork_grandchild (void *arg)
4519 {
4520 _exit (0);
4521 }
4522
4523 #define STACK_SIZE 4096
4524
4525 static int
4526 linux_tracefork_child (void *arg)
4527 {
4528 ptrace (PTRACE_TRACEME, 0, 0, 0);
4529 kill (getpid (), SIGSTOP);
4530
4531 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4532
4533 if (fork () == 0)
4534 linux_tracefork_grandchild (NULL);
4535
4536 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4537
4538 #ifdef __ia64__
4539 __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
4540 CLONE_VM | SIGCHLD, NULL);
4541 #else
4542 clone (linux_tracefork_grandchild, (char *) arg + STACK_SIZE,
4543 CLONE_VM | SIGCHLD, NULL);
4544 #endif
4545
4546 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4547
4548 _exit (0);
4549 }
4550
4551 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events. Make
4552 sure that we can enable the option, and that it has the desired
4553 effect. */
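/* If the option works, the child's fork is reported as a stop whose
   wait status carries the ptrace event code in its upper bits,
   i.e. (status >> 16) == PTRACE_EVENT_FORK, which is exactly what the
   check further down tests for. */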
4554
4555 static void
4556 linux_test_for_tracefork (void)
4557 {
4558 int child_pid, ret, status;
4559 long second_pid;
4560 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4561 char *stack = xmalloc (STACK_SIZE * 4);
4562 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4563
4564 linux_supports_tracefork_flag = 0;
4565
4566 #if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
4567
4568 child_pid = fork ();
4569 if (child_pid == 0)
4570 linux_tracefork_child (NULL);
4571
4572 #else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4573
4574 /* Use CLONE_VM instead of fork, to support uClinux (no MMU). */
4575 #ifdef __ia64__
4576 child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
4577 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4578 #else /* !__ia64__ */
4579 child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
4580 CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
4581 #endif /* !__ia64__ */
4582
4583 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4584
4585 if (child_pid == -1)
4586 perror_with_name ("clone");
4587
4588 ret = my_waitpid (child_pid, &status, 0);
4589 if (ret == -1)
4590 perror_with_name ("waitpid");
4591 else if (ret != child_pid)
4592 error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
4593 if (! WIFSTOPPED (status))
4594 error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
4595
4596 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
4597 (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
4598 if (ret != 0)
4599 {
4600 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4601 if (ret != 0)
4602 {
4603 warning ("linux_test_for_tracefork: failed to kill child");
4604 return;
4605 }
4606
4607 ret = my_waitpid (child_pid, &status, 0);
4608 if (ret != child_pid)
4609 warning ("linux_test_for_tracefork: failed to wait for killed child");
4610 else if (!WIFSIGNALED (status))
4611 warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
4612 "killed child", status);
4613
4614 return;
4615 }
4616
4617 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
4618 if (ret != 0)
4619 warning ("linux_test_for_tracefork: failed to resume child");
4620
4621 ret = my_waitpid (child_pid, &status, 0);
4622
4623 if (ret == child_pid && WIFSTOPPED (status)
4624 && status >> 16 == PTRACE_EVENT_FORK)
4625 {
4626 second_pid = 0;
4627 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
4628 if (ret == 0 && second_pid != 0)
4629 {
4630 int second_status;
4631
4632 linux_supports_tracefork_flag = 1;
4633 my_waitpid (second_pid, &second_status, 0);
4634 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
4635 if (ret != 0)
4636 warning ("linux_test_for_tracefork: failed to kill second child");
4637 my_waitpid (second_pid, &status, 0);
4638 }
4639 }
4640 else
4641 warning ("linux_test_for_tracefork: unexpected result from waitpid "
4642 "(%d, status 0x%x)", ret, status);
4643
4644 do
4645 {
4646 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
4647 if (ret != 0)
4648 warning ("linux_test_for_tracefork: failed to kill child");
4649 my_waitpid (child_pid, &status, 0);
4650 }
4651 while (WIFSTOPPED (status));
4652
4653 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4654 free (stack);
4655 #endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
4656 }
4657
4658
4659 static void
4660 linux_look_up_symbols (void)
4661 {
4662 #ifdef USE_THREAD_DB
4663 struct process_info *proc = current_process ();
4664
4665 if (proc->private->thread_db != NULL)
4666 return;
4667
4668 /* If the kernel supports tracing forks then it also supports tracing
4669 clones, and then we don't need to use the magic thread event breakpoint
4670 to learn about threads. */
4671 thread_db_init (!linux_supports_tracefork_flag);
4672 #endif
4673 }
4674
4675 static void
4676 linux_request_interrupt (void)
4677 {
4678 extern unsigned long signal_pid;
4679
4680 if (!ptid_equal (cont_thread, null_ptid)
4681 && !ptid_equal (cont_thread, minus_one_ptid))
4682 {
4683 struct lwp_info *lwp;
4684 int lwpid;
4685
4686 lwp = get_thread_lwp (current_inferior);
4687 lwpid = lwpid_of (lwp);
4688 kill_lwp (lwpid, SIGINT);
4689 }
4690 else
4691 kill_lwp (signal_pid, SIGINT);
4692 }
4693
4694 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4695 to debugger memory starting at MYADDR. */
4696
4697 static int
4698 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4699 {
4700 char filename[PATH_MAX];
4701 int fd, n;
4702 int pid = lwpid_of (get_thread_lwp (current_inferior));
4703
4704 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4705
4706 fd = open (filename, O_RDONLY);
4707 if (fd < 0)
4708 return -1;
4709
4710 if (offset != (CORE_ADDR) 0
4711 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4712 n = -1;
4713 else
4714 n = read (fd, myaddr, len);
4715
4716 close (fd);
4717
4718 return n;
4719 }
4720
4721 /* These breakpoint- and watchpoint-related wrapper functions simply
4722 pass on the function call if the target has registered a
4723 corresponding function. */
4724
4725 static int
4726 linux_insert_point (char type, CORE_ADDR addr, int len)
4727 {
4728 if (the_low_target.insert_point != NULL)
4729 return the_low_target.insert_point (type, addr, len);
4730 else
4731 /* Unsupported (see target.h). */
4732 return 1;
4733 }
4734
4735 static int
4736 linux_remove_point (char type, CORE_ADDR addr, int len)
4737 {
4738 if (the_low_target.remove_point != NULL)
4739 return the_low_target.remove_point (type, addr, len);
4740 else
4741 /* Unsupported (see target.h). */
4742 return 1;
4743 }
4744
4745 static int
4746 linux_stopped_by_watchpoint (void)
4747 {
4748 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4749
4750 return lwp->stopped_by_watchpoint;
4751 }
4752
4753 static CORE_ADDR
4754 linux_stopped_data_address (void)
4755 {
4756 struct lwp_info *lwp = get_thread_lwp (current_inferior);
4757
4758 return lwp->stopped_data_address;
4759 }
4760
4761 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
4762 #if defined(__mcoldfire__)
4763 /* These should really be defined in the kernel's ptrace.h header. */
4764 #define PT_TEXT_ADDR 49*4
4765 #define PT_DATA_ADDR 50*4
4766 #define PT_TEXT_END_ADDR 51*4
4767 #elif defined(BFIN)
4768 #define PT_TEXT_ADDR 220
4769 #define PT_TEXT_END_ADDR 224
4770 #define PT_DATA_ADDR 228
4771 #elif defined(__TMS320C6X__)
4772 #define PT_TEXT_ADDR (0x10000*4)
4773 #define PT_DATA_ADDR (0x10004*4)
4774 #define PT_TEXT_END_ADDR (0x10008*4)
4775 #endif
4776
4777 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4778 to tell gdb about. */
4779
4780 static int
4781 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4782 {
4783 #if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
4784 unsigned long text, text_end, data;
4785 int pid = lwpid_of (get_thread_lwp (current_inferior));
4786
4787 errno = 0;
4788
4789 text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
4790 text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
4791 data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
4792
4793 if (errno == 0)
4794 {
4795 /* Both text and data offsets produced at compile-time (and so
4796 used by gdb) are relative to the beginning of the program,
4797 with the data segment immediately following the text segment.
4798 However, the actual runtime layout in memory may put the data
4799 somewhere else, so when we send gdb a data base-address, we
4800 use the real data base address and subtract the compile-time
4801 data base-address from it (which is just the length of the
4802 text segment). BSS immediately follows data in both
4803 cases. */
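/* A sketch with made-up numbers: if text == 0x100000, text_end ==
   0x108000 and data == 0x200000, then *data_p becomes 0x200000 -
   0x8000 == 0x1f8000, so that adding back the compile-time data
   base-address (the 0x8000 text length) yields the real runtime data
   address again. */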
4804 *text_p = text;
4805 *data_p = data - (text_end - text);
4806
4807 return 1;
4808 }
4809 #endif
4810 return 0;
4811 }
4812 #endif
4813
4814 static int
4815 linux_qxfer_osdata (const char *annex,
4816 unsigned char *readbuf, unsigned const char *writebuf,
4817 CORE_ADDR offset, int len)
4818 {
4819 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4820 }
4821
4822 /* Convert a native/host siginfo object into/from the siginfo in the
4823 layout of the inferior's architecture. */
4824
4825 static void
4826 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4827 {
4828 int done = 0;
4829
4830 if (the_low_target.siginfo_fixup != NULL)
4831 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4832
4833 /* If there was no callback, or the callback didn't do anything,
4834 then just do a straight memcpy. */
4835 if (!done)
4836 {
4837 if (direction == 1)
4838 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4839 else
4840 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4841 }
4842 }
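/* Usage sketch, mirroring linux_xfer_siginfo below: DIRECTION == 0
   converts the native siginfo just fetched with PTRACE_GETSIGINFO
   into the inferior's layout for transmission to the client, while
   DIRECTION == 1 converts client-written bytes back to the native
   layout before PTRACE_SETSIGINFO. */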
4843
4844 static int
4845 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4846 unsigned const char *writebuf, CORE_ADDR offset, int len)
4847 {
4848 int pid;
4849 siginfo_t siginfo;
4850 char inf_siginfo[sizeof (siginfo_t)];
4851
4852 if (current_inferior == NULL)
4853 return -1;
4854
4855 pid = lwpid_of (get_thread_lwp (current_inferior));
4856
4857 if (debug_threads)
4858 fprintf (stderr, "%s siginfo for lwp %d.\n",
4859 readbuf != NULL ? "Reading" : "Writing",
4860 pid);
4861
4862 if (offset >= sizeof (siginfo))
4863 return -1;
4864
4865 if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
4866 return -1;
4867
4868 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4869 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4870 inferior with a 64-bit GDBSERVER should look the same as debugging it
4871 with a 32-bit GDBSERVER, we need to convert it. */
4872 siginfo_fixup (&siginfo, inf_siginfo, 0);
4873
4874 if (offset + len > sizeof (siginfo))
4875 len = sizeof (siginfo) - offset;
4876
4877 if (readbuf != NULL)
4878 memcpy (readbuf, inf_siginfo + offset, len);
4879 else
4880 {
4881 memcpy (inf_siginfo + offset, writebuf, len);
4882
4883 /* Convert back to ptrace layout before flushing it out. */
4884 siginfo_fixup (&siginfo, inf_siginfo, 1);
4885
4886 if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
4887 return -1;
4888 }
4889
4890 return len;
4891 }
4892
4893 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4894 it lets us notice when children change state; and it acts as the
4895 handler for the sigsuspend in my_waitpid. */
4896
4897 static void
4898 sigchld_handler (int signo)
4899 {
4900 int old_errno = errno;
4901
4902 if (debug_threads)
4903 {
4904 do
4905 {
4906 /* fprintf is not async-signal-safe, so call write
4907 directly. */
4908 if (write (2, "sigchld_handler\n",
4909 sizeof ("sigchld_handler\n") - 1) < 0)
4910 break; /* just ignore */
4911 } while (0);
4912 }
4913
4914 if (target_is_async_p ())
4915 async_file_mark (); /* trigger a linux_wait */
4916
4917 errno = old_errno;
4918 }
4919
4920 static int
4921 linux_supports_non_stop (void)
4922 {
4923 return 1;
4924 }
4925
4926 static int
4927 linux_async (int enable)
4928 {
4929 int previous = (linux_event_pipe[0] != -1);
4930
4931 if (debug_threads)
4932 fprintf (stderr, "linux_async (%d), previous=%d\n",
4933 enable, previous);
4934
4935 if (previous != enable)
4936 {
4937 sigset_t mask;
4938 sigemptyset (&mask);
4939 sigaddset (&mask, SIGCHLD);
4940
4941 sigprocmask (SIG_BLOCK, &mask, NULL);
4942
4943 if (enable)
4944 {
4945 if (pipe (linux_event_pipe) == -1)
4946 fatal ("creating event pipe failed.");
4947
4948 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
4949 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
4950
4951 /* Register the event loop handler. */
4952 add_file_handler (linux_event_pipe[0],
4953 handle_target_event, NULL);
4954
4955 /* Always trigger a linux_wait. */
4956 async_file_mark ();
4957 }
4958 else
4959 {
4960 delete_file_handler (linux_event_pipe[0]);
4961
4962 close (linux_event_pipe[0]);
4963 close (linux_event_pipe[1]);
4964 linux_event_pipe[0] = -1;
4965 linux_event_pipe[1] = -1;
4966 }
4967
4968 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4969 }
4970
4971 return previous;
4972 }
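/* The pipe above implements the usual self-pipe pattern:
   async_file_mark (presumably by writing a byte to
   linux_event_pipe[1]) makes linux_event_pipe[0] readable, so the
   event loop wakes up and runs handle_target_event.  Since marking
   also happens from sigchld_handler, both ends are set non-blocking
   so a full pipe can never wedge the signal handler. */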
4973
4974 static int
4975 linux_start_non_stop (int nonstop)
4976 {
4977 /* Register or unregister from event-loop accordingly. */
4978 linux_async (nonstop);
4979 return 0;
4980 }
4981
4982 static int
4983 linux_supports_multi_process (void)
4984 {
4985 return 1;
4986 }
4987
4988 static int
4989 linux_supports_disable_randomization (void)
4990 {
4991 #ifdef HAVE_PERSONALITY
4992 return 1;
4993 #else
4994 return 0;
4995 #endif
4996 }
4997
4998 static int
4999 linux_supports_agent (void)
5000 {
5001 return 1;
5002 }
5003
5004 /* Enumerate spufs IDs for process PID. */
5005 static int
5006 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5007 {
5008 int pos = 0;
5009 int written = 0;
5010 char path[128];
5011 DIR *dir;
5012 struct dirent *entry;
5013
5014 sprintf (path, "/proc/%ld/fd", pid);
5015 dir = opendir (path);
5016 if (!dir)
5017 return -1;
5018
5019 rewinddir (dir);
5020 while ((entry = readdir (dir)) != NULL)
5021 {
5022 struct stat st;
5023 struct statfs stfs;
5024 int fd;
5025
5026 fd = atoi (entry->d_name);
5027 if (!fd)
5028 continue;
5029
5030 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5031 if (stat (path, &st) != 0)
5032 continue;
5033 if (!S_ISDIR (st.st_mode))
5034 continue;
5035
5036 if (statfs (path, &stfs) != 0)
5037 continue;
5038 if (stfs.f_type != SPUFS_MAGIC)
5039 continue;
5040
5041 if (pos >= offset && pos + 4 <= offset + len)
5042 {
5043 *(unsigned int *)(buf + pos - offset) = fd;
5044 written += 4;
5045 }
5046 pos += 4;
5047 }
5048
5049 closedir (dir);
5050 return written;
5051 }
5052
5053 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5054 object type, using the /proc file system. */
5055 static int
5056 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5057 unsigned const char *writebuf,
5058 CORE_ADDR offset, int len)
5059 {
5060 long pid = lwpid_of (get_thread_lwp (current_inferior));
5061 char buf[128];
5062 int fd = 0;
5063 int ret = 0;
5064
5065 if (!writebuf && !readbuf)
5066 return -1;
5067
5068 if (!*annex)
5069 {
5070 if (!readbuf)
5071 return -1;
5072 else
5073 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5074 }
5075
5076 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5077 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5078 if (fd <= 0)
5079 return -1;
5080
5081 if (offset != 0
5082 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5083 {
5084 close (fd);
5085 return 0;
5086 }
5087
5088 if (writebuf)
5089 ret = write (fd, writebuf, (size_t) len);
5090 else
5091 ret = read (fd, readbuf, (size_t) len);
5092
5093 close (fd);
5094 return ret;
5095 }
5096
5097 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5098 struct target_loadseg
5099 {
5100 /* Core address to which the segment is mapped. */
5101 Elf32_Addr addr;
5102 /* VMA recorded in the program header. */
5103 Elf32_Addr p_vaddr;
5104 /* Size of this segment in memory. */
5105 Elf32_Word p_memsz;
5106 };
5107
5108 # if defined PT_GETDSBT
5109 struct target_loadmap
5110 {
5111 /* Protocol version number, must be zero. */
5112 Elf32_Word version;
5113 /* Pointer to the DSBT table, its size, and the DSBT index. */
5114 unsigned *dsbt_table;
5115 unsigned dsbt_size, dsbt_index;
5116 /* Number of segments in this map. */
5117 Elf32_Word nsegs;
5118 /* The actual memory map. */
5119 struct target_loadseg segs[/*nsegs*/];
5120 };
5121 # define LINUX_LOADMAP PT_GETDSBT
5122 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5123 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5124 # else
5125 struct target_loadmap
5126 {
5127 /* Protocol version number, must be zero. */
5128 Elf32_Half version;
5129 /* Number of segments in this map. */
5130 Elf32_Half nsegs;
5131 /* The actual memory map. */
5132 struct target_loadseg segs[/*nsegs*/];
5133 };
5134 # define LINUX_LOADMAP PTRACE_GETFDPIC
5135 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5136 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5137 # endif
5138
5139 static int
5140 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5141 unsigned char *myaddr, unsigned int len)
5142 {
5143 int pid = lwpid_of (get_thread_lwp (current_inferior));
5144 int addr = -1;
5145 struct target_loadmap *data = NULL;
5146 unsigned int actual_length, copy_length;
5147
5148 if (strcmp (annex, "exec") == 0)
5149 addr = (int) LINUX_LOADMAP_EXEC;
5150 else if (strcmp (annex, "interp") == 0)
5151 addr = (int) LINUX_LOADMAP_INTERP;
5152 else
5153 return -1;
5154
5155 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5156 return -1;
5157
5158 if (data == NULL)
5159 return -1;
5160
5161 actual_length = sizeof (struct target_loadmap)
5162 + sizeof (struct target_loadseg) * data->nsegs;
5163
5164 if (offset < 0 || offset > actual_length)
5165 return -1;
5166
5167 copy_length = actual_length - offset < len ? actual_length - offset : len;
5168 memcpy (myaddr, (char *) data + offset, copy_length);
5169 return copy_length;
5170 }
5171 #else
5172 # define linux_read_loadmap NULL
5173 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5174
5175 static void
5176 linux_process_qsupported (const char *query)
5177 {
5178 if (the_low_target.process_qsupported != NULL)
5179 the_low_target.process_qsupported (query);
5180 }
5181
5182 static int
5183 linux_supports_tracepoints (void)
5184 {
5185 if (the_low_target.supports_tracepoints == NULL)
5186 return 0;
5187
5188 return (*the_low_target.supports_tracepoints) ();
5189 }
5190
5191 static CORE_ADDR
5192 linux_read_pc (struct regcache *regcache)
5193 {
5194 if (the_low_target.get_pc == NULL)
5195 return 0;
5196
5197 return (*the_low_target.get_pc) (regcache);
5198 }
5199
5200 static void
5201 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5202 {
5203 gdb_assert (the_low_target.set_pc != NULL);
5204
5205 (*the_low_target.set_pc) (regcache, pc);
5206 }
5207
5208 static int
5209 linux_thread_stopped (struct thread_info *thread)
5210 {
5211 return get_thread_lwp (thread)->stopped;
5212 }
5213
5214 /* This exposes stop-all-threads functionality to other modules. */
5215
5216 static void
5217 linux_pause_all (int freeze)
5218 {
5219 stop_all_lwps (freeze, NULL);
5220 }
5221
5222 /* This exposes unstop-all-threads functionality to other gdbserver
5223 modules. */
5224
5225 static void
5226 linux_unpause_all (int unfreeze)
5227 {
5228 unstop_all_lwps (unfreeze, NULL);
5229 }
5230
5231 static int
5232 linux_prepare_to_access_memory (void)
5233 {
5234 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5235 running LWP. */
5236 if (non_stop)
5237 linux_pause_all (1);
5238 return 0;
5239 }
5240
5241 static void
5242 linux_done_accessing_memory (void)
5243 {
5244 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5245 running LWP. */
5246 if (non_stop)
5247 linux_unpause_all (1);
5248 }
5249
5250 static int
5251 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5252 CORE_ADDR collector,
5253 CORE_ADDR lockaddr,
5254 ULONGEST orig_size,
5255 CORE_ADDR *jump_entry,
5256 CORE_ADDR *trampoline,
5257 ULONGEST *trampoline_size,
5258 unsigned char *jjump_pad_insn,
5259 ULONGEST *jjump_pad_insn_size,
5260 CORE_ADDR *adjusted_insn_addr,
5261 CORE_ADDR *adjusted_insn_addr_end,
5262 char *err)
5263 {
5264 return (*the_low_target.install_fast_tracepoint_jump_pad)
5265 (tpoint, tpaddr, collector, lockaddr, orig_size,
5266 jump_entry, trampoline, trampoline_size,
5267 jjump_pad_insn, jjump_pad_insn_size,
5268 adjusted_insn_addr, adjusted_insn_addr_end,
5269 err);
5270 }
5271
5272 static struct emit_ops *
5273 linux_emit_ops (void)
5274 {
5275 if (the_low_target.emit_ops != NULL)
5276 return (*the_low_target.emit_ops) ();
5277 else
5278 return NULL;
5279 }
5280
5281 static int
5282 linux_get_min_fast_tracepoint_insn_len (void)
5283 {
5284 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5285 }
5286
5287 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5288
5289 static int
5290 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5291 CORE_ADDR *phdr_memaddr, int *num_phdr)
5292 {
5293 char filename[PATH_MAX];
5294 int fd;
5295 const int auxv_size = is_elf64
5296 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5297 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5298
5299 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5300
5301 fd = open (filename, O_RDONLY);
5302 if (fd < 0)
5303 return 1;
5304
5305 *phdr_memaddr = 0;
5306 *num_phdr = 0;
5307 while (read (fd, buf, auxv_size) == auxv_size
5308 && (*phdr_memaddr == 0 || *num_phdr == 0))
5309 {
5310 if (is_elf64)
5311 {
5312 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5313
5314 switch (aux->a_type)
5315 {
5316 case AT_PHDR:
5317 *phdr_memaddr = aux->a_un.a_val;
5318 break;
5319 case AT_PHNUM:
5320 *num_phdr = aux->a_un.a_val;
5321 break;
5322 }
5323 }
5324 else
5325 {
5326 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5327
5328 switch (aux->a_type)
5329 {
5330 case AT_PHDR:
5331 *phdr_memaddr = aux->a_un.a_val;
5332 break;
5333 case AT_PHNUM:
5334 *num_phdr = aux->a_un.a_val;
5335 break;
5336 }
5337 }
5338 }
5339
5340 close (fd);
5341
5342 if (*phdr_memaddr == 0 || *num_phdr == 0)
5343 {
5344 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5345 "phdr_memaddr = %ld, phdr_num = %d",
5346 (long) *phdr_memaddr, *num_phdr);
5347 return 2;
5348 }
5349
5350 return 0;
5351 }
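/* The auxv file read above is a flat array of entries, each an
   {a_type, a_un.a_val} pair in the inferior's word size, terminated
   by an AT_NULL entry; AT_PHDR carries the runtime address of the
   program headers and AT_PHNUM their count, which is all this
   function needs. */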
5352
5353 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5354
5355 static CORE_ADDR
5356 get_dynamic (const int pid, const int is_elf64)
5357 {
5358 CORE_ADDR phdr_memaddr, relocation;
5359 int num_phdr, i;
5360 unsigned char *phdr_buf;
5361 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5362
5363 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5364 return 0;
5365
5366 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5367 phdr_buf = alloca (num_phdr * phdr_size);
5368
5369 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5370 return 0;
5371
5372 /* Compute relocation: it is expected to be 0 for "regular" executables,
5373 non-zero for PIE ones. */
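/* For instance (illustrative addresses): a PIE whose PT_PHDR records
   p_vaddr == 0x40, but whose program headers were found at
   0x7f0000000040 via AT_PHDR, yields relocation == 0x7f0000000000,
   the load bias that gets added to every p_vaddr below. */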
5374 relocation = -1;
5375 for (i = 0; relocation == -1 && i < num_phdr; i++)
5376 if (is_elf64)
5377 {
5378 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5379
5380 if (p->p_type == PT_PHDR)
5381 relocation = phdr_memaddr - p->p_vaddr;
5382 }
5383 else
5384 {
5385 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5386
5387 if (p->p_type == PT_PHDR)
5388 relocation = phdr_memaddr - p->p_vaddr;
5389 }
5390
5391 if (relocation == -1)
5392 {
5393 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5394 all real-world executables, including PIE executables, always have
5395 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5396 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5397 provides DT_DEBUG anyway (fpc binaries are statically linked).
5398
5399 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5400
5401 GDB could also find RELOCATION from AT_ENTRY - e_entry. */
5402
5403 return 0;
5404 }
5405
5406 for (i = 0; i < num_phdr; i++)
5407 {
5408 if (is_elf64)
5409 {
5410 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5411
5412 if (p->p_type == PT_DYNAMIC)
5413 return p->p_vaddr + relocation;
5414 }
5415 else
5416 {
5417 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5418
5419 if (p->p_type == PT_DYNAMIC)
5420 return p->p_vaddr + relocation;
5421 }
5422 }
5423
5424 return 0;
5425 }
5426
5427 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5428 can be 0 if the inferior does not yet have the library list initialized.
5429 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5430 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5431
5432 static CORE_ADDR
5433 get_r_debug (const int pid, const int is_elf64)
5434 {
5435 CORE_ADDR dynamic_memaddr;
5436 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5437 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5438 CORE_ADDR map = -1;
5439
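/* The loop below walks the PT_DYNAMIC table entry by entry until
   DT_NULL.  On SVR4 targets the dynamic linker publishes the address
   of its struct r_debug through the DT_DEBUG entry's d_un field,
   which is what ends up in MAP; DT_MIPS_RLD_MAP instead holds the
   address of a word that in turn contains that pointer, hence the
   extra linux_read_memory indirection. */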
5440 dynamic_memaddr = get_dynamic (pid, is_elf64);
5441 if (dynamic_memaddr == 0)
5442 return map;
5443
5444 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5445 {
5446 if (is_elf64)
5447 {
5448 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5449 union
5450 {
5451 Elf64_Xword map;
5452 unsigned char buf[sizeof (Elf64_Xword)];
5453 }
5454 rld_map;
5455
5456 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5457 {
5458 if (linux_read_memory (dyn->d_un.d_val,
5459 rld_map.buf, sizeof (rld_map.buf)) == 0)
5460 return rld_map.map;
5461 else
5462 break;
5463 }
5464
5465 if (dyn->d_tag == DT_DEBUG && map == -1)
5466 map = dyn->d_un.d_val;
5467
5468 if (dyn->d_tag == DT_NULL)
5469 break;
5470 }
5471 else
5472 {
5473 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5474 union
5475 {
5476 Elf32_Word map;
5477 unsigned char buf[sizeof (Elf32_Word)];
5478 }
5479 rld_map;
5480
5481 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5482 {
5483 if (linux_read_memory (dyn->d_un.d_val,
5484 rld_map.buf, sizeof (rld_map.buf)) == 0)
5485 return rld_map.map;
5486 else
5487 break;
5488 }
5489
5490 if (dyn->d_tag == DT_DEBUG && map == -1)
5491 map = dyn->d_un.d_val;
5492
5493 if (dyn->d_tag == DT_NULL)
5494 break;
5495 }
5496
5497 dynamic_memaddr += dyn_size;
5498 }
5499
5500 return map;
5501 }
5502
5503 /* Read one pointer from MEMADDR in the inferior. */
5504
5505 static int
5506 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5507 {
5508 int ret;
5509
5510 /* Go through a union so this works on either big or little endian
5511 hosts, when the inferior's pointer size is smaller than the size
5512 of CORE_ADDR. It is assumed that the inferior's endianness is the
5513 same as the superior's. */
5514 union
5515 {
5516 CORE_ADDR core_addr;
5517 unsigned int ui;
5518 unsigned char uc;
5519 } addr;
5520
5521 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5522 if (ret == 0)
5523 {
5524 if (ptr_size == sizeof (CORE_ADDR))
5525 *ptr = addr.core_addr;
5526 else if (ptr_size == sizeof (unsigned int))
5527 *ptr = addr.ui;
5528 else
5529 gdb_assert_not_reached ("unhandled pointer size");
5530 }
5531 return ret;
5532 }
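/* Concretely: on a 64-bit big-endian host reading a 4-byte inferior
   pointer, the four fetched bytes land at the start of the union,
   i.e. in the high-order half of addr.core_addr; reading addr.ui
   instead interprets exactly those four bytes as the pointer value,
   which is why the size check above selects the matching member. */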
5533
5534 struct link_map_offsets
5535 {
5536 /* Offset of r_debug.r_version. */
5537 int r_version_offset;
5538
5539 /* Offset of r_debug.r_map. */
5540 int r_map_offset;
5541
5542 /* Offset to l_addr field in struct link_map. */
5543 int l_addr_offset;
5544
5545 /* Offset to l_name field in struct link_map. */
5546 int l_name_offset;
5547
5548 /* Offset to l_ld field in struct link_map. */
5549 int l_ld_offset;
5550
5551 /* Offset to l_next field in struct link_map. */
5552 int l_next_offset;
5553
5554 /* Offset to l_prev field in struct link_map. */
5555 int l_prev_offset;
5556 };
5557
5558 /* Construct qXfer:libraries-svr4:read reply. */
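/* A reply built here looks roughly like this (addresses are
   illustrative):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffd940">
       <library name="/lib/libc.so.6" lm="0x7ffff7fc3700"
                l_addr="0x7ffff7a1b000" l_ld="0x7ffff7dbbb40"/>
     </library-list-svr4>  */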
5559
5560 static int
5561 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5562 unsigned const char *writebuf,
5563 CORE_ADDR offset, int len)
5564 {
5565 char *document;
5566 unsigned document_len;
5567 struct process_info_private *const priv = current_process ()->private;
5568 char filename[PATH_MAX];
5569 int pid, is_elf64;
5570
5571 static const struct link_map_offsets lmo_32bit_offsets =
5572 {
5573 0, /* r_version offset. */
5574 4, /* r_debug.r_map offset. */
5575 0, /* l_addr offset in link_map. */
5576 4, /* l_name offset in link_map. */
5577 8, /* l_ld offset in link_map. */
5578 12, /* l_next offset in link_map. */
5579 16 /* l_prev offset in link_map. */
5580 };
5581
5582 static const struct link_map_offsets lmo_64bit_offsets =
5583 {
5584 0, /* r_version offset. */
5585 8, /* r_debug.r_map offset. */
5586 0, /* l_addr offset in link_map. */
5587 8, /* l_name offset in link_map. */
5588 16, /* l_ld offset in link_map. */
5589 24, /* l_next offset in link_map. */
5590 32 /* l_prev offset in link_map. */
5591 };
5592 const struct link_map_offsets *lmo;
5593 unsigned int machine;
5594
5595 if (writebuf != NULL)
5596 return -2;
5597 if (readbuf == NULL)
5598 return -1;
5599
5600 pid = lwpid_of (get_thread_lwp (current_inferior));
5601 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5602 is_elf64 = elf_64_file_p (filename, &machine);
5603 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5604
5605 if (priv->r_debug == 0)
5606 priv->r_debug = get_r_debug (pid, is_elf64);
5607
5608 if (priv->r_debug == (CORE_ADDR) -1 || priv->r_debug == 0)
5609 {
5610 document = xstrdup ("<library-list-svr4 version=\"1.0\"/>\n");
5611 }
5612 else
5613 {
5614 int allocated = 1024;
5615 char *p;
5616 const int ptr_size = is_elf64 ? 8 : 4;
5617 CORE_ADDR lm_addr, lm_prev, l_name, l_addr, l_ld, l_next, l_prev;
5618 int r_version, header_done = 0;
5619
5620 document = xmalloc (allocated);
5621 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5622 p = document + strlen (document);
5623
5624 r_version = 0;
5625 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5626 (unsigned char *) &r_version,
5627 sizeof (r_version)) != 0
5628 || r_version != 1)
5629 {
5630 warning ("unexpected r_debug version %d", r_version);
5631 goto done;
5632 }
5633
5634 if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5635 &lm_addr, ptr_size) != 0)
5636 {
5637 warning ("unable to read r_map from 0x%lx",
5638 (long) priv->r_debug + lmo->r_map_offset);
5639 goto done;
5640 }
5641
5642 lm_prev = 0;
5643 while (read_one_ptr (lm_addr + lmo->l_name_offset,
5644 &l_name, ptr_size) == 0
5645 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5646 &l_addr, ptr_size) == 0
5647 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5648 &l_ld, ptr_size) == 0
5649 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5650 &l_prev, ptr_size) == 0
5651 && read_one_ptr (lm_addr + lmo->l_next_offset,
5652 &l_next, ptr_size) == 0)
5653 {
5654 unsigned char libname[PATH_MAX];
5655
5656 if (lm_prev != l_prev)
5657 {
5658 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5659 (long) lm_prev, (long) l_prev);
5660 break;
5661 }
5662
5663 /* Not checking for error because reading may stop before
5664 we've got PATH_MAX worth of characters. */
5665 libname[0] = '\0';
5666 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5667 libname[sizeof (libname) - 1] = '\0';
5668 if (libname[0] != '\0')
5669 {
5670 /* 6x the size for xml_escape_text below. */
5671 size_t len = 6 * strlen ((char *) libname);
5672 char *name;
5673
5674 if (!header_done)
5675 {
5676 /* Terminate `<library-list-svr4'. */
5677 *p++ = '>';
5678 header_done = 1;
5679 }
5680
5681 while (allocated < p - document + len + 200)
5682 {
5683 /* Expand to guarantee sufficient storage. */
5684 uintptr_t document_len = p - document;
5685
5686 document = xrealloc (document, 2 * allocated);
5687 allocated *= 2;
5688 p = document + document_len;
5689 }
5690
5691 name = xml_escape_text ((char *) libname);
5692 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5693 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5694 name, (unsigned long) lm_addr,
5695 (unsigned long) l_addr, (unsigned long) l_ld);
5696 free (name);
5697 }
5698 else if (lm_prev == 0)
5699 {
5700 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5701 p = p + strlen (p);
5702 }
5703
5704 if (l_next == 0)
5705 break;
5706
5707 lm_prev = lm_addr;
5708 lm_addr = l_next;
5709 }
5710 done:
5711 if (!header_done)
5712 {
5713 /* Empty list; terminate `<library-list-svr4'. */
5714 strcpy (p, "/>");
5715 }
5716 else
5717 strcpy (p, "</library-list-svr4>");
5718 }
5719
5720 document_len = strlen (document);
5721 if (offset < document_len)
5722 document_len -= offset;
5723 else
5724 document_len = 0;
5725 if (len > document_len)
5726 len = document_len;
5727
5728 memcpy (readbuf, document + offset, len);
5729 xfree (document);
5730
5731 return len;
5732 }
5733
5734 static struct target_ops linux_target_ops = {
5735 linux_create_inferior,
5736 linux_attach,
5737 linux_kill,
5738 linux_detach,
5739 linux_mourn,
5740 linux_join,
5741 linux_thread_alive,
5742 linux_resume,
5743 linux_wait,
5744 linux_fetch_registers,
5745 linux_store_registers,
5746 linux_prepare_to_access_memory,
5747 linux_done_accessing_memory,
5748 linux_read_memory,
5749 linux_write_memory,
5750 linux_look_up_symbols,
5751 linux_request_interrupt,
5752 linux_read_auxv,
5753 linux_insert_point,
5754 linux_remove_point,
5755 linux_stopped_by_watchpoint,
5756 linux_stopped_data_address,
5757 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
5758 linux_read_offsets,
5759 #else
5760 NULL,
5761 #endif
5762 #ifdef USE_THREAD_DB
5763 thread_db_get_tls_address,
5764 #else
5765 NULL,
5766 #endif
5767 linux_qxfer_spu,
5768 hostio_last_error_from_errno,
5769 linux_qxfer_osdata,
5770 linux_xfer_siginfo,
5771 linux_supports_non_stop,
5772 linux_async,
5773 linux_start_non_stop,
5774 linux_supports_multi_process,
5775 #ifdef USE_THREAD_DB
5776 thread_db_handle_monitor_command,
5777 #else
5778 NULL,
5779 #endif
5780 linux_common_core_of_thread,
5781 linux_read_loadmap,
5782 linux_process_qsupported,
5783 linux_supports_tracepoints,
5784 linux_read_pc,
5785 linux_write_pc,
5786 linux_thread_stopped,
5787 NULL,
5788 linux_pause_all,
5789 linux_unpause_all,
5790 linux_cancel_breakpoints,
5791 linux_stabilize_threads,
5792 linux_install_fast_tracepoint_jump_pad,
5793 linux_emit_ops,
5794 linux_supports_disable_randomization,
5795 linux_get_min_fast_tracepoint_insn_len,
5796 linux_qxfer_libraries_svr4,
5797 linux_supports_agent,
5798 };
5799
5800 static void
5801 linux_init_signals (void)
5802 {
5803 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
5804 to find what the cancel signal actually is. */
5805 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
5806 signal (__SIGRTMIN+1, SIG_IGN);
5807 #endif
5808 }
5809
5810 void
5811 initialize_low (void)
5812 {
5813 struct sigaction sigchld_action;
5814 memset (&sigchld_action, 0, sizeof (sigchld_action));
5815 set_target_ops (&linux_target_ops);
5816 set_breakpoint_data (the_low_target.breakpoint,
5817 the_low_target.breakpoint_len);
5818 linux_init_signals ();
5819 linux_test_for_tracefork ();
5820 #ifdef HAVE_LINUX_REGSETS
5821 for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
5822 ;
5823 disabled_regsets = xmalloc (num_regsets);
5824 #endif
5825
5826 sigchld_action.sa_handler = sigchld_handler;
5827 sigemptyset (&sigchld_action.sa_mask);
5828 sigchld_action.sa_flags = SA_RESTART;
5829 sigaction (SIGCHLD, &sigchld_action, NULL);
5830 }