Move safe_strerror to common/
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <sys/ptrace.h>
28 #include "nat/linux-ptrace.h"
29 #include "nat/linux-procfs.h"
30 #include <signal.h>
31 #include <sys/ioctl.h>
32 #include <fcntl.h>
33 #include <unistd.h>
34 #include <sys/syscall.h>
35 #include <sched.h>
36 #include <ctype.h>
37 #include <pwd.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <sys/vfs.h>
42 #include <sys/uio.h>
43 #include "filestuff.h"
44 #include "tracepoint.h"
45 #include "hostio.h"
46 #ifndef ELFMAG0
47 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
48 then ELFMAG0 will have been defined. If it didn't get included by
49 gdb_proc_service.h then including it will likely introduce a duplicate
50 definition of elf_fpregset_t. */
51 #include <elf.h>
52 #endif
53
54 #ifndef SPUFS_MAGIC
55 #define SPUFS_MAGIC 0x23c9b64e
56 #endif
57
58 #ifdef HAVE_PERSONALITY
59 # include <sys/personality.h>
60 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
61 # define ADDR_NO_RANDOMIZE 0x0040000
62 # endif
63 #endif
64
65 #ifndef O_LARGEFILE
66 #define O_LARGEFILE 0
67 #endif
68
69 #ifndef W_STOPCODE
70 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
71 #endif
72
73 /* This is the kernel's hard limit. Not to be confused with
74 SIGRTMIN. */
75 #ifndef __SIGRTMIN
76 #define __SIGRTMIN 32
77 #endif
78
79 /* Some targets did not define these ptrace constants from the start,
80 so gdbserver defines them locally here. In the future, these may
81 be removed after they are added to asm/ptrace.h. */
82 #if !(defined(PT_TEXT_ADDR) \
83 || defined(PT_DATA_ADDR) \
84 || defined(PT_TEXT_END_ADDR))
85 #if defined(__mcoldfire__)
86 /* These are still undefined in 3.10 kernels. */
87 #define PT_TEXT_ADDR 49*4
88 #define PT_DATA_ADDR 50*4
89 #define PT_TEXT_END_ADDR 51*4
90 /* BFIN already defines these since at least 2.6.32 kernels. */
91 #elif defined(BFIN)
92 #define PT_TEXT_ADDR 220
93 #define PT_TEXT_END_ADDR 224
94 #define PT_DATA_ADDR 228
95 /* These are still undefined in 3.10 kernels. */
96 #elif defined(__TMS320C6X__)
97 #define PT_TEXT_ADDR (0x10000*4)
98 #define PT_DATA_ADDR (0x10004*4)
99 #define PT_TEXT_END_ADDR (0x10008*4)
100 #endif
101 #endif
102
103 #ifdef HAVE_LINUX_BTRACE
104 # include "nat/linux-btrace.h"
105 #endif
106
107 #ifndef HAVE_ELF32_AUXV_T
108 /* Copied from glibc's elf.h. */
109 typedef struct
110 {
111 uint32_t a_type; /* Entry type */
112 union
113 {
114 uint32_t a_val; /* Integer value */
115 /* We use to have pointer elements added here. We cannot do that,
116 though, since it does not work when using 32-bit definitions
117 on 64-bit platforms and vice versa. */
118 } a_un;
119 } Elf32_auxv_t;
120 #endif
121
122 #ifndef HAVE_ELF64_AUXV_T
123 /* Copied from glibc's elf.h. */
124 typedef struct
125 {
126 uint64_t a_type; /* Entry type */
127 union
128 {
129 uint64_t a_val; /* Integer value */
130 /* We use to have pointer elements added here. We cannot do that,
131 though, since it does not work when using 32-bit definitions
132 on 64-bit platforms and vice versa. */
133 } a_un;
134 } Elf64_auxv_t;
135 #endif
136
137 /* A list of all unknown processes which receive stop signals. Some
138 other process will presumably claim each of these as forked
139 children momentarily. */
140
141 struct simple_pid_list
142 {
143 /* The process ID. */
144 int pid;
145
146 /* The status as reported by waitpid. */
147 int status;
148
149 /* Next in chain. */
150 struct simple_pid_list *next;
151 };
152 struct simple_pid_list *stopped_pids;
153
154 /* Trivial list manipulation functions to keep track of a list of new
155 stopped processes. */
156
157 static void
158 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
159 {
160 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
161
162 new_pid->pid = pid;
163 new_pid->status = status;
164 new_pid->next = *listp;
165 *listp = new_pid;
166 }
167
168 static int
169 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
170 {
171 struct simple_pid_list **p;
172
173 for (p = listp; *p != NULL; p = &(*p)->next)
174 if ((*p)->pid == pid)
175 {
176 struct simple_pid_list *next = (*p)->next;
177
178 *statusp = (*p)->status;
179 xfree (*p);
180 *p = next;
181 return 1;
182 }
183 return 0;
184 }
185
186 enum stopping_threads_kind
187 {
188 /* Not stopping threads presently. */
189 NOT_STOPPING_THREADS,
190
191 /* Stopping threads. */
192 STOPPING_THREADS,
193
194 /* Stopping and suspending threads. */
195 STOPPING_AND_SUSPENDING_THREADS
196 };
197
198 /* This is set while stop_all_lwps is in effect. */
199 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
200
201 /* FIXME make into a target method? */
202 int using_threads = 1;
203
204 /* True if we're presently stabilizing threads (moving them out of
205 jump pads). */
206 static int stabilizing_threads;
207
208 static void linux_resume_one_lwp (struct lwp_info *lwp,
209 int step, int signal, siginfo_t *info);
210 static void linux_resume (struct thread_resume *resume_info, size_t n);
211 static void stop_all_lwps (int suspend, struct lwp_info *except);
212 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
213 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
214 int *wstat, int options);
215 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
216 static struct lwp_info *add_lwp (ptid_t ptid);
217 static int linux_stopped_by_watchpoint (void);
218 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
219 static void proceed_all_lwps (void);
220 static int finish_step_over (struct lwp_info *lwp);
221 static int kill_lwp (unsigned long lwpid, int signo);
222
223 /* When the event-loop is doing a step-over, this points at the thread
224 being stepped. */
225 ptid_t step_over_bkpt;
226
227 /* True if the low target can hardware single-step. Such targets
228 don't need a BREAKPOINT_REINSERT_ADDR callback. */
229
230 static int
231 can_hardware_single_step (void)
232 {
233 return (the_low_target.breakpoint_reinsert_addr == NULL);
234 }
235
236 /* True if the low target supports memory breakpoints. If so, we'll
237 have a GET_PC implementation. */
238
239 static int
240 supports_breakpoints (void)
241 {
242 return (the_low_target.get_pc != NULL);
243 }
244
245 /* Returns true if this target can support fast tracepoints. This
246 does not mean that the in-process agent has been loaded in the
247 inferior. */
248
249 static int
250 supports_fast_tracepoints (void)
251 {
252 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
253 }
254
255 /* True if LWP is stopped in its stepping range. */
256
257 static int
258 lwp_in_step_range (struct lwp_info *lwp)
259 {
260 CORE_ADDR pc = lwp->stop_pc;
261
262 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
263 }
264
265 struct pending_signals
266 {
267 int signal;
268 siginfo_t info;
269 struct pending_signals *prev;
270 };
271
272 /* The read/write ends of the pipe registered as waitable file in the
273 event loop. */
274 static int linux_event_pipe[2] = { -1, -1 };
275
276 /* True if we're currently in async mode. */
277 #define target_is_async_p() (linux_event_pipe[0] != -1)
278
279 static void send_sigstop (struct lwp_info *lwp);
280 static void wait_for_sigstop (void);
281
282 /* Return non-zero if HEADER is a 64-bit ELF file. */
283
284 static int
285 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
286 {
287 if (header->e_ident[EI_MAG0] == ELFMAG0
288 && header->e_ident[EI_MAG1] == ELFMAG1
289 && header->e_ident[EI_MAG2] == ELFMAG2
290 && header->e_ident[EI_MAG3] == ELFMAG3)
291 {
292 *machine = header->e_machine;
293 return header->e_ident[EI_CLASS] == ELFCLASS64;
294
295 }
296 *machine = EM_NONE;
297 return -1;
298 }
299
300 /* Return non-zero if FILE is a 64-bit ELF file,
301 zero if the file is not a 64-bit ELF file,
302 and -1 if the file is not accessible or doesn't exist. */
303
304 static int
305 elf_64_file_p (const char *file, unsigned int *machine)
306 {
307 Elf64_Ehdr header;
308 int fd;
309
310 fd = open (file, O_RDONLY);
311 if (fd < 0)
312 return -1;
313
314 if (read (fd, &header, sizeof (header)) != sizeof (header))
315 {
316 close (fd);
317 return 0;
318 }
319 close (fd);
320
321 return elf_64_header_p (&header, machine);
322 }
323
324 /* Accepts an integer PID; Returns true if the executable PID is
325 running is a 64-bit ELF file.. */
326
327 int
328 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
329 {
330 char file[PATH_MAX];
331
332 sprintf (file, "/proc/%d/exe", pid);
333 return elf_64_file_p (file, machine);
334 }
335
336 static void
337 delete_lwp (struct lwp_info *lwp)
338 {
339 struct thread_info *thr = get_lwp_thread (lwp);
340
341 if (debug_threads)
342 debug_printf ("deleting %ld\n", lwpid_of (thr));
343
344 remove_thread (thr);
345 free (lwp->arch_private);
346 free (lwp);
347 }
348
349 /* Add a process to the common process list, and set its private
350 data. */
351
352 static struct process_info *
353 linux_add_process (int pid, int attached)
354 {
355 struct process_info *proc;
356
357 proc = add_process (pid, attached);
358 proc->private = xcalloc (1, sizeof (*proc->private));
359
360 /* Set the arch when the first LWP stops. */
361 proc->private->new_inferior = 1;
362
363 if (the_low_target.new_process != NULL)
364 proc->private->arch_private = the_low_target.new_process ();
365
366 return proc;
367 }
368
369 static CORE_ADDR get_pc (struct lwp_info *lwp);
370
371 /* Handle a GNU/Linux extended wait response. If we see a clone
372 event, we need to add the new LWP to our list (and not report the
373 trap to higher layers). */
374
375 static void
376 handle_extended_wait (struct lwp_info *event_child, int wstat)
377 {
378 int event = linux_ptrace_get_extended_event (wstat);
379 struct thread_info *event_thr = get_lwp_thread (event_child);
380 struct lwp_info *new_lwp;
381
382 if (event == PTRACE_EVENT_CLONE)
383 {
384 ptid_t ptid;
385 unsigned long new_pid;
386 int ret, status;
387
388 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
389 &new_pid);
390
391 /* If we haven't already seen the new PID stop, wait for it now. */
392 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
393 {
394 /* The new child has a pending SIGSTOP. We can't affect it until it
395 hits the SIGSTOP, but we're already attached. */
396
397 ret = my_waitpid (new_pid, &status, __WALL);
398
399 if (ret == -1)
400 perror_with_name ("waiting for new child");
401 else if (ret != new_pid)
402 warning ("wait returned unexpected PID %d", ret);
403 else if (!WIFSTOPPED (status))
404 warning ("wait returned unexpected status 0x%x", status);
405 }
406
407 if (debug_threads)
408 debug_printf ("HEW: Got clone event "
409 "from LWP %ld, new child is LWP %ld\n",
410 lwpid_of (event_thr), new_pid);
411
412 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
413 new_lwp = add_lwp (ptid);
414
415 /* Either we're going to immediately resume the new thread
416 or leave it stopped. linux_resume_one_lwp is a nop if it
417 thinks the thread is currently running, so set this first
418 before calling linux_resume_one_lwp. */
419 new_lwp->stopped = 1;
420
421 /* If we're suspending all threads, leave this one suspended
422 too. */
423 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
424 new_lwp->suspended = 1;
425
426 /* Normally we will get the pending SIGSTOP. But in some cases
427 we might get another signal delivered to the group first.
428 If we do get another signal, be sure not to lose it. */
429 if (WSTOPSIG (status) == SIGSTOP)
430 {
431 if (stopping_threads == NOT_STOPPING_THREADS)
432 linux_resume_one_lwp (new_lwp, 0, 0, NULL);
433 }
434 else
435 {
436 new_lwp->stop_expected = 1;
437
438 if (stopping_threads != NOT_STOPPING_THREADS)
439 {
440 new_lwp->status_pending_p = 1;
441 new_lwp->status_pending = status;
442 }
443 else
444 /* Pass the signal on. This is what GDB does - except
445 shouldn't we really report it instead? */
446 linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
447 }
448
449 /* Always resume the current thread. If we are stopping
450 threads, it will have a pending SIGSTOP; we may as well
451 collect it now. */
452 linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
453 }
454 }
455
456 /* Return the PC as read from the regcache of LWP, without any
457 adjustment. */
458
459 static CORE_ADDR
460 get_pc (struct lwp_info *lwp)
461 {
462 struct thread_info *saved_thread;
463 struct regcache *regcache;
464 CORE_ADDR pc;
465
466 if (the_low_target.get_pc == NULL)
467 return 0;
468
469 saved_thread = current_thread;
470 current_thread = get_lwp_thread (lwp);
471
472 regcache = get_thread_regcache (current_thread, 1);
473 pc = (*the_low_target.get_pc) (regcache);
474
475 if (debug_threads)
476 debug_printf ("pc is 0x%lx\n", (long) pc);
477
478 current_thread = saved_thread;
479 return pc;
480 }
481
482 /* This function should only be called if LWP got a SIGTRAP.
483 The SIGTRAP could mean several things.
484
485 On i386, where decr_pc_after_break is non-zero:
486
487 If we were single-stepping this process using PTRACE_SINGLESTEP, we
488 will get only the one SIGTRAP. The value of $eip will be the next
489 instruction. If the instruction we stepped over was a breakpoint,
490 we need to decrement the PC.
491
492 If we continue the process using PTRACE_CONT, we will get a
493 SIGTRAP when we hit a breakpoint. The value of $eip will be
494 the instruction after the breakpoint (i.e. needs to be
495 decremented). If we report the SIGTRAP to GDB, we must also
496 report the undecremented PC. If the breakpoint is removed, we
497 must resume at the decremented PC.
498
499 On a non-decr_pc_after_break machine with hardware or kernel
500 single-step:
501
502 If we either single-step a breakpoint instruction, or continue and
503 hit a breakpoint instruction, our PC will point at the breakpoint
504 instruction. */
505
506 static int
507 check_stopped_by_breakpoint (struct lwp_info *lwp)
508 {
509 CORE_ADDR pc;
510 CORE_ADDR sw_breakpoint_pc;
511 struct thread_info *saved_thread;
512
513 if (the_low_target.get_pc == NULL)
514 return 0;
515
516 pc = get_pc (lwp);
517 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
518
519 /* breakpoint_at reads from the current thread. */
520 saved_thread = current_thread;
521 current_thread = get_lwp_thread (lwp);
522
523 /* We may have just stepped a breakpoint instruction. E.g., in
524 non-stop mode, GDB first tells the thread A to step a range, and
525 then the user inserts a breakpoint inside the range. In that
526 case, we need to report the breakpoint PC. But, when we're
527 trying to step past one of our own breakpoints, that happens to
528 have been placed on top of a permanent breakpoint instruction, we
529 shouldn't adjust the PC, otherwise the program would keep
530 trapping the permanent breakpoint forever. */
531 if ((!lwp->stepping
532 || (!ptid_equal (ptid_of (current_thread), step_over_bkpt)
533 && lwp->stop_pc == sw_breakpoint_pc))
534 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
535 {
536 if (debug_threads)
537 {
538 struct thread_info *thr = get_lwp_thread (lwp);
539
540 debug_printf ("CSBB: %s stopped by software breakpoint\n",
541 target_pid_to_str (ptid_of (thr)));
542 }
543
544 /* Back up the PC if necessary. */
545 if (pc != sw_breakpoint_pc)
546 {
547 struct regcache *regcache
548 = get_thread_regcache (current_thread, 1);
549 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
550 }
551
552 lwp->stop_pc = sw_breakpoint_pc;
553 lwp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
554 current_thread = saved_thread;
555 return 1;
556 }
557
558 if (hardware_breakpoint_inserted_here (pc))
559 {
560 if (debug_threads)
561 {
562 struct thread_info *thr = get_lwp_thread (lwp);
563
564 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
565 target_pid_to_str (ptid_of (thr)));
566 }
567
568 lwp->stop_pc = pc;
569 lwp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
570 current_thread = saved_thread;
571 return 1;
572 }
573
574 current_thread = saved_thread;
575 return 0;
576 }
577
578 static struct lwp_info *
579 add_lwp (ptid_t ptid)
580 {
581 struct lwp_info *lwp;
582
583 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
584 memset (lwp, 0, sizeof (*lwp));
585
586 if (the_low_target.new_thread != NULL)
587 lwp->arch_private = the_low_target.new_thread ();
588
589 lwp->thread = add_thread (ptid, lwp);
590
591 return lwp;
592 }
593
594 /* Start an inferior process and returns its pid.
595 ALLARGS is a vector of program-name and args. */
596
597 static int
598 linux_create_inferior (char *program, char **allargs)
599 {
600 #ifdef HAVE_PERSONALITY
601 int personality_orig = 0, personality_set = 0;
602 #endif
603 struct lwp_info *new_lwp;
604 int pid;
605 ptid_t ptid;
606
607 #ifdef HAVE_PERSONALITY
608 if (disable_randomization)
609 {
610 errno = 0;
611 personality_orig = personality (0xffffffff);
612 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
613 {
614 personality_set = 1;
615 personality (personality_orig | ADDR_NO_RANDOMIZE);
616 }
617 if (errno != 0 || (personality_set
618 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
619 warning ("Error disabling address space randomization: %s",
620 strerror (errno));
621 }
622 #endif
623
624 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
625 pid = vfork ();
626 #else
627 pid = fork ();
628 #endif
629 if (pid < 0)
630 perror_with_name ("fork");
631
632 if (pid == 0)
633 {
634 close_most_fds ();
635 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
636
637 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
638 signal (__SIGRTMIN + 1, SIG_DFL);
639 #endif
640
641 setpgid (0, 0);
642
643 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
644 stdout to stderr so that inferior i/o doesn't corrupt the connection.
645 Also, redirect stdin to /dev/null. */
646 if (remote_connection_is_stdio ())
647 {
648 close (0);
649 open ("/dev/null", O_RDONLY);
650 dup2 (2, 1);
651 if (write (2, "stdin/stdout redirected\n",
652 sizeof ("stdin/stdout redirected\n") - 1) < 0)
653 {
654 /* Errors ignored. */;
655 }
656 }
657
658 execv (program, allargs);
659 if (errno == ENOENT)
660 execvp (program, allargs);
661
662 fprintf (stderr, "Cannot exec %s: %s.\n", program,
663 strerror (errno));
664 fflush (stderr);
665 _exit (0177);
666 }
667
668 #ifdef HAVE_PERSONALITY
669 if (personality_set)
670 {
671 errno = 0;
672 personality (personality_orig);
673 if (errno != 0)
674 warning ("Error restoring address space randomization: %s",
675 strerror (errno));
676 }
677 #endif
678
679 linux_add_process (pid, 0);
680
681 ptid = ptid_build (pid, pid, 0);
682 new_lwp = add_lwp (ptid);
683 new_lwp->must_set_ptrace_flags = 1;
684
685 return pid;
686 }
687
688 /* Attach to an inferior process. Returns 0 on success, ERRNO on
689 error. */
690
691 int
692 linux_attach_lwp (ptid_t ptid)
693 {
694 struct lwp_info *new_lwp;
695 int lwpid = ptid_get_lwp (ptid);
696
697 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
698 != 0)
699 return errno;
700
701 new_lwp = add_lwp (ptid);
702
703 /* We need to wait for SIGSTOP before being able to make the next
704 ptrace call on this LWP. */
705 new_lwp->must_set_ptrace_flags = 1;
706
707 if (linux_proc_pid_is_stopped (lwpid))
708 {
709 if (debug_threads)
710 debug_printf ("Attached to a stopped process\n");
711
712 /* The process is definitely stopped. It is in a job control
713 stop, unless the kernel predates the TASK_STOPPED /
714 TASK_TRACED distinction, in which case it might be in a
715 ptrace stop. Make sure it is in a ptrace stop; from there we
716 can kill it, signal it, et cetera.
717
718 First make sure there is a pending SIGSTOP. Since we are
719 already attached, the process can not transition from stopped
720 to running without a PTRACE_CONT; so we know this signal will
721 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
722 probably already in the queue (unless this kernel is old
723 enough to use TASK_STOPPED for ptrace stops); but since
724 SIGSTOP is not an RT signal, it can only be queued once. */
725 kill_lwp (lwpid, SIGSTOP);
726
727 /* Finally, resume the stopped process. This will deliver the
728 SIGSTOP (or a higher priority signal, just like normal
729 PTRACE_ATTACH), which we'll catch later on. */
730 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
731 }
732
733 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
734 brings it to a halt.
735
736 There are several cases to consider here:
737
738 1) gdbserver has already attached to the process and is being notified
739 of a new thread that is being created.
740 In this case we should ignore that SIGSTOP and resume the
741 process. This is handled below by setting stop_expected = 1,
742 and the fact that add_thread sets last_resume_kind ==
743 resume_continue.
744
745 2) This is the first thread (the process thread), and we're attaching
746 to it via attach_inferior.
747 In this case we want the process thread to stop.
748 This is handled by having linux_attach set last_resume_kind ==
749 resume_stop after we return.
750
751 If the pid we are attaching to is also the tgid, we attach to and
752 stop all the existing threads. Otherwise, we attach to pid and
753 ignore any other threads in the same group as this pid.
754
755 3) GDB is connecting to gdbserver and is requesting an enumeration of all
756 existing threads.
757 In this case we want the thread to stop.
758 FIXME: This case is currently not properly handled.
759 We should wait for the SIGSTOP but don't. Things work apparently
760 because enough time passes between when we ptrace (ATTACH) and when
761 gdb makes the next ptrace call on the thread.
762
763 On the other hand, if we are currently trying to stop all threads, we
764 should treat the new thread as if we had sent it a SIGSTOP. This works
765 because we are guaranteed that the add_lwp call above added us to the
766 end of the list, and so the new thread has not yet reached
767 wait_for_sigstop (but will). */
768 new_lwp->stop_expected = 1;
769
770 return 0;
771 }
772
773 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
774 already attached. Returns true if a new LWP is found, false
775 otherwise. */
776
777 static int
778 attach_proc_task_lwp_callback (ptid_t ptid)
779 {
780 /* Is this a new thread? */
781 if (find_thread_ptid (ptid) == NULL)
782 {
783 int lwpid = ptid_get_lwp (ptid);
784 int err;
785
786 if (debug_threads)
787 debug_printf ("Found new lwp %d\n", lwpid);
788
789 err = linux_attach_lwp (ptid);
790
791 /* Be quiet if we simply raced with the thread exiting. EPERM
792 is returned if the thread's task still exists, and is marked
793 as exited or zombie, as well as other conditions, so in that
794 case, confirm the status in /proc/PID/status. */
795 if (err == ESRCH
796 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
797 {
798 if (debug_threads)
799 {
800 debug_printf ("Cannot attach to lwp %d: "
801 "thread is gone (%d: %s)\n",
802 lwpid, err, strerror (err));
803 }
804 }
805 else if (err != 0)
806 {
807 warning (_("Cannot attach to lwp %d: %s"),
808 lwpid,
809 linux_ptrace_attach_fail_reason_string (ptid, err));
810 }
811
812 return 1;
813 }
814 return 0;
815 }
816
817 /* Attach to PID. If PID is the tgid, attach to it and all
818 of its threads. */
819
820 static int
821 linux_attach (unsigned long pid)
822 {
823 ptid_t ptid = ptid_build (pid, pid, 0);
824 int err;
825
826 /* Attach to PID. We will check for other threads
827 soon. */
828 err = linux_attach_lwp (ptid);
829 if (err != 0)
830 error ("Cannot attach to process %ld: %s",
831 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
832
833 linux_add_process (pid, 1);
834
835 if (!non_stop)
836 {
837 struct thread_info *thread;
838
839 /* Don't ignore the initial SIGSTOP if we just attached to this
840 process. It will be collected by wait shortly. */
841 thread = find_thread_ptid (ptid_build (pid, pid, 0));
842 thread->last_resume_kind = resume_stop;
843 }
844
845 /* We must attach to every LWP. If /proc is mounted, use that to
846 find them now. On the one hand, the inferior may be using raw
847 clone instead of using pthreads. On the other hand, even if it
848 is using pthreads, GDB may not be connected yet (thread_db needs
849 to do symbol lookups, through qSymbol). Also, thread_db walks
850 structures in the inferior's address space to find the list of
851 threads/LWPs, and those structures may well be corrupted. Note
852 that once thread_db is loaded, we'll still use it to list threads
853 and associate pthread info with each LWP. */
854 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
855 return 0;
856 }
857
858 struct counter
859 {
860 int pid;
861 int count;
862 };
863
864 static int
865 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
866 {
867 struct counter *counter = args;
868
869 if (ptid_get_pid (entry->id) == counter->pid)
870 {
871 if (++counter->count > 1)
872 return 1;
873 }
874
875 return 0;
876 }
877
878 static int
879 last_thread_of_process_p (int pid)
880 {
881 struct counter counter = { pid , 0 };
882
883 return (find_inferior (&all_threads,
884 second_thread_of_pid_p, &counter) == NULL);
885 }
886
887 /* Kill LWP. */
888
889 static void
890 linux_kill_one_lwp (struct lwp_info *lwp)
891 {
892 struct thread_info *thr = get_lwp_thread (lwp);
893 int pid = lwpid_of (thr);
894
895 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
896 there is no signal context, and ptrace(PTRACE_KILL) (or
897 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
898 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
899 alternative is to kill with SIGKILL. We only need one SIGKILL
900 per process, not one for each thread. But since we still support
901 linuxthreads, and we also support debugging programs using raw
902 clone without CLONE_THREAD, we send one for each thread. For
903 years, we used PTRACE_KILL only, so we're being a bit paranoid
904 about some old kernels where PTRACE_KILL might work better
905 (dubious if there are any such, but that's why it's paranoia), so
906 we try SIGKILL first, PTRACE_KILL second, and so we're fine
907 everywhere. */
908
909 errno = 0;
910 kill_lwp (pid, SIGKILL);
911 if (debug_threads)
912 {
913 int save_errno = errno;
914
915 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
916 target_pid_to_str (ptid_of (thr)),
917 save_errno ? strerror (save_errno) : "OK");
918 }
919
920 errno = 0;
921 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
922 if (debug_threads)
923 {
924 int save_errno = errno;
925
926 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
927 target_pid_to_str (ptid_of (thr)),
928 save_errno ? strerror (save_errno) : "OK");
929 }
930 }
931
932 /* Kill LWP and wait for it to die. */
933
934 static void
935 kill_wait_lwp (struct lwp_info *lwp)
936 {
937 struct thread_info *thr = get_lwp_thread (lwp);
938 int pid = ptid_get_pid (ptid_of (thr));
939 int lwpid = ptid_get_lwp (ptid_of (thr));
940 int wstat;
941 int res;
942
943 if (debug_threads)
944 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
945
946 do
947 {
948 linux_kill_one_lwp (lwp);
949
950 /* Make sure it died. Notes:
951
952 - The loop is most likely unnecessary.
953
954 - We don't use linux_wait_for_event as that could delete lwps
955 while we're iterating over them. We're not interested in
956 any pending status at this point, only in making sure all
957 wait status on the kernel side are collected until the
958 process is reaped.
959
960 - We don't use __WALL here as the __WALL emulation relies on
961 SIGCHLD, and killing a stopped process doesn't generate
962 one, nor an exit status.
963 */
964 res = my_waitpid (lwpid, &wstat, 0);
965 if (res == -1 && errno == ECHILD)
966 res = my_waitpid (lwpid, &wstat, __WCLONE);
967 } while (res > 0 && WIFSTOPPED (wstat));
968
969 gdb_assert (res > 0);
970 }
971
972 /* Callback for `find_inferior'. Kills an lwp of a given process,
973 except the leader. */
974
975 static int
976 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
977 {
978 struct thread_info *thread = (struct thread_info *) entry;
979 struct lwp_info *lwp = get_thread_lwp (thread);
980 int pid = * (int *) args;
981
982 if (ptid_get_pid (entry->id) != pid)
983 return 0;
984
985 /* We avoid killing the first thread here, because of a Linux kernel (at
986 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
987 the children get a chance to be reaped, it will remain a zombie
988 forever. */
989
990 if (lwpid_of (thread) == pid)
991 {
992 if (debug_threads)
993 debug_printf ("lkop: is last of process %s\n",
994 target_pid_to_str (entry->id));
995 return 0;
996 }
997
998 kill_wait_lwp (lwp);
999 return 0;
1000 }
1001
1002 static int
1003 linux_kill (int pid)
1004 {
1005 struct process_info *process;
1006 struct lwp_info *lwp;
1007
1008 process = find_process_pid (pid);
1009 if (process == NULL)
1010 return -1;
1011
1012 /* If we're killing a running inferior, make sure it is stopped
1013 first, as PTRACE_KILL will not work otherwise. */
1014 stop_all_lwps (0, NULL);
1015
1016 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1017
1018 /* See the comment in linux_kill_one_lwp. We did not kill the first
1019 thread in the list, so do so now. */
1020 lwp = find_lwp_pid (pid_to_ptid (pid));
1021
1022 if (lwp == NULL)
1023 {
1024 if (debug_threads)
1025 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1026 pid);
1027 }
1028 else
1029 kill_wait_lwp (lwp);
1030
1031 the_target->mourn (process);
1032
1033 /* Since we presently can only stop all lwps of all processes, we
1034 need to unstop lwps of other processes. */
1035 unstop_all_lwps (0, NULL);
1036 return 0;
1037 }
1038
1039 /* Get pending signal of THREAD, for detaching purposes. This is the
1040 signal the thread last stopped for, which we need to deliver to the
1041 thread when detaching, otherwise, it'd be suppressed/lost. */
1042
1043 static int
1044 get_detach_signal (struct thread_info *thread)
1045 {
1046 enum gdb_signal signo = GDB_SIGNAL_0;
1047 int status;
1048 struct lwp_info *lp = get_thread_lwp (thread);
1049
1050 if (lp->status_pending_p)
1051 status = lp->status_pending;
1052 else
1053 {
1054 /* If the thread had been suspended by gdbserver, and it stopped
1055 cleanly, then it'll have stopped with SIGSTOP. But we don't
1056 want to deliver that SIGSTOP. */
1057 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1058 || thread->last_status.value.sig == GDB_SIGNAL_0)
1059 return 0;
1060
1061 /* Otherwise, we may need to deliver the signal we
1062 intercepted. */
1063 status = lp->last_status;
1064 }
1065
1066 if (!WIFSTOPPED (status))
1067 {
1068 if (debug_threads)
1069 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1070 target_pid_to_str (ptid_of (thread)));
1071 return 0;
1072 }
1073
1074 /* Extended wait statuses aren't real SIGTRAPs. */
1075 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1076 {
1077 if (debug_threads)
1078 debug_printf ("GPS: lwp %s had stopped with extended "
1079 "status: no pending signal\n",
1080 target_pid_to_str (ptid_of (thread)));
1081 return 0;
1082 }
1083
1084 signo = gdb_signal_from_host (WSTOPSIG (status));
1085
1086 if (program_signals_p && !program_signals[signo])
1087 {
1088 if (debug_threads)
1089 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1090 target_pid_to_str (ptid_of (thread)),
1091 gdb_signal_to_string (signo));
1092 return 0;
1093 }
1094 else if (!program_signals_p
1095 /* If we have no way to know which signals GDB does not
1096 want to have passed to the program, assume
1097 SIGTRAP/SIGINT, which is GDB's default. */
1098 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1099 {
1100 if (debug_threads)
1101 debug_printf ("GPS: lwp %s had signal %s, "
1102 "but we don't know if we should pass it. "
1103 "Default to not.\n",
1104 target_pid_to_str (ptid_of (thread)),
1105 gdb_signal_to_string (signo));
1106 return 0;
1107 }
1108 else
1109 {
1110 if (debug_threads)
1111 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1112 target_pid_to_str (ptid_of (thread)),
1113 gdb_signal_to_string (signo));
1114
1115 return WSTOPSIG (status);
1116 }
1117 }
1118
1119 static int
1120 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1121 {
1122 struct thread_info *thread = (struct thread_info *) entry;
1123 struct lwp_info *lwp = get_thread_lwp (thread);
1124 int pid = * (int *) args;
1125 int sig;
1126
1127 if (ptid_get_pid (entry->id) != pid)
1128 return 0;
1129
1130 /* If there is a pending SIGSTOP, get rid of it. */
1131 if (lwp->stop_expected)
1132 {
1133 if (debug_threads)
1134 debug_printf ("Sending SIGCONT to %s\n",
1135 target_pid_to_str (ptid_of (thread)));
1136
1137 kill_lwp (lwpid_of (thread), SIGCONT);
1138 lwp->stop_expected = 0;
1139 }
1140
1141 /* Flush any pending changes to the process's registers. */
1142 regcache_invalidate_thread (thread);
1143
1144 /* Pass on any pending signal for this thread. */
1145 sig = get_detach_signal (thread);
1146
1147 /* Finally, let it resume. */
1148 if (the_low_target.prepare_to_resume != NULL)
1149 the_low_target.prepare_to_resume (lwp);
1150 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1151 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1152 error (_("Can't detach %s: %s"),
1153 target_pid_to_str (ptid_of (thread)),
1154 strerror (errno));
1155
1156 delete_lwp (lwp);
1157 return 0;
1158 }
1159
1160 static int
1161 linux_detach (int pid)
1162 {
1163 struct process_info *process;
1164
1165 process = find_process_pid (pid);
1166 if (process == NULL)
1167 return -1;
1168
1169 /* Stop all threads before detaching. First, ptrace requires that
1170 the thread is stopped to sucessfully detach. Second, thread_db
1171 may need to uninstall thread event breakpoints from memory, which
1172 only works with a stopped process anyway. */
1173 stop_all_lwps (0, NULL);
1174
1175 #ifdef USE_THREAD_DB
1176 thread_db_detach (process);
1177 #endif
1178
1179 /* Stabilize threads (move out of jump pads). */
1180 stabilize_threads ();
1181
1182 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1183
1184 the_target->mourn (process);
1185
1186 /* Since we presently can only stop all lwps of all processes, we
1187 need to unstop lwps of other processes. */
1188 unstop_all_lwps (0, NULL);
1189 return 0;
1190 }
1191
1192 /* Remove all LWPs that belong to process PROC from the lwp list. */
1193
1194 static int
1195 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1196 {
1197 struct thread_info *thread = (struct thread_info *) entry;
1198 struct lwp_info *lwp = get_thread_lwp (thread);
1199 struct process_info *process = proc;
1200
1201 if (pid_of (thread) == pid_of (process))
1202 delete_lwp (lwp);
1203
1204 return 0;
1205 }
1206
1207 static void
1208 linux_mourn (struct process_info *process)
1209 {
1210 struct process_info_private *priv;
1211
1212 #ifdef USE_THREAD_DB
1213 thread_db_mourn (process);
1214 #endif
1215
1216 find_inferior (&all_threads, delete_lwp_callback, process);
1217
1218 /* Freeing all private data. */
1219 priv = process->private;
1220 free (priv->arch_private);
1221 free (priv);
1222 process->private = NULL;
1223
1224 remove_process (process);
1225 }
1226
1227 static void
1228 linux_join (int pid)
1229 {
1230 int status, ret;
1231
1232 do {
1233 ret = my_waitpid (pid, &status, 0);
1234 if (WIFEXITED (status) || WIFSIGNALED (status))
1235 break;
1236 } while (ret != -1 || errno != ECHILD);
1237 }
1238
1239 /* Return nonzero if the given thread is still alive. */
1240 static int
1241 linux_thread_alive (ptid_t ptid)
1242 {
1243 struct lwp_info *lwp = find_lwp_pid (ptid);
1244
1245 /* We assume we always know if a thread exits. If a whole process
1246 exited but we still haven't been able to report it to GDB, we'll
1247 hold on to the last lwp of the dead process. */
1248 if (lwp != NULL)
1249 return !lwp->dead;
1250 else
1251 return 0;
1252 }
1253
1254 /* Return 1 if this lwp still has an interesting status pending. If
1255 not (e.g., it had stopped for a breakpoint that is gone), return
1256 false. */
1257
1258 static int
1259 thread_still_has_status_pending_p (struct thread_info *thread)
1260 {
1261 struct lwp_info *lp = get_thread_lwp (thread);
1262
1263 if (!lp->status_pending_p)
1264 return 0;
1265
1266 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1267 report any status pending the LWP may have. */
1268 if (thread->last_resume_kind == resume_stop
1269 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1270 return 0;
1271
1272 if (thread->last_resume_kind != resume_stop
1273 && (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
1274 || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT))
1275 {
1276 struct thread_info *saved_thread;
1277 CORE_ADDR pc;
1278 int discard = 0;
1279
1280 gdb_assert (lp->last_status != 0);
1281
1282 pc = get_pc (lp);
1283
1284 saved_thread = current_thread;
1285 current_thread = thread;
1286
1287 if (pc != lp->stop_pc)
1288 {
1289 if (debug_threads)
1290 debug_printf ("PC of %ld changed\n",
1291 lwpid_of (thread));
1292 discard = 1;
1293 }
1294 else if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
1295 && !(*the_low_target.breakpoint_at) (pc))
1296 {
1297 if (debug_threads)
1298 debug_printf ("previous SW breakpoint of %ld gone\n",
1299 lwpid_of (thread));
1300 discard = 1;
1301 }
1302 else if (lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT
1303 && !hardware_breakpoint_inserted_here (pc))
1304 {
1305 if (debug_threads)
1306 debug_printf ("previous HW breakpoint of %ld gone\n",
1307 lwpid_of (thread));
1308 discard = 1;
1309 }
1310
1311 current_thread = saved_thread;
1312
1313 if (discard)
1314 {
1315 if (debug_threads)
1316 debug_printf ("discarding pending breakpoint status\n");
1317 lp->status_pending_p = 0;
1318 return 0;
1319 }
1320 }
1321
1322 return 1;
1323 }
1324
1325 /* Return 1 if this lwp has an interesting status pending. */
1326 static int
1327 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1328 {
1329 struct thread_info *thread = (struct thread_info *) entry;
1330 struct lwp_info *lp = get_thread_lwp (thread);
1331 ptid_t ptid = * (ptid_t *) arg;
1332
1333 /* Check if we're only interested in events from a specific process
1334 or its lwps. */
1335 if (!ptid_equal (minus_one_ptid, ptid)
1336 && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
1337 return 0;
1338
1339 if (lp->status_pending_p
1340 && !thread_still_has_status_pending_p (thread))
1341 {
1342 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1343 return 0;
1344 }
1345
1346 return lp->status_pending_p;
1347 }
1348
1349 static int
1350 same_lwp (struct inferior_list_entry *entry, void *data)
1351 {
1352 ptid_t ptid = *(ptid_t *) data;
1353 int lwp;
1354
1355 if (ptid_get_lwp (ptid) != 0)
1356 lwp = ptid_get_lwp (ptid);
1357 else
1358 lwp = ptid_get_pid (ptid);
1359
1360 if (ptid_get_lwp (entry->id) == lwp)
1361 return 1;
1362
1363 return 0;
1364 }
1365
1366 struct lwp_info *
1367 find_lwp_pid (ptid_t ptid)
1368 {
1369 struct inferior_list_entry *thread
1370 = find_inferior (&all_threads, same_lwp, &ptid);
1371
1372 if (thread == NULL)
1373 return NULL;
1374
1375 return get_thread_lwp ((struct thread_info *) thread);
1376 }
1377
1378 /* Return the number of known LWPs in the tgid given by PID. */
1379
1380 static int
1381 num_lwps (int pid)
1382 {
1383 struct inferior_list_entry *inf, *tmp;
1384 int count = 0;
1385
1386 ALL_INFERIORS (&all_threads, inf, tmp)
1387 {
1388 if (ptid_get_pid (inf->id) == pid)
1389 count++;
1390 }
1391
1392 return count;
1393 }
1394
1395 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1396 their exits until all other threads in the group have exited. */
1397
1398 static void
1399 check_zombie_leaders (void)
1400 {
1401 struct process_info *proc, *tmp;
1402
1403 ALL_PROCESSES (proc, tmp)
1404 {
1405 pid_t leader_pid = pid_of (proc);
1406 struct lwp_info *leader_lp;
1407
1408 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1409
1410 if (debug_threads)
1411 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1412 "num_lwps=%d, zombie=%d\n",
1413 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1414 linux_proc_pid_is_zombie (leader_pid));
1415
1416 if (leader_lp != NULL
1417 /* Check if there are other threads in the group, as we may
1418 have raced with the inferior simply exiting. */
1419 && !last_thread_of_process_p (leader_pid)
1420 && linux_proc_pid_is_zombie (leader_pid))
1421 {
1422 /* A leader zombie can mean one of two things:
1423
1424 - It exited, and there's an exit status pending
1425 available, or only the leader exited (not the whole
1426 program). In the latter case, we can't waitpid the
1427 leader's exit status until all other threads are gone.
1428
1429 - There are 3 or more threads in the group, and a thread
1430 other than the leader exec'd. On an exec, the Linux
1431 kernel destroys all other threads (except the execing
1432 one) in the thread group, and resets the execing thread's
1433 tid to the tgid. No exit notification is sent for the
1434 execing thread -- from the ptracer's perspective, it
1435 appears as though the execing thread just vanishes.
1436 Until we reap all other threads except the leader and the
1437 execing thread, the leader will be zombie, and the
1438 execing thread will be in `D (disc sleep)'. As soon as
1439 all other threads are reaped, the execing thread changes
1440 it's tid to the tgid, and the previous (zombie) leader
1441 vanishes, giving place to the "new" leader. We could try
1442 distinguishing the exit and exec cases, by waiting once
1443 more, and seeing if something comes out, but it doesn't
1444 sound useful. The previous leader _does_ go away, and
1445 we'll re-add the new one once we see the exec event
1446 (which is just the same as what would happen if the
1447 previous leader did exit voluntarily before some other
1448 thread execs). */
1449
1450 if (debug_threads)
1451 fprintf (stderr,
1452 "CZL: Thread group leader %d zombie "
1453 "(it exited, or another thread execd).\n",
1454 leader_pid);
1455
1456 delete_lwp (leader_lp);
1457 }
1458 }
1459 }
1460
1461 /* Callback for `find_inferior'. Returns the first LWP that is not
1462 stopped. ARG is a PTID filter. */
1463
1464 static int
1465 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1466 {
1467 struct thread_info *thr = (struct thread_info *) entry;
1468 struct lwp_info *lwp;
1469 ptid_t filter = *(ptid_t *) arg;
1470
1471 if (!ptid_match (ptid_of (thr), filter))
1472 return 0;
1473
1474 lwp = get_thread_lwp (thr);
1475 if (!lwp->stopped)
1476 return 1;
1477
1478 return 0;
1479 }
1480
1481 /* This function should only be called if the LWP got a SIGTRAP.
1482
1483 Handle any tracepoint steps or hits. Return true if a tracepoint
1484 event was handled, 0 otherwise. */
1485
1486 static int
1487 handle_tracepoints (struct lwp_info *lwp)
1488 {
1489 struct thread_info *tinfo = get_lwp_thread (lwp);
1490 int tpoint_related_event = 0;
1491
1492 gdb_assert (lwp->suspended == 0);
1493
1494 /* If this tracepoint hit causes a tracing stop, we'll immediately
1495 uninsert tracepoints. To do this, we temporarily pause all
1496 threads, unpatch away, and then unpause threads. We need to make
1497 sure the unpausing doesn't resume LWP too. */
1498 lwp->suspended++;
1499
1500 /* And we need to be sure that any all-threads-stopping doesn't try
1501 to move threads out of the jump pads, as it could deadlock the
1502 inferior (LWP could be in the jump pad, maybe even holding the
1503 lock.) */
1504
1505 /* Do any necessary step collect actions. */
1506 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1507
1508 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1509
1510 /* See if we just hit a tracepoint and do its main collect
1511 actions. */
1512 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1513
1514 lwp->suspended--;
1515
1516 gdb_assert (lwp->suspended == 0);
1517 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1518
1519 if (tpoint_related_event)
1520 {
1521 if (debug_threads)
1522 debug_printf ("got a tracepoint event\n");
1523 return 1;
1524 }
1525
1526 return 0;
1527 }
1528
1529 /* Convenience wrapper. Returns true if LWP is presently collecting a
1530 fast tracepoint. */
1531
1532 static int
1533 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1534 struct fast_tpoint_collect_status *status)
1535 {
1536 CORE_ADDR thread_area;
1537 struct thread_info *thread = get_lwp_thread (lwp);
1538
1539 if (the_low_target.get_thread_area == NULL)
1540 return 0;
1541
1542 /* Get the thread area address. This is used to recognize which
1543 thread is which when tracing with the in-process agent library.
1544 We don't read anything from the address, and treat it as opaque;
1545 it's the address itself that we assume is unique per-thread. */
1546 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1547 return 0;
1548
1549 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1550 }
1551
1552 /* The reason we resume in the caller, is because we want to be able
1553 to pass lwp->status_pending as WSTAT, and we need to clear
1554 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1555 refuses to resume. */
1556
1557 static int
1558 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1559 {
1560 struct thread_info *saved_thread;
1561
1562 saved_thread = current_thread;
1563 current_thread = get_lwp_thread (lwp);
1564
1565 if ((wstat == NULL
1566 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1567 && supports_fast_tracepoints ()
1568 && agent_loaded_p ())
1569 {
1570 struct fast_tpoint_collect_status status;
1571 int r;
1572
1573 if (debug_threads)
1574 debug_printf ("Checking whether LWP %ld needs to move out of the "
1575 "jump pad.\n",
1576 lwpid_of (current_thread));
1577
1578 r = linux_fast_tracepoint_collecting (lwp, &status);
1579
1580 if (wstat == NULL
1581 || (WSTOPSIG (*wstat) != SIGILL
1582 && WSTOPSIG (*wstat) != SIGFPE
1583 && WSTOPSIG (*wstat) != SIGSEGV
1584 && WSTOPSIG (*wstat) != SIGBUS))
1585 {
1586 lwp->collecting_fast_tracepoint = r;
1587
1588 if (r != 0)
1589 {
1590 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1591 {
1592 /* Haven't executed the original instruction yet.
1593 Set breakpoint there, and wait till it's hit,
1594 then single-step until exiting the jump pad. */
1595 lwp->exit_jump_pad_bkpt
1596 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1597 }
1598
1599 if (debug_threads)
1600 debug_printf ("Checking whether LWP %ld needs to move out of "
1601 "the jump pad...it does\n",
1602 lwpid_of (current_thread));
1603 current_thread = saved_thread;
1604
1605 return 1;
1606 }
1607 }
1608 else
1609 {
1610 /* If we get a synchronous signal while collecting, *and*
1611 while executing the (relocated) original instruction,
1612 reset the PC to point at the tpoint address, before
1613 reporting to GDB. Otherwise, it's an IPA lib bug: just
1614 report the signal to GDB, and pray for the best. */
1615
1616 lwp->collecting_fast_tracepoint = 0;
1617
1618 if (r != 0
1619 && (status.adjusted_insn_addr <= lwp->stop_pc
1620 && lwp->stop_pc < status.adjusted_insn_addr_end))
1621 {
1622 siginfo_t info;
1623 struct regcache *regcache;
1624
1625 /* The si_addr on a few signals references the address
1626 of the faulting instruction. Adjust that as
1627 well. */
1628 if ((WSTOPSIG (*wstat) == SIGILL
1629 || WSTOPSIG (*wstat) == SIGFPE
1630 || WSTOPSIG (*wstat) == SIGBUS
1631 || WSTOPSIG (*wstat) == SIGSEGV)
1632 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1633 (PTRACE_TYPE_ARG3) 0, &info) == 0
1634 /* Final check just to make sure we don't clobber
1635 the siginfo of non-kernel-sent signals. */
1636 && (uintptr_t) info.si_addr == lwp->stop_pc)
1637 {
1638 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1639 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1640 (PTRACE_TYPE_ARG3) 0, &info);
1641 }
1642
1643 regcache = get_thread_regcache (current_thread, 1);
1644 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1645 lwp->stop_pc = status.tpoint_addr;
1646
1647 /* Cancel any fast tracepoint lock this thread was
1648 holding. */
1649 force_unlock_trace_buffer ();
1650 }
1651
1652 if (lwp->exit_jump_pad_bkpt != NULL)
1653 {
1654 if (debug_threads)
1655 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1656 "stopping all threads momentarily.\n");
1657
1658 stop_all_lwps (1, lwp);
1659
1660 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1661 lwp->exit_jump_pad_bkpt = NULL;
1662
1663 unstop_all_lwps (1, lwp);
1664
1665 gdb_assert (lwp->suspended >= 0);
1666 }
1667 }
1668 }
1669
1670 if (debug_threads)
1671 debug_printf ("Checking whether LWP %ld needs to move out of the "
1672 "jump pad...no\n",
1673 lwpid_of (current_thread));
1674
1675 current_thread = saved_thread;
1676 return 0;
1677 }
1678
1679 /* Enqueue one signal in the "signals to report later when out of the
1680 jump pad" list. */
1681
1682 static void
1683 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1684 {
1685 struct pending_signals *p_sig;
1686 struct thread_info *thread = get_lwp_thread (lwp);
1687
1688 if (debug_threads)
1689 debug_printf ("Deferring signal %d for LWP %ld.\n",
1690 WSTOPSIG (*wstat), lwpid_of (thread));
1691
1692 if (debug_threads)
1693 {
1694 struct pending_signals *sig;
1695
1696 for (sig = lwp->pending_signals_to_report;
1697 sig != NULL;
1698 sig = sig->prev)
1699 debug_printf (" Already queued %d\n",
1700 sig->signal);
1701
1702 debug_printf (" (no more currently queued signals)\n");
1703 }
1704
1705 /* Don't enqueue non-RT signals if they are already in the deferred
1706 queue. (SIGSTOP being the easiest signal to see ending up here
1707 twice) */
1708 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1709 {
1710 struct pending_signals *sig;
1711
1712 for (sig = lwp->pending_signals_to_report;
1713 sig != NULL;
1714 sig = sig->prev)
1715 {
1716 if (sig->signal == WSTOPSIG (*wstat))
1717 {
1718 if (debug_threads)
1719 debug_printf ("Not requeuing already queued non-RT signal %d"
1720 " for LWP %ld\n",
1721 sig->signal,
1722 lwpid_of (thread));
1723 return;
1724 }
1725 }
1726 }
1727
1728 p_sig = xmalloc (sizeof (*p_sig));
1729 p_sig->prev = lwp->pending_signals_to_report;
1730 p_sig->signal = WSTOPSIG (*wstat);
1731 memset (&p_sig->info, 0, sizeof (siginfo_t));
1732 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1733 &p_sig->info);
1734
1735 lwp->pending_signals_to_report = p_sig;
1736 }
1737
1738 /* Dequeue one signal from the "signals to report later when out of
1739 the jump pad" list. */
1740
1741 static int
1742 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1743 {
1744 struct thread_info *thread = get_lwp_thread (lwp);
1745
1746 if (lwp->pending_signals_to_report != NULL)
1747 {
1748 struct pending_signals **p_sig;
1749
1750 p_sig = &lwp->pending_signals_to_report;
1751 while ((*p_sig)->prev != NULL)
1752 p_sig = &(*p_sig)->prev;
1753
1754 *wstat = W_STOPCODE ((*p_sig)->signal);
1755 if ((*p_sig)->info.si_signo != 0)
1756 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1757 &(*p_sig)->info);
1758 free (*p_sig);
1759 *p_sig = NULL;
1760
1761 if (debug_threads)
1762 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1763 WSTOPSIG (*wstat), lwpid_of (thread));
1764
1765 if (debug_threads)
1766 {
1767 struct pending_signals *sig;
1768
1769 for (sig = lwp->pending_signals_to_report;
1770 sig != NULL;
1771 sig = sig->prev)
1772 debug_printf (" Still queued %d\n",
1773 sig->signal);
1774
1775 debug_printf (" (no more queued signals)\n");
1776 }
1777
1778 return 1;
1779 }
1780
1781 return 0;
1782 }
1783
1784 /* Return true if the event in LP may be caused by breakpoint. */
1785
1786 static int
1787 wstatus_maybe_breakpoint (int wstatus)
1788 {
1789 return (WIFSTOPPED (wstatus)
1790 && (WSTOPSIG (wstatus) == SIGTRAP
1791 /* SIGILL and SIGSEGV are also treated as traps in case a
1792 breakpoint is inserted at the current PC. */
1793 || WSTOPSIG (wstatus) == SIGILL
1794 || WSTOPSIG (wstatus) == SIGSEGV));
1795 }
1796
1797 /* Fetch the possibly triggered data watchpoint info and store it in
1798 CHILD.
1799
1800 On some archs, like x86, that use debug registers to set
1801 watchpoints, it's possible that the way to know which watched
1802 address trapped, is to check the register that is used to select
1803 which address to watch. Problem is, between setting the watchpoint
1804 and reading back which data address trapped, the user may change
1805 the set of watchpoints, and, as a consequence, GDB changes the
1806 debug registers in the inferior. To avoid reading back a stale
1807 stopped-data-address when that happens, we cache in LP the fact
1808 that a watchpoint trapped, and the corresponding data address, as
1809 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1810 registers meanwhile, we have the cached data we can rely on. */
1811
1812 static int
1813 check_stopped_by_watchpoint (struct lwp_info *child)
1814 {
1815 if (the_low_target.stopped_by_watchpoint != NULL)
1816 {
1817 struct thread_info *saved_thread;
1818
1819 saved_thread = current_thread;
1820 current_thread = get_lwp_thread (child);
1821
1822 if (the_low_target.stopped_by_watchpoint ())
1823 {
1824 child->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
1825
1826 if (the_low_target.stopped_data_address != NULL)
1827 child->stopped_data_address
1828 = the_low_target.stopped_data_address ();
1829 else
1830 child->stopped_data_address = 0;
1831 }
1832
1833 current_thread = saved_thread;
1834 }
1835
1836 return child->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
1837 }
1838
1839 /* Do low-level handling of the event, and check if we should go on
1840 and pass it to caller code. Return the affected lwp if we are, or
1841 NULL otherwise. */
1842
1843 static struct lwp_info *
1844 linux_low_filter_event (int lwpid, int wstat)
1845 {
1846 struct lwp_info *child;
1847 struct thread_info *thread;
1848 int have_stop_pc = 0;
1849
1850 child = find_lwp_pid (pid_to_ptid (lwpid));
1851
1852 /* If we didn't find a process, one of two things presumably happened:
1853 - A process we started and then detached from has exited. Ignore it.
1854 - A process we are controlling has forked and the new child's stop
1855 was reported to us by the kernel. Save its PID. */
1856 if (child == NULL && WIFSTOPPED (wstat))
1857 {
1858 add_to_pid_list (&stopped_pids, lwpid, wstat);
1859 return NULL;
1860 }
1861 else if (child == NULL)
1862 return NULL;
1863
1864 thread = get_lwp_thread (child);
1865
1866 child->stopped = 1;
1867
1868 child->last_status = wstat;
1869
1870 /* Check if the thread has exited. */
1871 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1872 {
1873 if (debug_threads)
1874 debug_printf ("LLFE: %d exited.\n", lwpid);
1875 if (num_lwps (pid_of (thread)) > 1)
1876 {
1877
1878 /* If there is at least one more LWP, then the exit signal was
1879 not the end of the debugged application and should be
1880 ignored. */
1881 delete_lwp (child);
1882 return NULL;
1883 }
1884 else
1885 {
1886 /* This was the last lwp in the process. Since events are
1887 serialized to GDB core, and we can't report this one
1888 right now, but GDB core and the other target layers will
1889 want to be notified about the exit code/signal, leave the
1890 status pending for the next time we're able to report
1891 it. */
1892 mark_lwp_dead (child, wstat);
1893 return child;
1894 }
1895 }
1896
1897 gdb_assert (WIFSTOPPED (wstat));
1898
1899 if (WIFSTOPPED (wstat))
1900 {
1901 struct process_info *proc;
1902
1903 /* Architecture-specific setup after inferior is running. This
1904 needs to happen after we have attached to the inferior and it
1905 is stopped for the first time, but before we access any
1906 inferior registers. */
1907 proc = find_process_pid (pid_of (thread));
1908 if (proc->private->new_inferior)
1909 {
1910 struct thread_info *saved_thread;
1911
1912 saved_thread = current_thread;
1913 current_thread = thread;
1914
1915 the_low_target.arch_setup ();
1916
1917 current_thread = saved_thread;
1918
1919 proc->private->new_inferior = 0;
1920 }
1921 }
1922
1923 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1924 {
1925 struct process_info *proc = find_process_pid (pid_of (thread));
1926
1927 linux_enable_event_reporting (lwpid, proc->attached);
1928 child->must_set_ptrace_flags = 0;
1929 }
1930
1931 /* Be careful to not overwrite stop_pc until
1932 check_stopped_by_breakpoint is called. */
1933 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1934 && linux_is_extended_waitstatus (wstat))
1935 {
1936 child->stop_pc = get_pc (child);
1937 handle_extended_wait (child, wstat);
1938 return NULL;
1939 }
1940
1941 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1942 && check_stopped_by_watchpoint (child))
1943 ;
1944 else if (WIFSTOPPED (wstat) && wstatus_maybe_breakpoint (wstat))
1945 {
1946 if (check_stopped_by_breakpoint (child))
1947 have_stop_pc = 1;
1948 }
1949
1950 if (!have_stop_pc)
1951 child->stop_pc = get_pc (child);
1952
1953 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1954 && child->stop_expected)
1955 {
1956 if (debug_threads)
1957 debug_printf ("Expected stop.\n");
1958 child->stop_expected = 0;
1959
1960 if (thread->last_resume_kind == resume_stop)
1961 {
1962 /* We want to report the stop to the core. Treat the
1963 SIGSTOP as a normal event. */
1964 }
1965 else if (stopping_threads != NOT_STOPPING_THREADS)
1966 {
1967 /* Stopping threads. We don't want this SIGSTOP to end up
1968 pending. */
1969 return NULL;
1970 }
1971 else
1972 {
1973 /* Filter out the event. */
1974 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1975 return NULL;
1976 }
1977 }
1978
1979 child->status_pending_p = 1;
1980 child->status_pending = wstat;
1981 return child;
1982 }
1983
1984 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1985 match FILTER_PTID (leaving others pending). The PTIDs can be:
1986 minus_one_ptid, to specify any child; a pid PTID, specifying all
1987 lwps of a thread group; or a PTID representing a single lwp. Store
1988 the stop status through the status pointer WSTAT. OPTIONS is
1989 passed to the waitpid call. Return 0 if no event was found and
1990 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1991 was found. Return the PID of the stopped child otherwise. */
1992
1993 static int
1994 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1995 int *wstatp, int options)
1996 {
1997 struct thread_info *event_thread;
1998 struct lwp_info *event_child, *requested_child;
1999 sigset_t block_mask, prev_mask;
2000
2001 retry:
2002 /* N.B. event_thread points to the thread_info struct that contains
2003 event_child. Keep them in sync. */
2004 event_thread = NULL;
2005 event_child = NULL;
2006 requested_child = NULL;
2007
2008 /* Check for a lwp with a pending status. */
2009
2010 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2011 {
2012 event_thread = (struct thread_info *)
2013 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2014 if (event_thread != NULL)
2015 event_child = get_thread_lwp (event_thread);
2016 if (debug_threads && event_thread)
2017 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2018 }
2019 else if (!ptid_equal (filter_ptid, null_ptid))
2020 {
2021 requested_child = find_lwp_pid (filter_ptid);
2022
2023 if (stopping_threads == NOT_STOPPING_THREADS
2024 && requested_child->status_pending_p
2025 && requested_child->collecting_fast_tracepoint)
2026 {
2027 enqueue_one_deferred_signal (requested_child,
2028 &requested_child->status_pending);
2029 requested_child->status_pending_p = 0;
2030 requested_child->status_pending = 0;
2031 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2032 }
2033
2034 if (requested_child->suspended
2035 && requested_child->status_pending_p)
2036 {
2037 internal_error (__FILE__, __LINE__,
2038 "requesting an event out of a"
2039 " suspended child?");
2040 }
2041
2042 if (requested_child->status_pending_p)
2043 {
2044 event_child = requested_child;
2045 event_thread = get_lwp_thread (event_child);
2046 }
2047 }
2048
2049 if (event_child != NULL)
2050 {
2051 if (debug_threads)
2052 debug_printf ("Got an event from pending child %ld (%04x)\n",
2053 lwpid_of (event_thread), event_child->status_pending);
2054 *wstatp = event_child->status_pending;
2055 event_child->status_pending_p = 0;
2056 event_child->status_pending = 0;
2057 current_thread = event_thread;
2058 return lwpid_of (event_thread);
2059 }
2060
2061 /* But if we don't find a pending event, we'll have to wait.
2062
2063 We only enter this loop if no process has a pending wait status.
2064 Thus any action taken in response to a wait status inside this
2065 loop is responding as soon as we detect the status, not after any
2066 pending events. */
2067
2068 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2069 all signals while here. */
2070 sigfillset (&block_mask);
2071 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2072
2073 /* Always pull all events out of the kernel. We'll randomly select
2074 an event LWP out of all that have events, to prevent
2075 starvation. */
2076 while (event_child == NULL)
2077 {
2078 pid_t ret = 0;
2079
2080 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2081 quirks:
2082
2083 - If the thread group leader exits while other threads in the
2084 thread group still exist, waitpid(TGID, ...) hangs. That
2085 waitpid won't return an exit status until the other threads
2086 in the group are reaped.
2087
2088 - When a non-leader thread execs, that thread just vanishes
2089 without reporting an exit (so we'd hang if we waited for it
2090 explicitly in that case). The exec event is reported to
2091 the TGID pid (although we don't currently enable exec
2092 events). */
2093 errno = 0;
2094 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2095
2096 if (debug_threads)
2097 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2098 ret, errno ? strerror (errno) : "ERRNO-OK");
2099
2100 if (ret > 0)
2101 {
2102 if (debug_threads)
2103 {
2104 debug_printf ("LLW: waitpid %ld received %s\n",
2105 (long) ret, status_to_str (*wstatp));
2106 }
2107
2108 /* Filter all events. IOW, leave all events pending. We'll
2109 randomly select an event LWP out of all that have events
2110 below. */
2111 linux_low_filter_event (ret, *wstatp);
2112 /* Retry until nothing comes out of waitpid. A single
2113 SIGCHLD can indicate more than one child stopped. */
2114 continue;
2115 }
2116
2117 /* Now that we've pulled all events out of the kernel, check if
2118 there's any LWP with a status to report to the core. */
2119 event_thread = (struct thread_info *)
2120 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2121 if (event_thread != NULL)
2122 {
2123 event_child = get_thread_lwp (event_thread);
2124 *wstatp = event_child->status_pending;
2125 event_child->status_pending_p = 0;
2126 event_child->status_pending = 0;
2127 break;
2128 }
2129
2130 /* Check for zombie thread group leaders. Those can't be reaped
2131 until all other threads in the thread group are. */
2132 check_zombie_leaders ();
2133
2134 /* If there are no resumed children left in the set of LWPs we
2135 want to wait for, bail. We can't just block in
2136 waitpid/sigsuspend, because lwps might have been left stopped
2137 in trace-stop state, and we'd be stuck forever waiting for
2138 their status to change (which would only happen if we resumed
2139 them). Even if WNOHANG is set, this return code is preferred
2140 over 0 (below), as it is more detailed. */
2141 if ((find_inferior (&all_threads,
2142 not_stopped_callback,
2143 &wait_ptid) == NULL))
2144 {
2145 if (debug_threads)
2146 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2147 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2148 return -1;
2149 }
2150
2151 /* No interesting event to report to the caller. */
2152 if ((options & WNOHANG))
2153 {
2154 if (debug_threads)
2155 debug_printf ("WNOHANG set, no event found\n");
2156
2157 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2158 return 0;
2159 }
2160
2161 /* Block until we get an event reported with SIGCHLD. */
2162 if (debug_threads)
2163 debug_printf ("sigsuspend'ing\n");
2164
2165 sigsuspend (&prev_mask);
2166 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2167 goto retry;
2168 }
2169
2170 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2171
2172 current_thread = event_thread;
2173
2174 /* Check for thread exit. */
2175 if (! WIFSTOPPED (*wstatp))
2176 {
2177 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2178
2179 if (debug_threads)
2180 debug_printf ("LWP %d is the last lwp of process. "
2181 "Process %ld exiting.\n",
2182 pid_of (event_thread), lwpid_of (event_thread));
2183 return lwpid_of (event_thread);
2184 }
2185
2186 return lwpid_of (event_thread);
2187 }
2188
2189 /* Wait for an event from child(ren) PTID. PTIDs can be:
2190 minus_one_ptid, to specify any child; a pid PTID, specifying all
2191 lwps of a thread group; or a PTID representing a single lwp. Store
2192 the stop status through the status pointer WSTAT. OPTIONS is
2193 passed to the waitpid call. Return 0 if no event was found and
2194 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2195 was found. Return the PID of the stopped child otherwise. */
2196
2197 static int
2198 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2199 {
2200 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2201 }
2202
2203 /* Count the LWP's that have had events. */
2204
2205 static int
2206 count_events_callback (struct inferior_list_entry *entry, void *data)
2207 {
2208 struct thread_info *thread = (struct thread_info *) entry;
2209 int *count = data;
2210
2211 gdb_assert (count != NULL);
2212
2213 /* Count only resumed LWPs that have an event pending. */
2214 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2215 && thread->last_resume_kind != resume_stop
2216 && thread->status_pending_p)
2217 (*count)++;
2218
2219 return 0;
2220 }
2221
2222 /* Select the LWP (if any) that is currently being single-stepped. */
2223
2224 static int
2225 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2226 {
2227 struct thread_info *thread = (struct thread_info *) entry;
2228 struct lwp_info *lp = get_thread_lwp (thread);
2229
2230 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2231 && thread->last_resume_kind == resume_step
2232 && lp->status_pending_p)
2233 return 1;
2234 else
2235 return 0;
2236 }
2237
2238 /* Select the Nth LWP that has had a SIGTRAP event that should be
2239 reported to GDB. */
2240
2241 static int
2242 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2243 {
2244 struct thread_info *thread = (struct thread_info *) entry;
2245 int *selector = data;
2246
2247 gdb_assert (selector != NULL);
2248
2249 /* Select only resumed LWPs that have an event pending. */
2250 if (thread->last_resume_kind != resume_stop
2251 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2252 && thread->status_pending_p)
2253 if ((*selector)-- == 0)
2254 return 1;
2255
2256 return 0;
2257 }
2258
2259 /* Select one LWP out of those that have events pending. */
2260
2261 static void
2262 select_event_lwp (struct lwp_info **orig_lp)
2263 {
2264 int num_events = 0;
2265 int random_selector;
2266 struct thread_info *event_thread = NULL;
2267
2268 /* In all-stop, give preference to the LWP that is being
2269 single-stepped. There will be at most one, and it's the LWP that
2270 the core is most interested in. If we didn't do this, then we'd
2271 have to handle pending step SIGTRAPs somehow in case the core
2272 later continues the previously-stepped thread, otherwise we'd
2273 report the pending SIGTRAP, and the core, not having stepped the
2274 thread, wouldn't understand what the trap was for, and therefore
2275 would report it to the user as a random signal. */
2276 if (!non_stop)
2277 {
2278 event_thread
2279 = (struct thread_info *) find_inferior (&all_threads,
2280 select_singlestep_lwp_callback,
2281 NULL);
2282 if (event_thread != NULL)
2283 {
2284 if (debug_threads)
2285 debug_printf ("SEL: Select single-step %s\n",
2286 target_pid_to_str (ptid_of (event_thread)));
2287 }
2288 }
2289 if (event_thread == NULL)
2290 {
2291 /* No single-stepping LWP. Select one at random, out of those
2292 which have had SIGTRAP events. */
2293
2294 /* First see how many SIGTRAP events we have. */
2295 find_inferior (&all_threads, count_events_callback, &num_events);
2296
2297 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2298 random_selector = (int)
2299 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2300
2301 if (debug_threads && num_events > 1)
2302 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2303 num_events, random_selector);
2304
2305 event_thread
2306 = (struct thread_info *) find_inferior (&all_threads,
2307 select_event_lwp_callback,
2308 &random_selector);
2309 }
2310
2311 if (event_thread != NULL)
2312 {
2313 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2314
2315 /* Switch the event LWP. */
2316 *orig_lp = event_lp;
2317 }
2318 }
2319
2320 /* Decrement the suspend count of an LWP. */
2321
2322 static int
2323 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2324 {
2325 struct thread_info *thread = (struct thread_info *) entry;
2326 struct lwp_info *lwp = get_thread_lwp (thread);
2327
2328 /* Ignore EXCEPT. */
2329 if (lwp == except)
2330 return 0;
2331
2332 lwp->suspended--;
2333
2334 gdb_assert (lwp->suspended >= 0);
2335 return 0;
2336 }
2337
2338 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2339 NULL. */
2340
2341 static void
2342 unsuspend_all_lwps (struct lwp_info *except)
2343 {
2344 find_inferior (&all_threads, unsuspend_one_lwp, except);
2345 }
2346
2347 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2348 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2349 void *data);
2350 static int lwp_running (struct inferior_list_entry *entry, void *data);
2351 static ptid_t linux_wait_1 (ptid_t ptid,
2352 struct target_waitstatus *ourstatus,
2353 int target_options);
2354
2355 /* Stabilize threads (move out of jump pads).
2356
2357 If a thread is midway collecting a fast tracepoint, we need to
2358 finish the collection and move it out of the jump pad before
2359 reporting the signal.
2360
2361 This avoids recursion while collecting (when a signal arrives
2362 midway, and the signal handler itself collects), which would trash
2363 the trace buffer. In case the user set a breakpoint in a signal
2364 handler, this avoids the backtrace showing the jump pad, etc..
2365 Most importantly, there are certain things we can't do safely if
2366 threads are stopped in a jump pad (or in its callee's). For
2367 example:
2368
2369 - starting a new trace run. A thread still collecting the
2370 previous run, could trash the trace buffer when resumed. The trace
2371 buffer control structures would have been reset but the thread had
2372 no way to tell. The thread could even midway memcpy'ing to the
2373 buffer, which would mean that when resumed, it would clobber the
2374 trace buffer that had been set for a new run.
2375
2376 - we can't rewrite/reuse the jump pads for new tracepoints
2377 safely. Say you do tstart while a thread is stopped midway while
2378 collecting. When the thread is later resumed, it finishes the
2379 collection, and returns to the jump pad, to execute the original
2380 instruction that was under the tracepoint jump at the time the
2381 older run had been started. If the jump pad had been rewritten
2382 since for something else in the new run, the thread would now
2383 execute the wrong / random instructions. */
2384
2385 static void
2386 linux_stabilize_threads (void)
2387 {
2388 struct thread_info *saved_thread;
2389 struct thread_info *thread_stuck;
2390
2391 thread_stuck
2392 = (struct thread_info *) find_inferior (&all_threads,
2393 stuck_in_jump_pad_callback,
2394 NULL);
2395 if (thread_stuck != NULL)
2396 {
2397 if (debug_threads)
2398 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2399 lwpid_of (thread_stuck));
2400 return;
2401 }
2402
2403 saved_thread = current_thread;
2404
2405 stabilizing_threads = 1;
2406
2407 /* Kick 'em all. */
2408 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2409
2410 /* Loop until all are stopped out of the jump pads. */
2411 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2412 {
2413 struct target_waitstatus ourstatus;
2414 struct lwp_info *lwp;
2415 int wstat;
2416
2417 /* Note that we go through the full wait even loop. While
2418 moving threads out of jump pad, we need to be able to step
2419 over internal breakpoints and such. */
2420 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2421
2422 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2423 {
2424 lwp = get_thread_lwp (current_thread);
2425
2426 /* Lock it. */
2427 lwp->suspended++;
2428
2429 if (ourstatus.value.sig != GDB_SIGNAL_0
2430 || current_thread->last_resume_kind == resume_stop)
2431 {
2432 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2433 enqueue_one_deferred_signal (lwp, &wstat);
2434 }
2435 }
2436 }
2437
2438 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2439
2440 stabilizing_threads = 0;
2441
2442 current_thread = saved_thread;
2443
2444 if (debug_threads)
2445 {
2446 thread_stuck
2447 = (struct thread_info *) find_inferior (&all_threads,
2448 stuck_in_jump_pad_callback,
2449 NULL);
2450 if (thread_stuck != NULL)
2451 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2452 lwpid_of (thread_stuck));
2453 }
2454 }
2455
2456 static void async_file_mark (void);
2457
2458 /* Convenience function that is called when the kernel reports an
2459 event that is not passed out to GDB. */
2460
2461 static ptid_t
2462 ignore_event (struct target_waitstatus *ourstatus)
2463 {
2464 /* If we got an event, there may still be others, as a single
2465 SIGCHLD can indicate more than one child stopped. This forces
2466 another target_wait call. */
2467 async_file_mark ();
2468
2469 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2470 return null_ptid;
2471 }
2472
2473 /* Wait for process, returns status. */
2474
2475 static ptid_t
2476 linux_wait_1 (ptid_t ptid,
2477 struct target_waitstatus *ourstatus, int target_options)
2478 {
2479 int w;
2480 struct lwp_info *event_child;
2481 int options;
2482 int pid;
2483 int step_over_finished;
2484 int bp_explains_trap;
2485 int maybe_internal_trap;
2486 int report_to_gdb;
2487 int trace_event;
2488 int in_step_range;
2489
2490 if (debug_threads)
2491 {
2492 debug_enter ();
2493 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2494 }
2495
2496 /* Translate generic target options into linux options. */
2497 options = __WALL;
2498 if (target_options & TARGET_WNOHANG)
2499 options |= WNOHANG;
2500
2501 bp_explains_trap = 0;
2502 trace_event = 0;
2503 in_step_range = 0;
2504 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2505
2506 if (ptid_equal (step_over_bkpt, null_ptid))
2507 pid = linux_wait_for_event (ptid, &w, options);
2508 else
2509 {
2510 if (debug_threads)
2511 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2512 target_pid_to_str (step_over_bkpt));
2513 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2514 }
2515
2516 if (pid == 0)
2517 {
2518 gdb_assert (target_options & TARGET_WNOHANG);
2519
2520 if (debug_threads)
2521 {
2522 debug_printf ("linux_wait_1 ret = null_ptid, "
2523 "TARGET_WAITKIND_IGNORE\n");
2524 debug_exit ();
2525 }
2526
2527 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2528 return null_ptid;
2529 }
2530 else if (pid == -1)
2531 {
2532 if (debug_threads)
2533 {
2534 debug_printf ("linux_wait_1 ret = null_ptid, "
2535 "TARGET_WAITKIND_NO_RESUMED\n");
2536 debug_exit ();
2537 }
2538
2539 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2540 return null_ptid;
2541 }
2542
2543 event_child = get_thread_lwp (current_thread);
2544
2545 /* linux_wait_for_event only returns an exit status for the last
2546 child of a process. Report it. */
2547 if (WIFEXITED (w) || WIFSIGNALED (w))
2548 {
2549 if (WIFEXITED (w))
2550 {
2551 ourstatus->kind = TARGET_WAITKIND_EXITED;
2552 ourstatus->value.integer = WEXITSTATUS (w);
2553
2554 if (debug_threads)
2555 {
2556 debug_printf ("linux_wait_1 ret = %s, exited with "
2557 "retcode %d\n",
2558 target_pid_to_str (ptid_of (current_thread)),
2559 WEXITSTATUS (w));
2560 debug_exit ();
2561 }
2562 }
2563 else
2564 {
2565 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2566 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2567
2568 if (debug_threads)
2569 {
2570 debug_printf ("linux_wait_1 ret = %s, terminated with "
2571 "signal %d\n",
2572 target_pid_to_str (ptid_of (current_thread)),
2573 WTERMSIG (w));
2574 debug_exit ();
2575 }
2576 }
2577
2578 return ptid_of (current_thread);
2579 }
2580
2581 /* If this event was not handled before, and is not a SIGTRAP, we
2582 report it. SIGILL and SIGSEGV are also treated as traps in case
2583 a breakpoint is inserted at the current PC. If this target does
2584 not support internal breakpoints at all, we also report the
2585 SIGTRAP without further processing; it's of no concern to us. */
2586 maybe_internal_trap
2587 = (supports_breakpoints ()
2588 && (WSTOPSIG (w) == SIGTRAP
2589 || ((WSTOPSIG (w) == SIGILL
2590 || WSTOPSIG (w) == SIGSEGV)
2591 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2592
2593 if (maybe_internal_trap)
2594 {
2595 /* Handle anything that requires bookkeeping before deciding to
2596 report the event or continue waiting. */
2597
2598 /* First check if we can explain the SIGTRAP with an internal
2599 breakpoint, or if we should possibly report the event to GDB.
2600 Do this before anything that may remove or insert a
2601 breakpoint. */
2602 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2603
2604 /* We have a SIGTRAP, possibly a step-over dance has just
2605 finished. If so, tweak the state machine accordingly,
2606 reinsert breakpoints and delete any reinsert (software
2607 single-step) breakpoints. */
2608 step_over_finished = finish_step_over (event_child);
2609
2610 /* Now invoke the callbacks of any internal breakpoints there. */
2611 check_breakpoints (event_child->stop_pc);
2612
2613 /* Handle tracepoint data collecting. This may overflow the
2614 trace buffer, and cause a tracing stop, removing
2615 breakpoints. */
2616 trace_event = handle_tracepoints (event_child);
2617
2618 if (bp_explains_trap)
2619 {
2620 /* If we stepped or ran into an internal breakpoint, we've
2621 already handled it. So next time we resume (from this
2622 PC), we should step over it. */
2623 if (debug_threads)
2624 debug_printf ("Hit a gdbserver breakpoint.\n");
2625
2626 if (breakpoint_here (event_child->stop_pc))
2627 event_child->need_step_over = 1;
2628 }
2629 }
2630 else
2631 {
2632 /* We have some other signal, possibly a step-over dance was in
2633 progress, and it should be cancelled too. */
2634 step_over_finished = finish_step_over (event_child);
2635 }
2636
2637 /* We have all the data we need. Either report the event to GDB, or
2638 resume threads and keep waiting for more. */
2639
2640 /* If we're collecting a fast tracepoint, finish the collection and
2641 move out of the jump pad before delivering a signal. See
2642 linux_stabilize_threads. */
2643
2644 if (WIFSTOPPED (w)
2645 && WSTOPSIG (w) != SIGTRAP
2646 && supports_fast_tracepoints ()
2647 && agent_loaded_p ())
2648 {
2649 if (debug_threads)
2650 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2651 "to defer or adjust it.\n",
2652 WSTOPSIG (w), lwpid_of (current_thread));
2653
2654 /* Allow debugging the jump pad itself. */
2655 if (current_thread->last_resume_kind != resume_step
2656 && maybe_move_out_of_jump_pad (event_child, &w))
2657 {
2658 enqueue_one_deferred_signal (event_child, &w);
2659
2660 if (debug_threads)
2661 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2662 WSTOPSIG (w), lwpid_of (current_thread));
2663
2664 linux_resume_one_lwp (event_child, 0, 0, NULL);
2665
2666 return ignore_event (ourstatus);
2667 }
2668 }
2669
2670 if (event_child->collecting_fast_tracepoint)
2671 {
2672 if (debug_threads)
2673 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2674 "Check if we're already there.\n",
2675 lwpid_of (current_thread),
2676 event_child->collecting_fast_tracepoint);
2677
2678 trace_event = 1;
2679
2680 event_child->collecting_fast_tracepoint
2681 = linux_fast_tracepoint_collecting (event_child, NULL);
2682
2683 if (event_child->collecting_fast_tracepoint != 1)
2684 {
2685 /* No longer need this breakpoint. */
2686 if (event_child->exit_jump_pad_bkpt != NULL)
2687 {
2688 if (debug_threads)
2689 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2690 "stopping all threads momentarily.\n");
2691
2692 /* Other running threads could hit this breakpoint.
2693 We don't handle moribund locations like GDB does,
2694 instead we always pause all threads when removing
2695 breakpoints, so that any step-over or
2696 decr_pc_after_break adjustment is always taken
2697 care of while the breakpoint is still
2698 inserted. */
2699 stop_all_lwps (1, event_child);
2700
2701 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2702 event_child->exit_jump_pad_bkpt = NULL;
2703
2704 unstop_all_lwps (1, event_child);
2705
2706 gdb_assert (event_child->suspended >= 0);
2707 }
2708 }
2709
2710 if (event_child->collecting_fast_tracepoint == 0)
2711 {
2712 if (debug_threads)
2713 debug_printf ("fast tracepoint finished "
2714 "collecting successfully.\n");
2715
2716 /* We may have a deferred signal to report. */
2717 if (dequeue_one_deferred_signal (event_child, &w))
2718 {
2719 if (debug_threads)
2720 debug_printf ("dequeued one signal.\n");
2721 }
2722 else
2723 {
2724 if (debug_threads)
2725 debug_printf ("no deferred signals.\n");
2726
2727 if (stabilizing_threads)
2728 {
2729 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2730 ourstatus->value.sig = GDB_SIGNAL_0;
2731
2732 if (debug_threads)
2733 {
2734 debug_printf ("linux_wait_1 ret = %s, stopped "
2735 "while stabilizing threads\n",
2736 target_pid_to_str (ptid_of (current_thread)));
2737 debug_exit ();
2738 }
2739
2740 return ptid_of (current_thread);
2741 }
2742 }
2743 }
2744 }
2745
2746 /* Check whether GDB would be interested in this event. */
2747
2748 /* If GDB is not interested in this signal, don't stop other
2749 threads, and don't report it to GDB. Just resume the inferior
2750 right away. We do this for threading-related signals as well as
2751 any that GDB specifically requested we ignore. But never ignore
2752 SIGSTOP if we sent it ourselves, and do not ignore signals when
2753 stepping - they may require special handling to skip the signal
2754 handler. */
2755 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2756 thread library? */
2757 if (WIFSTOPPED (w)
2758 && current_thread->last_resume_kind != resume_step
2759 && (
2760 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2761 (current_process ()->private->thread_db != NULL
2762 && (WSTOPSIG (w) == __SIGRTMIN
2763 || WSTOPSIG (w) == __SIGRTMIN + 1))
2764 ||
2765 #endif
2766 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2767 && !(WSTOPSIG (w) == SIGSTOP
2768 && current_thread->last_resume_kind == resume_stop))))
2769 {
2770 siginfo_t info, *info_p;
2771
2772 if (debug_threads)
2773 debug_printf ("Ignored signal %d for LWP %ld.\n",
2774 WSTOPSIG (w), lwpid_of (current_thread));
2775
2776 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2777 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2778 info_p = &info;
2779 else
2780 info_p = NULL;
2781 linux_resume_one_lwp (event_child, event_child->stepping,
2782 WSTOPSIG (w), info_p);
2783 return ignore_event (ourstatus);
2784 }
2785
2786 /* Note that all addresses are always "out of the step range" when
2787 there's no range to begin with. */
2788 in_step_range = lwp_in_step_range (event_child);
2789
2790 /* If GDB wanted this thread to single step, and the thread is out
2791 of the step range, we always want to report the SIGTRAP, and let
2792 GDB handle it. Watchpoints should always be reported. So should
2793 signals we can't explain. A SIGTRAP we can't explain could be a
2794 GDB breakpoint --- we may or not support Z0 breakpoints. If we
2795 do, we're be able to handle GDB breakpoints on top of internal
2796 breakpoints, by handling the internal breakpoint and still
2797 reporting the event to GDB. If we don't, we're out of luck, GDB
2798 won't see the breakpoint hit. */
2799 report_to_gdb = (!maybe_internal_trap
2800 || (current_thread->last_resume_kind == resume_step
2801 && !in_step_range)
2802 || event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT
2803 || (!step_over_finished && !in_step_range
2804 && !bp_explains_trap && !trace_event)
2805 || (gdb_breakpoint_here (event_child->stop_pc)
2806 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2807 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2808
2809 run_breakpoint_commands (event_child->stop_pc);
2810
2811 /* We found no reason GDB would want us to stop. We either hit one
2812 of our own breakpoints, or finished an internal step GDB
2813 shouldn't know about. */
2814 if (!report_to_gdb)
2815 {
2816 if (debug_threads)
2817 {
2818 if (bp_explains_trap)
2819 debug_printf ("Hit a gdbserver breakpoint.\n");
2820 if (step_over_finished)
2821 debug_printf ("Step-over finished.\n");
2822 if (trace_event)
2823 debug_printf ("Tracepoint event.\n");
2824 if (lwp_in_step_range (event_child))
2825 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2826 paddress (event_child->stop_pc),
2827 paddress (event_child->step_range_start),
2828 paddress (event_child->step_range_end));
2829 }
2830
2831 /* We're not reporting this breakpoint to GDB, so apply the
2832 decr_pc_after_break adjustment to the inferior's regcache
2833 ourselves. */
2834
2835 if (the_low_target.set_pc != NULL)
2836 {
2837 struct regcache *regcache
2838 = get_thread_regcache (current_thread, 1);
2839 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2840 }
2841
2842 /* We may have finished stepping over a breakpoint. If so,
2843 we've stopped and suspended all LWPs momentarily except the
2844 stepping one. This is where we resume them all again. We're
2845 going to keep waiting, so use proceed, which handles stepping
2846 over the next breakpoint. */
2847 if (debug_threads)
2848 debug_printf ("proceeding all threads.\n");
2849
2850 if (step_over_finished)
2851 unsuspend_all_lwps (event_child);
2852
2853 proceed_all_lwps ();
2854 return ignore_event (ourstatus);
2855 }
2856
2857 if (debug_threads)
2858 {
2859 if (current_thread->last_resume_kind == resume_step)
2860 {
2861 if (event_child->step_range_start == event_child->step_range_end)
2862 debug_printf ("GDB wanted to single-step, reporting event.\n");
2863 else if (!lwp_in_step_range (event_child))
2864 debug_printf ("Out of step range, reporting event.\n");
2865 }
2866 if (event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
2867 debug_printf ("Stopped by watchpoint.\n");
2868 else if (gdb_breakpoint_here (event_child->stop_pc))
2869 debug_printf ("Stopped by GDB breakpoint.\n");
2870 if (debug_threads)
2871 debug_printf ("Hit a non-gdbserver trap event.\n");
2872 }
2873
2874 /* Alright, we're going to report a stop. */
2875
2876 if (!stabilizing_threads)
2877 {
2878 /* In all-stop, stop all threads. */
2879 if (!non_stop)
2880 stop_all_lwps (0, NULL);
2881
2882 /* If we're not waiting for a specific LWP, choose an event LWP
2883 from among those that have had events. Giving equal priority
2884 to all LWPs that have had events helps prevent
2885 starvation. */
2886 if (ptid_equal (ptid, minus_one_ptid))
2887 {
2888 event_child->status_pending_p = 1;
2889 event_child->status_pending = w;
2890
2891 select_event_lwp (&event_child);
2892
2893 /* current_thread and event_child must stay in sync. */
2894 current_thread = get_lwp_thread (event_child);
2895
2896 event_child->status_pending_p = 0;
2897 w = event_child->status_pending;
2898 }
2899
2900 if (step_over_finished)
2901 {
2902 if (!non_stop)
2903 {
2904 /* If we were doing a step-over, all other threads but
2905 the stepping one had been paused in start_step_over,
2906 with their suspend counts incremented. We don't want
2907 to do a full unstop/unpause, because we're in
2908 all-stop mode (so we want threads stopped), but we
2909 still need to unsuspend the other threads, to
2910 decrement their `suspended' count back. */
2911 unsuspend_all_lwps (event_child);
2912 }
2913 else
2914 {
2915 /* If we just finished a step-over, then all threads had
2916 been momentarily paused. In all-stop, that's fine,
2917 we want threads stopped by now anyway. In non-stop,
2918 we need to re-resume threads that GDB wanted to be
2919 running. */
2920 unstop_all_lwps (1, event_child);
2921 }
2922 }
2923
2924 /* Stabilize threads (move out of jump pads). */
2925 if (!non_stop)
2926 stabilize_threads ();
2927 }
2928 else
2929 {
2930 /* If we just finished a step-over, then all threads had been
2931 momentarily paused. In all-stop, that's fine, we want
2932 threads stopped by now anyway. In non-stop, we need to
2933 re-resume threads that GDB wanted to be running. */
2934 if (step_over_finished)
2935 unstop_all_lwps (1, event_child);
2936 }
2937
2938 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2939
2940 /* Now that we've selected our final event LWP, un-adjust its PC if
2941 it was a software breakpoint. */
2942 if (event_child->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
2943 {
2944 int decr_pc = the_low_target.decr_pc_after_break;
2945
2946 if (decr_pc != 0)
2947 {
2948 struct regcache *regcache
2949 = get_thread_regcache (current_thread, 1);
2950 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
2951 }
2952 }
2953
2954 if (current_thread->last_resume_kind == resume_stop
2955 && WSTOPSIG (w) == SIGSTOP)
2956 {
2957 /* A thread that has been requested to stop by GDB with vCont;t,
2958 and it stopped cleanly, so report as SIG0. The use of
2959 SIGSTOP is an implementation detail. */
2960 ourstatus->value.sig = GDB_SIGNAL_0;
2961 }
2962 else if (current_thread->last_resume_kind == resume_stop
2963 && WSTOPSIG (w) != SIGSTOP)
2964 {
2965 /* A thread that has been requested to stop by GDB with vCont;t,
2966 but, it stopped for other reasons. */
2967 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2968 }
2969 else
2970 {
2971 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2972 }
2973
2974 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2975
2976 if (debug_threads)
2977 {
2978 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2979 target_pid_to_str (ptid_of (current_thread)),
2980 ourstatus->kind, ourstatus->value.sig);
2981 debug_exit ();
2982 }
2983
2984 return ptid_of (current_thread);
2985 }
2986
2987 /* Get rid of any pending event in the pipe. */
2988 static void
2989 async_file_flush (void)
2990 {
2991 int ret;
2992 char buf;
2993
2994 do
2995 ret = read (linux_event_pipe[0], &buf, 1);
2996 while (ret >= 0 || (ret == -1 && errno == EINTR));
2997 }
2998
2999 /* Put something in the pipe, so the event loop wakes up. */
3000 static void
3001 async_file_mark (void)
3002 {
3003 int ret;
3004
3005 async_file_flush ();
3006
3007 do
3008 ret = write (linux_event_pipe[1], "+", 1);
3009 while (ret == 0 || (ret == -1 && errno == EINTR));
3010
3011 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3012 be awakened anyway. */
3013 }
3014
3015 static ptid_t
3016 linux_wait (ptid_t ptid,
3017 struct target_waitstatus *ourstatus, int target_options)
3018 {
3019 ptid_t event_ptid;
3020
3021 /* Flush the async file first. */
3022 if (target_is_async_p ())
3023 async_file_flush ();
3024
3025 do
3026 {
3027 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3028 }
3029 while ((target_options & TARGET_WNOHANG) == 0
3030 && ptid_equal (event_ptid, null_ptid)
3031 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3032
3033 /* If at least one stop was reported, there may be more. A single
3034 SIGCHLD can signal more than one child stop. */
3035 if (target_is_async_p ()
3036 && (target_options & TARGET_WNOHANG) != 0
3037 && !ptid_equal (event_ptid, null_ptid))
3038 async_file_mark ();
3039
3040 return event_ptid;
3041 }
3042
3043 /* Send a signal to an LWP. */
3044
3045 static int
3046 kill_lwp (unsigned long lwpid, int signo)
3047 {
3048 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3049 fails, then we are not using nptl threads and we should be using kill. */
3050
3051 #ifdef __NR_tkill
3052 {
3053 static int tkill_failed;
3054
3055 if (!tkill_failed)
3056 {
3057 int ret;
3058
3059 errno = 0;
3060 ret = syscall (__NR_tkill, lwpid, signo);
3061 if (errno != ENOSYS)
3062 return ret;
3063 tkill_failed = 1;
3064 }
3065 }
3066 #endif
3067
3068 return kill (lwpid, signo);
3069 }
3070
3071 void
3072 linux_stop_lwp (struct lwp_info *lwp)
3073 {
3074 send_sigstop (lwp);
3075 }
3076
3077 static void
3078 send_sigstop (struct lwp_info *lwp)
3079 {
3080 int pid;
3081
3082 pid = lwpid_of (get_lwp_thread (lwp));
3083
3084 /* If we already have a pending stop signal for this process, don't
3085 send another. */
3086 if (lwp->stop_expected)
3087 {
3088 if (debug_threads)
3089 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3090
3091 return;
3092 }
3093
3094 if (debug_threads)
3095 debug_printf ("Sending sigstop to lwp %d\n", pid);
3096
3097 lwp->stop_expected = 1;
3098 kill_lwp (pid, SIGSTOP);
3099 }
3100
3101 static int
3102 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3103 {
3104 struct thread_info *thread = (struct thread_info *) entry;
3105 struct lwp_info *lwp = get_thread_lwp (thread);
3106
3107 /* Ignore EXCEPT. */
3108 if (lwp == except)
3109 return 0;
3110
3111 if (lwp->stopped)
3112 return 0;
3113
3114 send_sigstop (lwp);
3115 return 0;
3116 }
3117
3118 /* Increment the suspend count of an LWP, and stop it, if not stopped
3119 yet. */
3120 static int
3121 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3122 void *except)
3123 {
3124 struct thread_info *thread = (struct thread_info *) entry;
3125 struct lwp_info *lwp = get_thread_lwp (thread);
3126
3127 /* Ignore EXCEPT. */
3128 if (lwp == except)
3129 return 0;
3130
3131 lwp->suspended++;
3132
3133 return send_sigstop_callback (entry, except);
3134 }
3135
3136 static void
3137 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3138 {
3139 /* It's dead, really. */
3140 lwp->dead = 1;
3141
3142 /* Store the exit status for later. */
3143 lwp->status_pending_p = 1;
3144 lwp->status_pending = wstat;
3145
3146 /* Prevent trying to stop it. */
3147 lwp->stopped = 1;
3148
3149 /* No further stops are expected from a dead lwp. */
3150 lwp->stop_expected = 0;
3151 }
3152
3153 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3154
3155 static void
3156 wait_for_sigstop (void)
3157 {
3158 struct thread_info *saved_thread;
3159 ptid_t saved_tid;
3160 int wstat;
3161 int ret;
3162
3163 saved_thread = current_thread;
3164 if (saved_thread != NULL)
3165 saved_tid = saved_thread->entry.id;
3166 else
3167 saved_tid = null_ptid; /* avoid bogus unused warning */
3168
3169 if (debug_threads)
3170 debug_printf ("wait_for_sigstop: pulling events\n");
3171
3172 /* Passing NULL_PTID as filter indicates we want all events to be
3173 left pending. Eventually this returns when there are no
3174 unwaited-for children left. */
3175 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3176 &wstat, __WALL);
3177 gdb_assert (ret == -1);
3178
3179 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3180 current_thread = saved_thread;
3181 else
3182 {
3183 if (debug_threads)
3184 debug_printf ("Previously current thread died.\n");
3185
3186 if (non_stop)
3187 {
3188 /* We can't change the current inferior behind GDB's back,
3189 otherwise, a subsequent command may apply to the wrong
3190 process. */
3191 current_thread = NULL;
3192 }
3193 else
3194 {
3195 /* Set a valid thread as current. */
3196 set_desired_thread (0);
3197 }
3198 }
3199 }
3200
3201 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3202 move it out, because we need to report the stop event to GDB. For
3203 example, if the user puts a breakpoint in the jump pad, it's
3204 because she wants to debug it. */
3205
3206 static int
3207 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3208 {
3209 struct thread_info *thread = (struct thread_info *) entry;
3210 struct lwp_info *lwp = get_thread_lwp (thread);
3211
3212 gdb_assert (lwp->suspended == 0);
3213 gdb_assert (lwp->stopped);
3214
3215 /* Allow debugging the jump pad, gdb_collect, etc.. */
3216 return (supports_fast_tracepoints ()
3217 && agent_loaded_p ()
3218 && (gdb_breakpoint_here (lwp->stop_pc)
3219 || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT
3220 || thread->last_resume_kind == resume_step)
3221 && linux_fast_tracepoint_collecting (lwp, NULL));
3222 }
3223
3224 static void
3225 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3226 {
3227 struct thread_info *thread = (struct thread_info *) entry;
3228 struct lwp_info *lwp = get_thread_lwp (thread);
3229 int *wstat;
3230
3231 gdb_assert (lwp->suspended == 0);
3232 gdb_assert (lwp->stopped);
3233
3234 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3235
3236 /* Allow debugging the jump pad, gdb_collect, etc. */
3237 if (!gdb_breakpoint_here (lwp->stop_pc)
3238 && lwp->stop_reason != LWP_STOPPED_BY_WATCHPOINT
3239 && thread->last_resume_kind != resume_step
3240 && maybe_move_out_of_jump_pad (lwp, wstat))
3241 {
3242 if (debug_threads)
3243 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3244 lwpid_of (thread));
3245
3246 if (wstat)
3247 {
3248 lwp->status_pending_p = 0;
3249 enqueue_one_deferred_signal (lwp, wstat);
3250
3251 if (debug_threads)
3252 debug_printf ("Signal %d for LWP %ld deferred "
3253 "(in jump pad)\n",
3254 WSTOPSIG (*wstat), lwpid_of (thread));
3255 }
3256
3257 linux_resume_one_lwp (lwp, 0, 0, NULL);
3258 }
3259 else
3260 lwp->suspended++;
3261 }
3262
3263 static int
3264 lwp_running (struct inferior_list_entry *entry, void *data)
3265 {
3266 struct thread_info *thread = (struct thread_info *) entry;
3267 struct lwp_info *lwp = get_thread_lwp (thread);
3268
3269 if (lwp->dead)
3270 return 0;
3271 if (lwp->stopped)
3272 return 0;
3273 return 1;
3274 }
3275
3276 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3277 If SUSPEND, then also increase the suspend count of every LWP,
3278 except EXCEPT. */
3279
3280 static void
3281 stop_all_lwps (int suspend, struct lwp_info *except)
3282 {
3283 /* Should not be called recursively. */
3284 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3285
3286 if (debug_threads)
3287 {
3288 debug_enter ();
3289 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3290 suspend ? "stop-and-suspend" : "stop",
3291 except != NULL
3292 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3293 : "none");
3294 }
3295
3296 stopping_threads = (suspend
3297 ? STOPPING_AND_SUSPENDING_THREADS
3298 : STOPPING_THREADS);
3299
3300 if (suspend)
3301 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3302 else
3303 find_inferior (&all_threads, send_sigstop_callback, except);
3304 wait_for_sigstop ();
3305 stopping_threads = NOT_STOPPING_THREADS;
3306
3307 if (debug_threads)
3308 {
3309 debug_printf ("stop_all_lwps done, setting stopping_threads "
3310 "back to !stopping\n");
3311 debug_exit ();
3312 }
3313 }
3314
3315 /* Resume execution of the inferior process.
3316 If STEP is nonzero, single-step it.
3317 If SIGNAL is nonzero, give it that signal. */
3318
3319 static void
3320 linux_resume_one_lwp (struct lwp_info *lwp,
3321 int step, int signal, siginfo_t *info)
3322 {
3323 struct thread_info *thread = get_lwp_thread (lwp);
3324 struct thread_info *saved_thread;
3325 int fast_tp_collecting;
3326
3327 if (lwp->stopped == 0)
3328 return;
3329
3330 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3331
3332 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3333
3334 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3335 user used the "jump" command, or "set $pc = foo"). */
3336 if (lwp->stop_pc != get_pc (lwp))
3337 {
3338 /* Collecting 'while-stepping' actions doesn't make sense
3339 anymore. */
3340 release_while_stepping_state_list (thread);
3341 }
3342
3343 /* If we have pending signals or status, and a new signal, enqueue the
3344 signal. Also enqueue the signal if we are waiting to reinsert a
3345 breakpoint; it will be picked up again below. */
3346 if (signal != 0
3347 && (lwp->status_pending_p
3348 || lwp->pending_signals != NULL
3349 || lwp->bp_reinsert != 0
3350 || fast_tp_collecting))
3351 {
3352 struct pending_signals *p_sig;
3353 p_sig = xmalloc (sizeof (*p_sig));
3354 p_sig->prev = lwp->pending_signals;
3355 p_sig->signal = signal;
3356 if (info == NULL)
3357 memset (&p_sig->info, 0, sizeof (siginfo_t));
3358 else
3359 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3360 lwp->pending_signals = p_sig;
3361 }
3362
3363 if (lwp->status_pending_p)
3364 {
3365 if (debug_threads)
3366 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3367 " has pending status\n",
3368 lwpid_of (thread), step ? "step" : "continue", signal,
3369 lwp->stop_expected ? "expected" : "not expected");
3370 return;
3371 }
3372
3373 saved_thread = current_thread;
3374 current_thread = thread;
3375
3376 if (debug_threads)
3377 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3378 lwpid_of (thread), step ? "step" : "continue", signal,
3379 lwp->stop_expected ? "expected" : "not expected");
3380
3381 /* This bit needs some thinking about. If we get a signal that
3382 we must report while a single-step reinsert is still pending,
3383 we often end up resuming the thread. It might be better to
3384 (ew) allow a stack of pending events; then we could be sure that
3385 the reinsert happened right away and not lose any signals.
3386
3387 Making this stack would also shrink the window in which breakpoints are
3388 uninserted (see comment in linux_wait_for_lwp) but not enough for
3389 complete correctness, so it won't solve that problem. It may be
3390 worthwhile just to solve this one, however. */
3391 if (lwp->bp_reinsert != 0)
3392 {
3393 if (debug_threads)
3394 debug_printf (" pending reinsert at 0x%s\n",
3395 paddress (lwp->bp_reinsert));
3396
3397 if (can_hardware_single_step ())
3398 {
3399 if (fast_tp_collecting == 0)
3400 {
3401 if (step == 0)
3402 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3403 if (lwp->suspended)
3404 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3405 lwp->suspended);
3406 }
3407
3408 step = 1;
3409 }
3410
3411 /* Postpone any pending signal. It was enqueued above. */
3412 signal = 0;
3413 }
3414
3415 if (fast_tp_collecting == 1)
3416 {
3417 if (debug_threads)
3418 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3419 " (exit-jump-pad-bkpt)\n",
3420 lwpid_of (thread));
3421
3422 /* Postpone any pending signal. It was enqueued above. */
3423 signal = 0;
3424 }
3425 else if (fast_tp_collecting == 2)
3426 {
3427 if (debug_threads)
3428 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3429 " single-stepping\n",
3430 lwpid_of (thread));
3431
3432 if (can_hardware_single_step ())
3433 step = 1;
3434 else
3435 {
3436 internal_error (__FILE__, __LINE__,
3437 "moving out of jump pad single-stepping"
3438 " not implemented on this target");
3439 }
3440
3441 /* Postpone any pending signal. It was enqueued above. */
3442 signal = 0;
3443 }
3444
3445 /* If we have while-stepping actions in this thread set it stepping.
3446 If we have a signal to deliver, it may or may not be set to
3447 SIG_IGN, we don't know. Assume so, and allow collecting
3448 while-stepping into a signal handler. A possible smart thing to
3449 do would be to set an internal breakpoint at the signal return
3450 address, continue, and carry on catching this while-stepping
3451 action only when that breakpoint is hit. A future
3452 enhancement. */
3453 if (thread->while_stepping != NULL
3454 && can_hardware_single_step ())
3455 {
3456 if (debug_threads)
3457 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3458 lwpid_of (thread));
3459 step = 1;
3460 }
3461
3462 if (the_low_target.get_pc != NULL)
3463 {
3464 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3465
3466 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3467
3468 if (debug_threads)
3469 {
3470 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3471 (long) lwp->stop_pc);
3472 }
3473 }
3474
3475 /* If we have pending signals, consume one unless we are trying to
3476 reinsert a breakpoint or we're trying to finish a fast tracepoint
3477 collect. */
3478 if (lwp->pending_signals != NULL
3479 && lwp->bp_reinsert == 0
3480 && fast_tp_collecting == 0)
3481 {
3482 struct pending_signals **p_sig;
3483
3484 p_sig = &lwp->pending_signals;
3485 while ((*p_sig)->prev != NULL)
3486 p_sig = &(*p_sig)->prev;
3487
3488 signal = (*p_sig)->signal;
3489 if ((*p_sig)->info.si_signo != 0)
3490 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3491 &(*p_sig)->info);
3492
3493 free (*p_sig);
3494 *p_sig = NULL;
3495 }
3496
3497 if (the_low_target.prepare_to_resume != NULL)
3498 the_low_target.prepare_to_resume (lwp);
3499
3500 regcache_invalidate_thread (thread);
3501 errno = 0;
3502 lwp->stopped = 0;
3503 lwp->stop_reason = LWP_STOPPED_BY_NO_REASON;
3504 lwp->stepping = step;
3505 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3506 (PTRACE_TYPE_ARG3) 0,
3507 /* Coerce to a uintptr_t first to avoid potential gcc warning
3508 of coercing an 8 byte integer to a 4 byte pointer. */
3509 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3510
3511 current_thread = saved_thread;
3512 if (errno)
3513 {
3514 /* ESRCH from ptrace either means that the thread was already
3515 running (an error) or that it is gone (a race condition). If
3516 it's gone, we will get a notification the next time we wait,
3517 so we can ignore the error. We could differentiate these
3518 two, but it's tricky without waiting; the thread still exists
3519 as a zombie, so sending it signal 0 would succeed. So just
3520 ignore ESRCH. */
3521 if (errno == ESRCH)
3522 return;
3523
3524 perror_with_name ("ptrace");
3525 }
3526 }
3527
3528 struct thread_resume_array
3529 {
3530 struct thread_resume *resume;
3531 size_t n;
3532 };
3533
3534 /* This function is called once per thread via find_inferior.
3535 ARG is a pointer to a thread_resume_array struct.
3536 We look up the thread specified by ENTRY in ARG, and mark the thread
3537 with a pointer to the appropriate resume request.
3538
3539 This algorithm is O(threads * resume elements), but resume elements
3540 is small (and will remain small at least until GDB supports thread
3541 suspension). */
3542
3543 static int
3544 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3545 {
3546 struct thread_info *thread = (struct thread_info *) entry;
3547 struct lwp_info *lwp = get_thread_lwp (thread);
3548 int ndx;
3549 struct thread_resume_array *r;
3550
3551 r = arg;
3552
3553 for (ndx = 0; ndx < r->n; ndx++)
3554 {
3555 ptid_t ptid = r->resume[ndx].thread;
3556 if (ptid_equal (ptid, minus_one_ptid)
3557 || ptid_equal (ptid, entry->id)
3558 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3559 of PID'. */
3560 || (ptid_get_pid (ptid) == pid_of (thread)
3561 && (ptid_is_pid (ptid)
3562 || ptid_get_lwp (ptid) == -1)))
3563 {
3564 if (r->resume[ndx].kind == resume_stop
3565 && thread->last_resume_kind == resume_stop)
3566 {
3567 if (debug_threads)
3568 debug_printf ("already %s LWP %ld at GDB's request\n",
3569 (thread->last_status.kind
3570 == TARGET_WAITKIND_STOPPED)
3571 ? "stopped"
3572 : "stopping",
3573 lwpid_of (thread));
3574
3575 continue;
3576 }
3577
3578 lwp->resume = &r->resume[ndx];
3579 thread->last_resume_kind = lwp->resume->kind;
3580
3581 lwp->step_range_start = lwp->resume->step_range_start;
3582 lwp->step_range_end = lwp->resume->step_range_end;
3583
3584 /* If we had a deferred signal to report, dequeue one now.
3585 This can happen if LWP gets more than one signal while
3586 trying to get out of a jump pad. */
3587 if (lwp->stopped
3588 && !lwp->status_pending_p
3589 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3590 {
3591 lwp->status_pending_p = 1;
3592
3593 if (debug_threads)
3594 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3595 "leaving status pending.\n",
3596 WSTOPSIG (lwp->status_pending),
3597 lwpid_of (thread));
3598 }
3599
3600 return 0;
3601 }
3602 }
3603
3604 /* No resume action for this thread. */
3605 lwp->resume = NULL;
3606
3607 return 0;
3608 }
3609
3610 /* find_inferior callback for linux_resume.
3611 Set *FLAG_P if this lwp has an interesting status pending. */
3612
3613 static int
3614 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3615 {
3616 struct thread_info *thread = (struct thread_info *) entry;
3617 struct lwp_info *lwp = get_thread_lwp (thread);
3618
3619 /* LWPs which will not be resumed are not interesting, because
3620 we might not wait for them next time through linux_wait. */
3621 if (lwp->resume == NULL)
3622 return 0;
3623
3624 if (thread_still_has_status_pending_p (thread))
3625 * (int *) flag_p = 1;
3626
3627 return 0;
3628 }
3629
3630 /* Return 1 if this lwp that GDB wants running is stopped at an
3631 internal breakpoint that we need to step over. It assumes that any
3632 required STOP_PC adjustment has already been propagated to the
3633 inferior's regcache. */
3634
3635 static int
3636 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3637 {
3638 struct thread_info *thread = (struct thread_info *) entry;
3639 struct lwp_info *lwp = get_thread_lwp (thread);
3640 struct thread_info *saved_thread;
3641 CORE_ADDR pc;
3642
3643 /* LWPs which will not be resumed are not interesting, because we
3644 might not wait for them next time through linux_wait. */
3645
3646 if (!lwp->stopped)
3647 {
3648 if (debug_threads)
3649 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3650 lwpid_of (thread));
3651 return 0;
3652 }
3653
3654 if (thread->last_resume_kind == resume_stop)
3655 {
3656 if (debug_threads)
3657 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3658 " stopped\n",
3659 lwpid_of (thread));
3660 return 0;
3661 }
3662
3663 gdb_assert (lwp->suspended >= 0);
3664
3665 if (lwp->suspended)
3666 {
3667 if (debug_threads)
3668 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3669 lwpid_of (thread));
3670 return 0;
3671 }
3672
3673 if (!lwp->need_step_over)
3674 {
3675 if (debug_threads)
3676 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3677 }
3678
3679 if (lwp->status_pending_p)
3680 {
3681 if (debug_threads)
3682 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3683 " status.\n",
3684 lwpid_of (thread));
3685 return 0;
3686 }
3687
3688 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3689 or we have. */
3690 pc = get_pc (lwp);
3691
3692 /* If the PC has changed since we stopped, then don't do anything,
3693 and let the breakpoint/tracepoint be hit. This happens if, for
3694 instance, GDB handled the decr_pc_after_break subtraction itself,
3695 GDB is OOL stepping this thread, or the user has issued a "jump"
3696 command, or poked thread's registers herself. */
3697 if (pc != lwp->stop_pc)
3698 {
3699 if (debug_threads)
3700 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3701 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3702 lwpid_of (thread),
3703 paddress (lwp->stop_pc), paddress (pc));
3704
3705 lwp->need_step_over = 0;
3706 return 0;
3707 }
3708
3709 saved_thread = current_thread;
3710 current_thread = thread;
3711
3712 /* We can only step over breakpoints we know about. */
3713 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3714 {
3715 /* Don't step over a breakpoint that GDB expects to hit
3716 though. If the condition is being evaluated on the target's side
3717 and it evaluate to false, step over this breakpoint as well. */
3718 if (gdb_breakpoint_here (pc)
3719 && gdb_condition_true_at_breakpoint (pc)
3720 && gdb_no_commands_at_breakpoint (pc))
3721 {
3722 if (debug_threads)
3723 debug_printf ("Need step over [LWP %ld]? yes, but found"
3724 " GDB breakpoint at 0x%s; skipping step over\n",
3725 lwpid_of (thread), paddress (pc));
3726
3727 current_thread = saved_thread;
3728 return 0;
3729 }
3730 else
3731 {
3732 if (debug_threads)
3733 debug_printf ("Need step over [LWP %ld]? yes, "
3734 "found breakpoint at 0x%s\n",
3735 lwpid_of (thread), paddress (pc));
3736
3737 /* We've found an lwp that needs stepping over --- return 1 so
3738 that find_inferior stops looking. */
3739 current_thread = saved_thread;
3740
3741 /* If the step over is cancelled, this is set again. */
3742 lwp->need_step_over = 0;
3743 return 1;
3744 }
3745 }
3746
3747 current_thread = saved_thread;
3748
3749 if (debug_threads)
3750 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3751 " at 0x%s\n",
3752 lwpid_of (thread), paddress (pc));
3753
3754 return 0;
3755 }
3756
3757 /* Start a step-over operation on LWP. When LWP stopped at a
3758 breakpoint, to make progress, we need to remove the breakpoint out
3759 of the way. If we let other threads run while we do that, they may
3760 pass by the breakpoint location and miss hitting it. To avoid
3761 that, a step-over momentarily stops all threads while LWP is
3762 single-stepped while the breakpoint is temporarily uninserted from
3763 the inferior. When the single-step finishes, we reinsert the
3764 breakpoint, and let all threads that are supposed to be running,
3765 run again.
3766
3767 On targets that don't support hardware single-step, we don't
3768 currently support full software single-stepping. Instead, we only
3769 support stepping over the thread event breakpoint, by asking the
3770 low target where to place a reinsert breakpoint. Since this
3771 routine assumes the breakpoint being stepped over is a thread event
3772 breakpoint, it usually assumes the return address of the current
3773 function is a good enough place to set the reinsert breakpoint. */
3774
3775 static int
3776 start_step_over (struct lwp_info *lwp)
3777 {
3778 struct thread_info *thread = get_lwp_thread (lwp);
3779 struct thread_info *saved_thread;
3780 CORE_ADDR pc;
3781 int step;
3782
3783 if (debug_threads)
3784 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3785 lwpid_of (thread));
3786
3787 stop_all_lwps (1, lwp);
3788 gdb_assert (lwp->suspended == 0);
3789
3790 if (debug_threads)
3791 debug_printf ("Done stopping all threads for step-over.\n");
3792
3793 /* Note, we should always reach here with an already adjusted PC,
3794 either by GDB (if we're resuming due to GDB's request), or by our
3795 caller, if we just finished handling an internal breakpoint GDB
3796 shouldn't care about. */
3797 pc = get_pc (lwp);
3798
3799 saved_thread = current_thread;
3800 current_thread = thread;
3801
3802 lwp->bp_reinsert = pc;
3803 uninsert_breakpoints_at (pc);
3804 uninsert_fast_tracepoint_jumps_at (pc);
3805
3806 if (can_hardware_single_step ())
3807 {
3808 step = 1;
3809 }
3810 else
3811 {
3812 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3813 set_reinsert_breakpoint (raddr);
3814 step = 0;
3815 }
3816
3817 current_thread = saved_thread;
3818
3819 linux_resume_one_lwp (lwp, step, 0, NULL);
3820
3821 /* Require next event from this LWP. */
3822 step_over_bkpt = thread->entry.id;
3823 return 1;
3824 }
3825
3826 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3827 start_step_over, if still there, and delete any reinsert
3828 breakpoints we've set, on non hardware single-step targets. */
3829
3830 static int
3831 finish_step_over (struct lwp_info *lwp)
3832 {
3833 if (lwp->bp_reinsert != 0)
3834 {
3835 if (debug_threads)
3836 debug_printf ("Finished step over.\n");
3837
3838 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3839 may be no breakpoint to reinsert there by now. */
3840 reinsert_breakpoints_at (lwp->bp_reinsert);
3841 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3842
3843 lwp->bp_reinsert = 0;
3844
3845 /* Delete any software-single-step reinsert breakpoints. No
3846 longer needed. We don't have to worry about other threads
3847 hitting this trap, and later not being able to explain it,
3848 because we were stepping over a breakpoint, and we hold all
3849 threads but LWP stopped while doing that. */
3850 if (!can_hardware_single_step ())
3851 delete_reinsert_breakpoints ();
3852
3853 step_over_bkpt = null_ptid;
3854 return 1;
3855 }
3856 else
3857 return 0;
3858 }
3859
3860 /* This function is called once per thread. We check the thread's resume
3861 request, which will tell us whether to resume, step, or leave the thread
3862 stopped; and what signal, if any, it should be sent.
3863
3864 For threads which we aren't explicitly told otherwise, we preserve
3865 the stepping flag; this is used for stepping over gdbserver-placed
3866 breakpoints.
3867
3868 If pending_flags was set in any thread, we queue any needed
3869 signals, since we won't actually resume. We already have a pending
3870 event to report, so we don't need to preserve any step requests;
3871 they should be re-issued if necessary. */
3872
3873 static int
3874 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3875 {
3876 struct thread_info *thread = (struct thread_info *) entry;
3877 struct lwp_info *lwp = get_thread_lwp (thread);
3878 int step;
3879 int leave_all_stopped = * (int *) arg;
3880 int leave_pending;
3881
3882 if (lwp->resume == NULL)
3883 return 0;
3884
3885 if (lwp->resume->kind == resume_stop)
3886 {
3887 if (debug_threads)
3888 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3889
3890 if (!lwp->stopped)
3891 {
3892 if (debug_threads)
3893 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3894
3895 /* Stop the thread, and wait for the event asynchronously,
3896 through the event loop. */
3897 send_sigstop (lwp);
3898 }
3899 else
3900 {
3901 if (debug_threads)
3902 debug_printf ("already stopped LWP %ld\n",
3903 lwpid_of (thread));
3904
3905 /* The LWP may have been stopped in an internal event that
3906 was not meant to be notified back to GDB (e.g., gdbserver
3907 breakpoint), so we should be reporting a stop event in
3908 this case too. */
3909
3910 /* If the thread already has a pending SIGSTOP, this is a
3911 no-op. Otherwise, something later will presumably resume
3912 the thread and this will cause it to cancel any pending
3913 operation, due to last_resume_kind == resume_stop. If
3914 the thread already has a pending status to report, we
3915 will still report it the next time we wait - see
3916 status_pending_p_callback. */
3917
3918 /* If we already have a pending signal to report, then
3919 there's no need to queue a SIGSTOP, as this means we're
3920 midway through moving the LWP out of the jumppad, and we
3921 will report the pending signal as soon as that is
3922 finished. */
3923 if (lwp->pending_signals_to_report == NULL)
3924 send_sigstop (lwp);
3925 }
3926
3927 /* For stop requests, we're done. */
3928 lwp->resume = NULL;
3929 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3930 return 0;
3931 }
3932
3933 /* If this thread which is about to be resumed has a pending status,
3934 then don't resume any threads - we can just report the pending
3935 status. Make sure to queue any signals that would otherwise be
3936 sent. In all-stop mode, we do this decision based on if *any*
3937 thread has a pending status. If there's a thread that needs the
3938 step-over-breakpoint dance, then don't resume any other thread
3939 but that particular one. */
3940 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3941
3942 if (!leave_pending)
3943 {
3944 if (debug_threads)
3945 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3946
3947 step = (lwp->resume->kind == resume_step);
3948 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3949 }
3950 else
3951 {
3952 if (debug_threads)
3953 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3954
3955 /* If we have a new signal, enqueue the signal. */
3956 if (lwp->resume->sig != 0)
3957 {
3958 struct pending_signals *p_sig;
3959 p_sig = xmalloc (sizeof (*p_sig));
3960 p_sig->prev = lwp->pending_signals;
3961 p_sig->signal = lwp->resume->sig;
3962 memset (&p_sig->info, 0, sizeof (siginfo_t));
3963
3964 /* If this is the same signal we were previously stopped by,
3965 make sure to queue its siginfo. We can ignore the return
3966 value of ptrace; if it fails, we'll skip
3967 PTRACE_SETSIGINFO. */
3968 if (WIFSTOPPED (lwp->last_status)
3969 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3970 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3971 &p_sig->info);
3972
3973 lwp->pending_signals = p_sig;
3974 }
3975 }
3976
3977 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3978 lwp->resume = NULL;
3979 return 0;
3980 }
3981
3982 static void
3983 linux_resume (struct thread_resume *resume_info, size_t n)
3984 {
3985 struct thread_resume_array array = { resume_info, n };
3986 struct thread_info *need_step_over = NULL;
3987 int any_pending;
3988 int leave_all_stopped;
3989
3990 if (debug_threads)
3991 {
3992 debug_enter ();
3993 debug_printf ("linux_resume:\n");
3994 }
3995
3996 find_inferior (&all_threads, linux_set_resume_request, &array);
3997
3998 /* If there is a thread which would otherwise be resumed, which has
3999 a pending status, then don't resume any threads - we can just
4000 report the pending status. Make sure to queue any signals that
4001 would otherwise be sent. In non-stop mode, we'll apply this
4002 logic to each thread individually. We consume all pending events
4003 before considering to start a step-over (in all-stop). */
4004 any_pending = 0;
4005 if (!non_stop)
4006 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4007
4008 /* If there is a thread which would otherwise be resumed, which is
4009 stopped at a breakpoint that needs stepping over, then don't
4010 resume any threads - have it step over the breakpoint with all
4011 other threads stopped, then resume all threads again. Make sure
4012 to queue any signals that would otherwise be delivered or
4013 queued. */
4014 if (!any_pending && supports_breakpoints ())
4015 need_step_over
4016 = (struct thread_info *) find_inferior (&all_threads,
4017 need_step_over_p, NULL);
4018
4019 leave_all_stopped = (need_step_over != NULL || any_pending);
4020
4021 if (debug_threads)
4022 {
4023 if (need_step_over != NULL)
4024 debug_printf ("Not resuming all, need step over\n");
4025 else if (any_pending)
4026 debug_printf ("Not resuming, all-stop and found "
4027 "an LWP with pending status\n");
4028 else
4029 debug_printf ("Resuming, no pending status or step over needed\n");
4030 }
4031
4032 /* Even if we're leaving threads stopped, queue all signals we'd
4033 otherwise deliver. */
4034 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4035
4036 if (need_step_over)
4037 start_step_over (get_thread_lwp (need_step_over));
4038
4039 if (debug_threads)
4040 {
4041 debug_printf ("linux_resume done\n");
4042 debug_exit ();
4043 }
4044 }
4045
4046 /* This function is called once per thread. We check the thread's
4047 last resume request, which will tell us whether to resume, step, or
4048 leave the thread stopped. Any signal the client requested to be
4049 delivered has already been enqueued at this point.
4050
4051 If any thread that GDB wants running is stopped at an internal
4052 breakpoint that needs stepping over, we start a step-over operation
4053 on that particular thread, and leave all others stopped. */
4054
4055 static int
4056 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4057 {
4058 struct thread_info *thread = (struct thread_info *) entry;
4059 struct lwp_info *lwp = get_thread_lwp (thread);
4060 int step;
4061
4062 if (lwp == except)
4063 return 0;
4064
4065 if (debug_threads)
4066 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4067
4068 if (!lwp->stopped)
4069 {
4070 if (debug_threads)
4071 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4072 return 0;
4073 }
4074
4075 if (thread->last_resume_kind == resume_stop
4076 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4077 {
4078 if (debug_threads)
4079 debug_printf (" client wants LWP to remain %ld stopped\n",
4080 lwpid_of (thread));
4081 return 0;
4082 }
4083
4084 if (lwp->status_pending_p)
4085 {
4086 if (debug_threads)
4087 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4088 lwpid_of (thread));
4089 return 0;
4090 }
4091
4092 gdb_assert (lwp->suspended >= 0);
4093
4094 if (lwp->suspended)
4095 {
4096 if (debug_threads)
4097 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4098 return 0;
4099 }
4100
4101 if (thread->last_resume_kind == resume_stop
4102 && lwp->pending_signals_to_report == NULL
4103 && lwp->collecting_fast_tracepoint == 0)
4104 {
4105 /* We haven't reported this LWP as stopped yet (otherwise, the
4106 last_status.kind check above would catch it, and we wouldn't
4107 reach here. This LWP may have been momentarily paused by a
4108 stop_all_lwps call while handling for example, another LWP's
4109 step-over. In that case, the pending expected SIGSTOP signal
4110 that was queued at vCont;t handling time will have already
4111 been consumed by wait_for_sigstop, and so we need to requeue
4112 another one here. Note that if the LWP already has a SIGSTOP
4113 pending, this is a no-op. */
4114
4115 if (debug_threads)
4116 debug_printf ("Client wants LWP %ld to stop. "
4117 "Making sure it has a SIGSTOP pending\n",
4118 lwpid_of (thread));
4119
4120 send_sigstop (lwp);
4121 }
4122
4123 step = thread->last_resume_kind == resume_step;
4124 linux_resume_one_lwp (lwp, step, 0, NULL);
4125 return 0;
4126 }
4127
4128 static int
4129 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4130 {
4131 struct thread_info *thread = (struct thread_info *) entry;
4132 struct lwp_info *lwp = get_thread_lwp (thread);
4133
4134 if (lwp == except)
4135 return 0;
4136
4137 lwp->suspended--;
4138 gdb_assert (lwp->suspended >= 0);
4139
4140 return proceed_one_lwp (entry, except);
4141 }
4142
4143 /* When we finish a step-over, set threads running again. If there's
4144 another thread that may need a step-over, now's the time to start
4145 it. Eventually, we'll move all threads past their breakpoints. */
4146
4147 static void
4148 proceed_all_lwps (void)
4149 {
4150 struct thread_info *need_step_over;
4151
4152 /* If there is a thread which would otherwise be resumed, which is
4153 stopped at a breakpoint that needs stepping over, then don't
4154 resume any threads - have it step over the breakpoint with all
4155 other threads stopped, then resume all threads again. */
4156
4157 if (supports_breakpoints ())
4158 {
4159 need_step_over
4160 = (struct thread_info *) find_inferior (&all_threads,
4161 need_step_over_p, NULL);
4162
4163 if (need_step_over != NULL)
4164 {
4165 if (debug_threads)
4166 debug_printf ("proceed_all_lwps: found "
4167 "thread %ld needing a step-over\n",
4168 lwpid_of (need_step_over));
4169
4170 start_step_over (get_thread_lwp (need_step_over));
4171 return;
4172 }
4173 }
4174
4175 if (debug_threads)
4176 debug_printf ("Proceeding, no step-over needed\n");
4177
4178 find_inferior (&all_threads, proceed_one_lwp, NULL);
4179 }
4180
4181 /* Stopped LWPs that the client wanted to be running, that don't have
4182 pending statuses, are set to run again, except for EXCEPT, if not
4183 NULL. This undoes a stop_all_lwps call. */
4184
4185 static void
4186 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4187 {
4188 if (debug_threads)
4189 {
4190 debug_enter ();
4191 if (except)
4192 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4193 lwpid_of (get_lwp_thread (except)));
4194 else
4195 debug_printf ("unstopping all lwps\n");
4196 }
4197
4198 if (unsuspend)
4199 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4200 else
4201 find_inferior (&all_threads, proceed_one_lwp, except);
4202
4203 if (debug_threads)
4204 {
4205 debug_printf ("unstop_all_lwps done\n");
4206 debug_exit ();
4207 }
4208 }
4209
4210
4211 #ifdef HAVE_LINUX_REGSETS
4212
4213 #define use_linux_regsets 1
4214
4215 /* Returns true if REGSET has been disabled. */
4216
4217 static int
4218 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4219 {
4220 return (info->disabled_regsets != NULL
4221 && info->disabled_regsets[regset - info->regsets]);
4222 }
4223
4224 /* Disable REGSET. */
4225
4226 static void
4227 disable_regset (struct regsets_info *info, struct regset_info *regset)
4228 {
4229 int dr_offset;
4230
4231 dr_offset = regset - info->regsets;
4232 if (info->disabled_regsets == NULL)
4233 info->disabled_regsets = xcalloc (1, info->num_regsets);
4234 info->disabled_regsets[dr_offset] = 1;
4235 }
4236
4237 static int
4238 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4239 struct regcache *regcache)
4240 {
4241 struct regset_info *regset;
4242 int saw_general_regs = 0;
4243 int pid;
4244 struct iovec iov;
4245
4246 pid = lwpid_of (current_thread);
4247 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4248 {
4249 void *buf, *data;
4250 int nt_type, res;
4251
4252 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4253 continue;
4254
4255 buf = xmalloc (regset->size);
4256
4257 nt_type = regset->nt_type;
4258 if (nt_type)
4259 {
4260 iov.iov_base = buf;
4261 iov.iov_len = regset->size;
4262 data = (void *) &iov;
4263 }
4264 else
4265 data = buf;
4266
4267 #ifndef __sparc__
4268 res = ptrace (regset->get_request, pid,
4269 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4270 #else
4271 res = ptrace (regset->get_request, pid, data, nt_type);
4272 #endif
4273 if (res < 0)
4274 {
4275 if (errno == EIO)
4276 {
4277 /* If we get EIO on a regset, do not try it again for
4278 this process mode. */
4279 disable_regset (regsets_info, regset);
4280 }
4281 else if (errno == ENODATA)
4282 {
4283 /* ENODATA may be returned if the regset is currently
4284 not "active". This can happen in normal operation,
4285 so suppress the warning in this case. */
4286 }
4287 else
4288 {
4289 char s[256];
4290 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4291 pid);
4292 perror (s);
4293 }
4294 }
4295 else
4296 {
4297 if (regset->type == GENERAL_REGS)
4298 saw_general_regs = 1;
4299 regset->store_function (regcache, buf);
4300 }
4301 free (buf);
4302 }
4303 if (saw_general_regs)
4304 return 0;
4305 else
4306 return 1;
4307 }
4308
4309 static int
4310 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4311 struct regcache *regcache)
4312 {
4313 struct regset_info *regset;
4314 int saw_general_regs = 0;
4315 int pid;
4316 struct iovec iov;
4317
4318 pid = lwpid_of (current_thread);
4319 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4320 {
4321 void *buf, *data;
4322 int nt_type, res;
4323
4324 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4325 || regset->fill_function == NULL)
4326 continue;
4327
4328 buf = xmalloc (regset->size);
4329
4330 /* First fill the buffer with the current register set contents,
4331 in case there are any items in the kernel's regset that are
4332 not in gdbserver's regcache. */
4333
4334 nt_type = regset->nt_type;
4335 if (nt_type)
4336 {
4337 iov.iov_base = buf;
4338 iov.iov_len = regset->size;
4339 data = (void *) &iov;
4340 }
4341 else
4342 data = buf;
4343
4344 #ifndef __sparc__
4345 res = ptrace (regset->get_request, pid,
4346 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4347 #else
4348 res = ptrace (regset->get_request, pid, data, nt_type);
4349 #endif
4350
4351 if (res == 0)
4352 {
4353 /* Then overlay our cached registers on that. */
4354 regset->fill_function (regcache, buf);
4355
4356 /* Only now do we write the register set. */
4357 #ifndef __sparc__
4358 res = ptrace (regset->set_request, pid,
4359 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4360 #else
4361 res = ptrace (regset->set_request, pid, data, nt_type);
4362 #endif
4363 }
4364
4365 if (res < 0)
4366 {
4367 if (errno == EIO)
4368 {
4369 /* If we get EIO on a regset, do not try it again for
4370 this process mode. */
4371 disable_regset (regsets_info, regset);
4372 }
4373 else if (errno == ESRCH)
4374 {
4375 /* At this point, ESRCH should mean the process is
4376 already gone, in which case we simply ignore attempts
4377 to change its registers. See also the related
4378 comment in linux_resume_one_lwp. */
4379 free (buf);
4380 return 0;
4381 }
4382 else
4383 {
4384 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4385 }
4386 }
4387 else if (regset->type == GENERAL_REGS)
4388 saw_general_regs = 1;
4389 free (buf);
4390 }
4391 if (saw_general_regs)
4392 return 0;
4393 else
4394 return 1;
4395 }
4396
4397 #else /* !HAVE_LINUX_REGSETS */
4398
4399 #define use_linux_regsets 0
4400 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4401 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4402
4403 #endif
4404
4405 /* Return 1 if register REGNO is supported by one of the regset ptrace
4406 calls or 0 if it has to be transferred individually. */
4407
4408 static int
4409 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4410 {
4411 unsigned char mask = 1 << (regno % 8);
4412 size_t index = regno / 8;
4413
4414 return (use_linux_regsets
4415 && (regs_info->regset_bitmap == NULL
4416 || (regs_info->regset_bitmap[index] & mask) != 0));
4417 }
4418
4419 #ifdef HAVE_LINUX_USRREGS
4420
4421 int
4422 register_addr (const struct usrregs_info *usrregs, int regnum)
4423 {
4424 int addr;
4425
4426 if (regnum < 0 || regnum >= usrregs->num_regs)
4427 error ("Invalid register number %d.", regnum);
4428
4429 addr = usrregs->regmap[regnum];
4430
4431 return addr;
4432 }
4433
4434 /* Fetch one register. */
4435 static void
4436 fetch_register (const struct usrregs_info *usrregs,
4437 struct regcache *regcache, int regno)
4438 {
4439 CORE_ADDR regaddr;
4440 int i, size;
4441 char *buf;
4442 int pid;
4443
4444 if (regno >= usrregs->num_regs)
4445 return;
4446 if ((*the_low_target.cannot_fetch_register) (regno))
4447 return;
4448
4449 regaddr = register_addr (usrregs, regno);
4450 if (regaddr == -1)
4451 return;
4452
4453 size = ((register_size (regcache->tdesc, regno)
4454 + sizeof (PTRACE_XFER_TYPE) - 1)
4455 & -sizeof (PTRACE_XFER_TYPE));
4456 buf = alloca (size);
4457
4458 pid = lwpid_of (current_thread);
4459 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4460 {
4461 errno = 0;
4462 *(PTRACE_XFER_TYPE *) (buf + i) =
4463 ptrace (PTRACE_PEEKUSER, pid,
4464 /* Coerce to a uintptr_t first to avoid potential gcc warning
4465 of coercing an 8 byte integer to a 4 byte pointer. */
4466 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4467 regaddr += sizeof (PTRACE_XFER_TYPE);
4468 if (errno != 0)
4469 error ("reading register %d: %s", regno, strerror (errno));
4470 }
4471
4472 if (the_low_target.supply_ptrace_register)
4473 the_low_target.supply_ptrace_register (regcache, regno, buf);
4474 else
4475 supply_register (regcache, regno, buf);
4476 }
4477
4478 /* Store one register. */
4479 static void
4480 store_register (const struct usrregs_info *usrregs,
4481 struct regcache *regcache, int regno)
4482 {
4483 CORE_ADDR regaddr;
4484 int i, size;
4485 char *buf;
4486 int pid;
4487
4488 if (regno >= usrregs->num_regs)
4489 return;
4490 if ((*the_low_target.cannot_store_register) (regno))
4491 return;
4492
4493 regaddr = register_addr (usrregs, regno);
4494 if (regaddr == -1)
4495 return;
4496
4497 size = ((register_size (regcache->tdesc, regno)
4498 + sizeof (PTRACE_XFER_TYPE) - 1)
4499 & -sizeof (PTRACE_XFER_TYPE));
4500 buf = alloca (size);
4501 memset (buf, 0, size);
4502
4503 if (the_low_target.collect_ptrace_register)
4504 the_low_target.collect_ptrace_register (regcache, regno, buf);
4505 else
4506 collect_register (regcache, regno, buf);
4507
4508 pid = lwpid_of (current_thread);
4509 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4510 {
4511 errno = 0;
4512 ptrace (PTRACE_POKEUSER, pid,
4513 /* Coerce to a uintptr_t first to avoid potential gcc warning
4514 about coercing an 8 byte integer to a 4 byte pointer. */
4515 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4516 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4517 if (errno != 0)
4518 {
4519 /* At this point, ESRCH should mean the process is
4520 already gone, in which case we simply ignore attempts
4521 to change its registers. See also the related
4522 comment in linux_resume_one_lwp. */
4523 if (errno == ESRCH)
4524 return;
4525
4526 if ((*the_low_target.cannot_store_register) (regno) == 0)
4527 error ("writing register %d: %s", regno, strerror (errno));
4528 }
4529 regaddr += sizeof (PTRACE_XFER_TYPE);
4530 }
4531 }
4532
4533 /* Fetch all registers, or just one, from the child process.
4534 If REGNO is -1, do this for all registers, skipping any that are
4535 assumed to have been retrieved by regsets_fetch_inferior_registers,
4536 unless ALL is non-zero.
4537 Otherwise, REGNO specifies which register (so we can save time). */
4538 static void
4539 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4540 struct regcache *regcache, int regno, int all)
4541 {
4542 struct usrregs_info *usr = regs_info->usrregs;
4543
4544 if (regno == -1)
4545 {
4546 for (regno = 0; regno < usr->num_regs; regno++)
4547 if (all || !linux_register_in_regsets (regs_info, regno))
4548 fetch_register (usr, regcache, regno);
4549 }
4550 else
4551 fetch_register (usr, regcache, regno);
4552 }
4553
4554 /* Store our register values back into the inferior.
4555 If REGNO is -1, do this for all registers, skipping any that are
4556 assumed to have been saved by regsets_store_inferior_registers,
4557 unless ALL is non-zero.
4558 Otherwise, REGNO specifies which register (so we can save time). */
4559 static void
4560 usr_store_inferior_registers (const struct regs_info *regs_info,
4561 struct regcache *regcache, int regno, int all)
4562 {
4563 struct usrregs_info *usr = regs_info->usrregs;
4564
4565 if (regno == -1)
4566 {
4567 for (regno = 0; regno < usr->num_regs; regno++)
4568 if (all || !linux_register_in_regsets (regs_info, regno))
4569 store_register (usr, regcache, regno);
4570 }
4571 else
4572 store_register (usr, regcache, regno);
4573 }
4574
4575 #else /* !HAVE_LINUX_USRREGS */
4576
4577 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4578 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4579
4580 #endif
4581
4582
4583 void
4584 linux_fetch_registers (struct regcache *regcache, int regno)
4585 {
4586 int use_regsets;
4587 int all = 0;
4588 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4589
4590 if (regno == -1)
4591 {
4592 if (the_low_target.fetch_register != NULL
4593 && regs_info->usrregs != NULL)
4594 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4595 (*the_low_target.fetch_register) (regcache, regno);
4596
4597 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4598 if (regs_info->usrregs != NULL)
4599 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4600 }
4601 else
4602 {
4603 if (the_low_target.fetch_register != NULL
4604 && (*the_low_target.fetch_register) (regcache, regno))
4605 return;
4606
4607 use_regsets = linux_register_in_regsets (regs_info, regno);
4608 if (use_regsets)
4609 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4610 regcache);
4611 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4612 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4613 }
4614 }
4615
4616 void
4617 linux_store_registers (struct regcache *regcache, int regno)
4618 {
4619 int use_regsets;
4620 int all = 0;
4621 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4622
4623 if (regno == -1)
4624 {
4625 all = regsets_store_inferior_registers (regs_info->regsets_info,
4626 regcache);
4627 if (regs_info->usrregs != NULL)
4628 usr_store_inferior_registers (regs_info, regcache, regno, all);
4629 }
4630 else
4631 {
4632 use_regsets = linux_register_in_regsets (regs_info, regno);
4633 if (use_regsets)
4634 all = regsets_store_inferior_registers (regs_info->regsets_info,
4635 regcache);
4636 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4637 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4638 }
4639 }
4640
4641
4642 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4643 to debugger memory starting at MYADDR. */
4644
4645 static int
4646 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4647 {
4648 int pid = lwpid_of (current_thread);
4649 register PTRACE_XFER_TYPE *buffer;
4650 register CORE_ADDR addr;
4651 register int count;
4652 char filename[64];
4653 register int i;
4654 int ret;
4655 int fd;
4656
4657 /* Try using /proc. Don't bother for one word. */
4658 if (len >= 3 * sizeof (long))
4659 {
4660 int bytes;
4661
4662 /* We could keep this file open and cache it - possibly one per
4663 thread. That requires some juggling, but is even faster. */
4664 sprintf (filename, "/proc/%d/mem", pid);
4665 fd = open (filename, O_RDONLY | O_LARGEFILE);
4666 if (fd == -1)
4667 goto no_proc;
4668
4669 /* If pread64 is available, use it. It's faster if the kernel
4670 supports it (only one syscall), and it's 64-bit safe even on
4671 32-bit platforms (for instance, SPARC debugging a SPARC64
4672 application). */
4673 #ifdef HAVE_PREAD64
4674 bytes = pread64 (fd, myaddr, len, memaddr);
4675 #else
4676 bytes = -1;
4677 if (lseek (fd, memaddr, SEEK_SET) != -1)
4678 bytes = read (fd, myaddr, len);
4679 #endif
4680
4681 close (fd);
4682 if (bytes == len)
4683 return 0;
4684
4685 /* Some data was read, we'll try to get the rest with ptrace. */
4686 if (bytes > 0)
4687 {
4688 memaddr += bytes;
4689 myaddr += bytes;
4690 len -= bytes;
4691 }
4692 }
4693
4694 no_proc:
4695 /* Round starting address down to longword boundary. */
4696 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4697 /* Round ending address up; get number of longwords that makes. */
4698 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4699 / sizeof (PTRACE_XFER_TYPE));
4700 /* Allocate buffer of that many longwords. */
4701 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4702
4703 /* Read all the longwords */
4704 errno = 0;
4705 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4706 {
4707 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4708 about coercing an 8 byte integer to a 4 byte pointer. */
4709 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4710 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4711 (PTRACE_TYPE_ARG4) 0);
4712 if (errno)
4713 break;
4714 }
4715 ret = errno;
4716
4717 /* Copy appropriate bytes out of the buffer. */
4718 if (i > 0)
4719 {
4720 i *= sizeof (PTRACE_XFER_TYPE);
4721 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4722 memcpy (myaddr,
4723 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4724 i < len ? i : len);
4725 }
4726
4727 return ret;
4728 }
4729
4730 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4731 memory at MEMADDR. On failure (cannot write to the inferior)
4732 returns the value of errno. Always succeeds if LEN is zero. */
4733
4734 static int
4735 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4736 {
4737 register int i;
4738 /* Round starting address down to longword boundary. */
4739 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4740 /* Round ending address up; get number of longwords that makes. */
4741 register int count
4742 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4743 / sizeof (PTRACE_XFER_TYPE);
4744
4745 /* Allocate buffer of that many longwords. */
4746 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4747 alloca (count * sizeof (PTRACE_XFER_TYPE));
4748
4749 int pid = lwpid_of (current_thread);
4750
4751 if (len == 0)
4752 {
4753 /* Zero length write always succeeds. */
4754 return 0;
4755 }
4756
4757 if (debug_threads)
4758 {
4759 /* Dump up to four bytes. */
4760 unsigned int val = * (unsigned int *) myaddr;
4761 if (len == 1)
4762 val = val & 0xff;
4763 else if (len == 2)
4764 val = val & 0xffff;
4765 else if (len == 3)
4766 val = val & 0xffffff;
4767 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4768 val, (long)memaddr);
4769 }
4770
4771 /* Fill start and end extra bytes of buffer with existing memory data. */
4772
4773 errno = 0;
4774 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4775 about coercing an 8 byte integer to a 4 byte pointer. */
4776 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4777 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4778 (PTRACE_TYPE_ARG4) 0);
4779 if (errno)
4780 return errno;
4781
4782 if (count > 1)
4783 {
4784 errno = 0;
4785 buffer[count - 1]
4786 = ptrace (PTRACE_PEEKTEXT, pid,
4787 /* Coerce to a uintptr_t first to avoid potential gcc warning
4788 about coercing an 8 byte integer to a 4 byte pointer. */
4789 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4790 * sizeof (PTRACE_XFER_TYPE)),
4791 (PTRACE_TYPE_ARG4) 0);
4792 if (errno)
4793 return errno;
4794 }
4795
4796 /* Copy data to be written over corresponding part of buffer. */
4797
4798 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4799 myaddr, len);
4800
4801 /* Write the entire buffer. */
4802
4803 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4804 {
4805 errno = 0;
4806 ptrace (PTRACE_POKETEXT, pid,
4807 /* Coerce to a uintptr_t first to avoid potential gcc warning
4808 about coercing an 8 byte integer to a 4 byte pointer. */
4809 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4810 (PTRACE_TYPE_ARG4) buffer[i]);
4811 if (errno)
4812 return errno;
4813 }
4814
4815 return 0;
4816 }
4817
4818 static void
4819 linux_look_up_symbols (void)
4820 {
4821 #ifdef USE_THREAD_DB
4822 struct process_info *proc = current_process ();
4823
4824 if (proc->private->thread_db != NULL)
4825 return;
4826
4827 /* If the kernel supports tracing clones, then we don't need to
4828 use the magic thread event breakpoint to learn about
4829 threads. */
4830 thread_db_init (!linux_supports_traceclone ());
4831 #endif
4832 }
4833
4834 static void
4835 linux_request_interrupt (void)
4836 {
4837 extern unsigned long signal_pid;
4838
4839 /* Send a SIGINT to the process group. This acts just like the user
4840 typed a ^C on the controlling terminal. */
4841 kill (-signal_pid, SIGINT);
4842 }
4843
4844 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4845 to debugger memory starting at MYADDR. */
4846
4847 static int
4848 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4849 {
4850 char filename[PATH_MAX];
4851 int fd, n;
4852 int pid = lwpid_of (current_thread);
4853
4854 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4855
4856 fd = open (filename, O_RDONLY);
4857 if (fd < 0)
4858 return -1;
4859
4860 if (offset != (CORE_ADDR) 0
4861 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4862 n = -1;
4863 else
4864 n = read (fd, myaddr, len);
4865
4866 close (fd);
4867
4868 return n;
4869 }
4870
4871 /* These breakpoint and watchpoint related wrapper functions simply
4872 pass on the function call if the target has registered a
4873 corresponding function. */
4874
4875 static int
4876 linux_supports_z_point_type (char z_type)
4877 {
4878 return (the_low_target.supports_z_point_type != NULL
4879 && the_low_target.supports_z_point_type (z_type));
4880 }
4881
4882 static int
4883 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4884 int size, struct raw_breakpoint *bp)
4885 {
4886 if (the_low_target.insert_point != NULL)
4887 return the_low_target.insert_point (type, addr, size, bp);
4888 else
4889 /* Unsupported (see target.h). */
4890 return 1;
4891 }
4892
4893 static int
4894 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4895 int size, struct raw_breakpoint *bp)
4896 {
4897 if (the_low_target.remove_point != NULL)
4898 return the_low_target.remove_point (type, addr, size, bp);
4899 else
4900 /* Unsupported (see target.h). */
4901 return 1;
4902 }
4903
4904 static int
4905 linux_stopped_by_watchpoint (void)
4906 {
4907 struct lwp_info *lwp = get_thread_lwp (current_thread);
4908
4909 return lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
4910 }
4911
4912 static CORE_ADDR
4913 linux_stopped_data_address (void)
4914 {
4915 struct lwp_info *lwp = get_thread_lwp (current_thread);
4916
4917 return lwp->stopped_data_address;
4918 }
4919
4920 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4921 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4922 && defined(PT_TEXT_END_ADDR)
4923
4924 /* This is only used for targets that define PT_TEXT_ADDR,
4925 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4926 the target has different ways of acquiring this information, like
4927 loadmaps. */
4928
4929 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4930 to tell gdb about. */
4931
4932 static int
4933 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4934 {
4935 unsigned long text, text_end, data;
4936 int pid = lwpid_of (get_thread_lwp (current_thread));
4937
4938 errno = 0;
4939
4940 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4941 (PTRACE_TYPE_ARG4) 0);
4942 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4943 (PTRACE_TYPE_ARG4) 0);
4944 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4945 (PTRACE_TYPE_ARG4) 0);
4946
4947 if (errno == 0)
4948 {
4949 /* Both text and data offsets produced at compile-time (and so
4950 used by gdb) are relative to the beginning of the program,
4951 with the data segment immediately following the text segment.
4952 However, the actual runtime layout in memory may put the data
4953 somewhere else, so when we send gdb a data base-address, we
4954 use the real data base address and subtract the compile-time
4955 data base-address from it (which is just the length of the
4956 text segment). BSS immediately follows data in both
4957 cases. */
4958 *text_p = text;
4959 *data_p = data - (text_end - text);
4960
4961 return 1;
4962 }
4963 return 0;
4964 }
4965 #endif
4966
4967 static int
4968 linux_qxfer_osdata (const char *annex,
4969 unsigned char *readbuf, unsigned const char *writebuf,
4970 CORE_ADDR offset, int len)
4971 {
4972 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4973 }
4974
4975 /* Convert a native/host siginfo object, into/from the siginfo in the
4976 layout of the inferiors' architecture. */
4977
4978 static void
4979 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4980 {
4981 int done = 0;
4982
4983 if (the_low_target.siginfo_fixup != NULL)
4984 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4985
4986 /* If there was no callback, or the callback didn't do anything,
4987 then just do a straight memcpy. */
4988 if (!done)
4989 {
4990 if (direction == 1)
4991 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4992 else
4993 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4994 }
4995 }
4996
4997 static int
4998 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4999 unsigned const char *writebuf, CORE_ADDR offset, int len)
5000 {
5001 int pid;
5002 siginfo_t siginfo;
5003 char inf_siginfo[sizeof (siginfo_t)];
5004
5005 if (current_thread == NULL)
5006 return -1;
5007
5008 pid = lwpid_of (current_thread);
5009
5010 if (debug_threads)
5011 debug_printf ("%s siginfo for lwp %d.\n",
5012 readbuf != NULL ? "Reading" : "Writing",
5013 pid);
5014
5015 if (offset >= sizeof (siginfo))
5016 return -1;
5017
5018 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5019 return -1;
5020
5021 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5022 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5023 inferior with a 64-bit GDBSERVER should look the same as debugging it
5024 with a 32-bit GDBSERVER, we need to convert it. */
5025 siginfo_fixup (&siginfo, inf_siginfo, 0);
5026
5027 if (offset + len > sizeof (siginfo))
5028 len = sizeof (siginfo) - offset;
5029
5030 if (readbuf != NULL)
5031 memcpy (readbuf, inf_siginfo + offset, len);
5032 else
5033 {
5034 memcpy (inf_siginfo + offset, writebuf, len);
5035
5036 /* Convert back to ptrace layout before flushing it out. */
5037 siginfo_fixup (&siginfo, inf_siginfo, 1);
5038
5039 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5040 return -1;
5041 }
5042
5043 return len;
5044 }
5045
5046 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5047 so we notice when children change state; as the handler for the
5048 sigsuspend in my_waitpid. */
5049
5050 static void
5051 sigchld_handler (int signo)
5052 {
5053 int old_errno = errno;
5054
5055 if (debug_threads)
5056 {
5057 do
5058 {
5059 /* fprintf is not async-signal-safe, so call write
5060 directly. */
5061 if (write (2, "sigchld_handler\n",
5062 sizeof ("sigchld_handler\n") - 1) < 0)
5063 break; /* just ignore */
5064 } while (0);
5065 }
5066
5067 if (target_is_async_p ())
5068 async_file_mark (); /* trigger a linux_wait */
5069
5070 errno = old_errno;
5071 }
5072
5073 static int
5074 linux_supports_non_stop (void)
5075 {
5076 return 1;
5077 }
5078
5079 static int
5080 linux_async (int enable)
5081 {
5082 int previous = target_is_async_p ();
5083
5084 if (debug_threads)
5085 debug_printf ("linux_async (%d), previous=%d\n",
5086 enable, previous);
5087
5088 if (previous != enable)
5089 {
5090 sigset_t mask;
5091 sigemptyset (&mask);
5092 sigaddset (&mask, SIGCHLD);
5093
5094 sigprocmask (SIG_BLOCK, &mask, NULL);
5095
5096 if (enable)
5097 {
5098 if (pipe (linux_event_pipe) == -1)
5099 {
5100 linux_event_pipe[0] = -1;
5101 linux_event_pipe[1] = -1;
5102 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5103
5104 warning ("creating event pipe failed.");
5105 return previous;
5106 }
5107
5108 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5109 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5110
5111 /* Register the event loop handler. */
5112 add_file_handler (linux_event_pipe[0],
5113 handle_target_event, NULL);
5114
5115 /* Always trigger a linux_wait. */
5116 async_file_mark ();
5117 }
5118 else
5119 {
5120 delete_file_handler (linux_event_pipe[0]);
5121
5122 close (linux_event_pipe[0]);
5123 close (linux_event_pipe[1]);
5124 linux_event_pipe[0] = -1;
5125 linux_event_pipe[1] = -1;
5126 }
5127
5128 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5129 }
5130
5131 return previous;
5132 }
5133
5134 static int
5135 linux_start_non_stop (int nonstop)
5136 {
5137 /* Register or unregister from event-loop accordingly. */
5138 linux_async (nonstop);
5139
5140 if (target_is_async_p () != (nonstop != 0))
5141 return -1;
5142
5143 return 0;
5144 }
5145
5146 static int
5147 linux_supports_multi_process (void)
5148 {
5149 return 1;
5150 }
5151
5152 static int
5153 linux_supports_disable_randomization (void)
5154 {
5155 #ifdef HAVE_PERSONALITY
5156 return 1;
5157 #else
5158 return 0;
5159 #endif
5160 }
5161
5162 static int
5163 linux_supports_agent (void)
5164 {
5165 return 1;
5166 }
5167
5168 static int
5169 linux_supports_range_stepping (void)
5170 {
5171 if (*the_low_target.supports_range_stepping == NULL)
5172 return 0;
5173
5174 return (*the_low_target.supports_range_stepping) ();
5175 }
5176
5177 /* Enumerate spufs IDs for process PID. */
5178 static int
5179 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5180 {
5181 int pos = 0;
5182 int written = 0;
5183 char path[128];
5184 DIR *dir;
5185 struct dirent *entry;
5186
5187 sprintf (path, "/proc/%ld/fd", pid);
5188 dir = opendir (path);
5189 if (!dir)
5190 return -1;
5191
5192 rewinddir (dir);
5193 while ((entry = readdir (dir)) != NULL)
5194 {
5195 struct stat st;
5196 struct statfs stfs;
5197 int fd;
5198
5199 fd = atoi (entry->d_name);
5200 if (!fd)
5201 continue;
5202
5203 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5204 if (stat (path, &st) != 0)
5205 continue;
5206 if (!S_ISDIR (st.st_mode))
5207 continue;
5208
5209 if (statfs (path, &stfs) != 0)
5210 continue;
5211 if (stfs.f_type != SPUFS_MAGIC)
5212 continue;
5213
5214 if (pos >= offset && pos + 4 <= offset + len)
5215 {
5216 *(unsigned int *)(buf + pos - offset) = fd;
5217 written += 4;
5218 }
5219 pos += 4;
5220 }
5221
5222 closedir (dir);
5223 return written;
5224 }
5225
5226 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5227 object type, using the /proc file system. */
5228 static int
5229 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5230 unsigned const char *writebuf,
5231 CORE_ADDR offset, int len)
5232 {
5233 long pid = lwpid_of (current_thread);
5234 char buf[128];
5235 int fd = 0;
5236 int ret = 0;
5237
5238 if (!writebuf && !readbuf)
5239 return -1;
5240
5241 if (!*annex)
5242 {
5243 if (!readbuf)
5244 return -1;
5245 else
5246 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5247 }
5248
5249 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5250 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5251 if (fd <= 0)
5252 return -1;
5253
5254 if (offset != 0
5255 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5256 {
5257 close (fd);
5258 return 0;
5259 }
5260
5261 if (writebuf)
5262 ret = write (fd, writebuf, (size_t) len);
5263 else
5264 ret = read (fd, readbuf, (size_t) len);
5265
5266 close (fd);
5267 return ret;
5268 }
5269
5270 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5271 struct target_loadseg
5272 {
5273 /* Core address to which the segment is mapped. */
5274 Elf32_Addr addr;
5275 /* VMA recorded in the program header. */
5276 Elf32_Addr p_vaddr;
5277 /* Size of this segment in memory. */
5278 Elf32_Word p_memsz;
5279 };
5280
5281 # if defined PT_GETDSBT
5282 struct target_loadmap
5283 {
5284 /* Protocol version number, must be zero. */
5285 Elf32_Word version;
5286 /* Pointer to the DSBT table, its size, and the DSBT index. */
5287 unsigned *dsbt_table;
5288 unsigned dsbt_size, dsbt_index;
5289 /* Number of segments in this map. */
5290 Elf32_Word nsegs;
5291 /* The actual memory map. */
5292 struct target_loadseg segs[/*nsegs*/];
5293 };
5294 # define LINUX_LOADMAP PT_GETDSBT
5295 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5296 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5297 # else
5298 struct target_loadmap
5299 {
5300 /* Protocol version number, must be zero. */
5301 Elf32_Half version;
5302 /* Number of segments in this map. */
5303 Elf32_Half nsegs;
5304 /* The actual memory map. */
5305 struct target_loadseg segs[/*nsegs*/];
5306 };
5307 # define LINUX_LOADMAP PTRACE_GETFDPIC
5308 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5309 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5310 # endif
5311
5312 static int
5313 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5314 unsigned char *myaddr, unsigned int len)
5315 {
5316 int pid = lwpid_of (current_thread);
5317 int addr = -1;
5318 struct target_loadmap *data = NULL;
5319 unsigned int actual_length, copy_length;
5320
5321 if (strcmp (annex, "exec") == 0)
5322 addr = (int) LINUX_LOADMAP_EXEC;
5323 else if (strcmp (annex, "interp") == 0)
5324 addr = (int) LINUX_LOADMAP_INTERP;
5325 else
5326 return -1;
5327
5328 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5329 return -1;
5330
5331 if (data == NULL)
5332 return -1;
5333
5334 actual_length = sizeof (struct target_loadmap)
5335 + sizeof (struct target_loadseg) * data->nsegs;
5336
5337 if (offset < 0 || offset > actual_length)
5338 return -1;
5339
5340 copy_length = actual_length - offset < len ? actual_length - offset : len;
5341 memcpy (myaddr, (char *) data + offset, copy_length);
5342 return copy_length;
5343 }
5344 #else
5345 # define linux_read_loadmap NULL
5346 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5347
5348 static void
5349 linux_process_qsupported (const char *query)
5350 {
5351 if (the_low_target.process_qsupported != NULL)
5352 the_low_target.process_qsupported (query);
5353 }
5354
5355 static int
5356 linux_supports_tracepoints (void)
5357 {
5358 if (*the_low_target.supports_tracepoints == NULL)
5359 return 0;
5360
5361 return (*the_low_target.supports_tracepoints) ();
5362 }
5363
5364 static CORE_ADDR
5365 linux_read_pc (struct regcache *regcache)
5366 {
5367 if (the_low_target.get_pc == NULL)
5368 return 0;
5369
5370 return (*the_low_target.get_pc) (regcache);
5371 }
5372
5373 static void
5374 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5375 {
5376 gdb_assert (the_low_target.set_pc != NULL);
5377
5378 (*the_low_target.set_pc) (regcache, pc);
5379 }
5380
5381 static int
5382 linux_thread_stopped (struct thread_info *thread)
5383 {
5384 return get_thread_lwp (thread)->stopped;
5385 }
5386
5387 /* This exposes stop-all-threads functionality to other modules. */
5388
5389 static void
5390 linux_pause_all (int freeze)
5391 {
5392 stop_all_lwps (freeze, NULL);
5393 }
5394
5395 /* This exposes unstop-all-threads functionality to other gdbserver
5396 modules. */
5397
5398 static void
5399 linux_unpause_all (int unfreeze)
5400 {
5401 unstop_all_lwps (unfreeze, NULL);
5402 }
5403
5404 static int
5405 linux_prepare_to_access_memory (void)
5406 {
5407 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5408 running LWP. */
5409 if (non_stop)
5410 linux_pause_all (1);
5411 return 0;
5412 }
5413
5414 static void
5415 linux_done_accessing_memory (void)
5416 {
5417 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5418 running LWP. */
5419 if (non_stop)
5420 linux_unpause_all (1);
5421 }
5422
5423 static int
5424 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5425 CORE_ADDR collector,
5426 CORE_ADDR lockaddr,
5427 ULONGEST orig_size,
5428 CORE_ADDR *jump_entry,
5429 CORE_ADDR *trampoline,
5430 ULONGEST *trampoline_size,
5431 unsigned char *jjump_pad_insn,
5432 ULONGEST *jjump_pad_insn_size,
5433 CORE_ADDR *adjusted_insn_addr,
5434 CORE_ADDR *adjusted_insn_addr_end,
5435 char *err)
5436 {
5437 return (*the_low_target.install_fast_tracepoint_jump_pad)
5438 (tpoint, tpaddr, collector, lockaddr, orig_size,
5439 jump_entry, trampoline, trampoline_size,
5440 jjump_pad_insn, jjump_pad_insn_size,
5441 adjusted_insn_addr, adjusted_insn_addr_end,
5442 err);
5443 }
5444
5445 static struct emit_ops *
5446 linux_emit_ops (void)
5447 {
5448 if (the_low_target.emit_ops != NULL)
5449 return (*the_low_target.emit_ops) ();
5450 else
5451 return NULL;
5452 }
5453
5454 static int
5455 linux_get_min_fast_tracepoint_insn_len (void)
5456 {
5457 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5458 }
5459
5460 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5461
5462 static int
5463 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5464 CORE_ADDR *phdr_memaddr, int *num_phdr)
5465 {
5466 char filename[PATH_MAX];
5467 int fd;
5468 const int auxv_size = is_elf64
5469 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5470 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5471
5472 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5473
5474 fd = open (filename, O_RDONLY);
5475 if (fd < 0)
5476 return 1;
5477
5478 *phdr_memaddr = 0;
5479 *num_phdr = 0;
5480 while (read (fd, buf, auxv_size) == auxv_size
5481 && (*phdr_memaddr == 0 || *num_phdr == 0))
5482 {
5483 if (is_elf64)
5484 {
5485 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5486
5487 switch (aux->a_type)
5488 {
5489 case AT_PHDR:
5490 *phdr_memaddr = aux->a_un.a_val;
5491 break;
5492 case AT_PHNUM:
5493 *num_phdr = aux->a_un.a_val;
5494 break;
5495 }
5496 }
5497 else
5498 {
5499 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5500
5501 switch (aux->a_type)
5502 {
5503 case AT_PHDR:
5504 *phdr_memaddr = aux->a_un.a_val;
5505 break;
5506 case AT_PHNUM:
5507 *num_phdr = aux->a_un.a_val;
5508 break;
5509 }
5510 }
5511 }
5512
5513 close (fd);
5514
5515 if (*phdr_memaddr == 0 || *num_phdr == 0)
5516 {
5517 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5518 "phdr_memaddr = %ld, phdr_num = %d",
5519 (long) *phdr_memaddr, *num_phdr);
5520 return 2;
5521 }
5522
5523 return 0;
5524 }
5525
5526 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5527
5528 static CORE_ADDR
5529 get_dynamic (const int pid, const int is_elf64)
5530 {
5531 CORE_ADDR phdr_memaddr, relocation;
5532 int num_phdr, i;
5533 unsigned char *phdr_buf;
5534 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5535
5536 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5537 return 0;
5538
5539 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5540 phdr_buf = alloca (num_phdr * phdr_size);
5541
5542 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5543 return 0;
5544
5545 /* Compute relocation: it is expected to be 0 for "regular" executables,
5546 non-zero for PIE ones. */
5547 relocation = -1;
5548 for (i = 0; relocation == -1 && i < num_phdr; i++)
5549 if (is_elf64)
5550 {
5551 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5552
5553 if (p->p_type == PT_PHDR)
5554 relocation = phdr_memaddr - p->p_vaddr;
5555 }
5556 else
5557 {
5558 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5559
5560 if (p->p_type == PT_PHDR)
5561 relocation = phdr_memaddr - p->p_vaddr;
5562 }
5563
5564 if (relocation == -1)
5565 {
5566 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
5567 any real world executables, including PIE executables, have always
5568 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5569 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
5570 or present DT_DEBUG anyway (fpc binaries are statically linked).
5571
5572 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
5573
5574 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5575
5576 return 0;
5577 }
5578
5579 for (i = 0; i < num_phdr; i++)
5580 {
5581 if (is_elf64)
5582 {
5583 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5584
5585 if (p->p_type == PT_DYNAMIC)
5586 return p->p_vaddr + relocation;
5587 }
5588 else
5589 {
5590 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5591
5592 if (p->p_type == PT_DYNAMIC)
5593 return p->p_vaddr + relocation;
5594 }
5595 }
5596
5597 return 0;
5598 }
5599
5600 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5601 can be 0 if the inferior does not yet have the library list initialized.
5602 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5603 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5604
5605 static CORE_ADDR
5606 get_r_debug (const int pid, const int is_elf64)
5607 {
5608 CORE_ADDR dynamic_memaddr;
5609 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5610 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5611 CORE_ADDR map = -1;
5612
5613 dynamic_memaddr = get_dynamic (pid, is_elf64);
5614 if (dynamic_memaddr == 0)
5615 return map;
5616
5617 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5618 {
5619 if (is_elf64)
5620 {
5621 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5622 #ifdef DT_MIPS_RLD_MAP
5623 union
5624 {
5625 Elf64_Xword map;
5626 unsigned char buf[sizeof (Elf64_Xword)];
5627 }
5628 rld_map;
5629
5630 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5631 {
5632 if (linux_read_memory (dyn->d_un.d_val,
5633 rld_map.buf, sizeof (rld_map.buf)) == 0)
5634 return rld_map.map;
5635 else
5636 break;
5637 }
5638 #endif /* DT_MIPS_RLD_MAP */
5639
5640 if (dyn->d_tag == DT_DEBUG && map == -1)
5641 map = dyn->d_un.d_val;
5642
5643 if (dyn->d_tag == DT_NULL)
5644 break;
5645 }
5646 else
5647 {
5648 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5649 #ifdef DT_MIPS_RLD_MAP
5650 union
5651 {
5652 Elf32_Word map;
5653 unsigned char buf[sizeof (Elf32_Word)];
5654 }
5655 rld_map;
5656
5657 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5658 {
5659 if (linux_read_memory (dyn->d_un.d_val,
5660 rld_map.buf, sizeof (rld_map.buf)) == 0)
5661 return rld_map.map;
5662 else
5663 break;
5664 }
5665 #endif /* DT_MIPS_RLD_MAP */
5666
5667 if (dyn->d_tag == DT_DEBUG && map == -1)
5668 map = dyn->d_un.d_val;
5669
5670 if (dyn->d_tag == DT_NULL)
5671 break;
5672 }
5673
5674 dynamic_memaddr += dyn_size;
5675 }
5676
5677 return map;
5678 }
5679
5680 /* Read one pointer from MEMADDR in the inferior. */
5681
5682 static int
5683 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5684 {
5685 int ret;
5686
5687 /* Go through a union so this works on either big or little endian
5688 hosts, when the inferior's pointer size is smaller than the size
5689 of CORE_ADDR. It is assumed the inferior's endianness is the
5690 same of the superior's. */
5691 union
5692 {
5693 CORE_ADDR core_addr;
5694 unsigned int ui;
5695 unsigned char uc;
5696 } addr;
5697
5698 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5699 if (ret == 0)
5700 {
5701 if (ptr_size == sizeof (CORE_ADDR))
5702 *ptr = addr.core_addr;
5703 else if (ptr_size == sizeof (unsigned int))
5704 *ptr = addr.ui;
5705 else
5706 gdb_assert_not_reached ("unhandled pointer size");
5707 }
5708 return ret;
5709 }
5710
5711 struct link_map_offsets
5712 {
5713 /* Offset and size of r_debug.r_version. */
5714 int r_version_offset;
5715
5716 /* Offset and size of r_debug.r_map. */
5717 int r_map_offset;
5718
5719 /* Offset to l_addr field in struct link_map. */
5720 int l_addr_offset;
5721
5722 /* Offset to l_name field in struct link_map. */
5723 int l_name_offset;
5724
5725 /* Offset to l_ld field in struct link_map. */
5726 int l_ld_offset;
5727
5728 /* Offset to l_next field in struct link_map. */
5729 int l_next_offset;
5730
5731 /* Offset to l_prev field in struct link_map. */
5732 int l_prev_offset;
5733 };
5734
5735 /* Construct qXfer:libraries-svr4:read reply. */
5736
5737 static int
5738 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5739 unsigned const char *writebuf,
5740 CORE_ADDR offset, int len)
5741 {
5742 char *document;
5743 unsigned document_len;
5744 struct process_info_private *const priv = current_process ()->private;
5745 char filename[PATH_MAX];
5746 int pid, is_elf64;
5747
5748 static const struct link_map_offsets lmo_32bit_offsets =
5749 {
5750 0, /* r_version offset. */
5751 4, /* r_debug.r_map offset. */
5752 0, /* l_addr offset in link_map. */
5753 4, /* l_name offset in link_map. */
5754 8, /* l_ld offset in link_map. */
5755 12, /* l_next offset in link_map. */
5756 16 /* l_prev offset in link_map. */
5757 };
5758
5759 static const struct link_map_offsets lmo_64bit_offsets =
5760 {
5761 0, /* r_version offset. */
5762 8, /* r_debug.r_map offset. */
5763 0, /* l_addr offset in link_map. */
5764 8, /* l_name offset in link_map. */
5765 16, /* l_ld offset in link_map. */
5766 24, /* l_next offset in link_map. */
5767 32 /* l_prev offset in link_map. */
5768 };
5769 const struct link_map_offsets *lmo;
5770 unsigned int machine;
5771 int ptr_size;
5772 CORE_ADDR lm_addr = 0, lm_prev = 0;
5773 int allocated = 1024;
5774 char *p;
5775 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5776 int header_done = 0;
5777
5778 if (writebuf != NULL)
5779 return -2;
5780 if (readbuf == NULL)
5781 return -1;
5782
5783 pid = lwpid_of (current_thread);
5784 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5785 is_elf64 = elf_64_file_p (filename, &machine);
5786 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5787 ptr_size = is_elf64 ? 8 : 4;
5788
5789 while (annex[0] != '\0')
5790 {
5791 const char *sep;
5792 CORE_ADDR *addrp;
5793 int len;
5794
5795 sep = strchr (annex, '=');
5796 if (sep == NULL)
5797 break;
5798
5799 len = sep - annex;
5800 if (len == 5 && strncmp (annex, "start", 5) == 0)
5801 addrp = &lm_addr;
5802 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5803 addrp = &lm_prev;
5804 else
5805 {
5806 annex = strchr (sep, ';');
5807 if (annex == NULL)
5808 break;
5809 annex++;
5810 continue;
5811 }
5812
5813 annex = decode_address_to_semicolon (addrp, sep + 1);
5814 }
5815
5816 if (lm_addr == 0)
5817 {
5818 int r_version = 0;
5819
5820 if (priv->r_debug == 0)
5821 priv->r_debug = get_r_debug (pid, is_elf64);
5822
5823 /* We failed to find DT_DEBUG. Such situation will not change
5824 for this inferior - do not retry it. Report it to GDB as
5825 E01, see for the reasons at the GDB solib-svr4.c side. */
5826 if (priv->r_debug == (CORE_ADDR) -1)
5827 return -1;
5828
5829 if (priv->r_debug != 0)
5830 {
5831 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5832 (unsigned char *) &r_version,
5833 sizeof (r_version)) != 0
5834 || r_version != 1)
5835 {
5836 warning ("unexpected r_debug version %d", r_version);
5837 }
5838 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5839 &lm_addr, ptr_size) != 0)
5840 {
5841 warning ("unable to read r_map from 0x%lx",
5842 (long) priv->r_debug + lmo->r_map_offset);
5843 }
5844 }
5845 }
5846
5847 document = xmalloc (allocated);
5848 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5849 p = document + strlen (document);
5850
5851 while (lm_addr
5852 && read_one_ptr (lm_addr + lmo->l_name_offset,
5853 &l_name, ptr_size) == 0
5854 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5855 &l_addr, ptr_size) == 0
5856 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5857 &l_ld, ptr_size) == 0
5858 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5859 &l_prev, ptr_size) == 0
5860 && read_one_ptr (lm_addr + lmo->l_next_offset,
5861 &l_next, ptr_size) == 0)
5862 {
5863 unsigned char libname[PATH_MAX];
5864
5865 if (lm_prev != l_prev)
5866 {
5867 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5868 (long) lm_prev, (long) l_prev);
5869 break;
5870 }
5871
5872 /* Ignore the first entry even if it has valid name as the first entry
5873 corresponds to the main executable. The first entry should not be
5874 skipped if the dynamic loader was loaded late by a static executable
5875 (see solib-svr4.c parameter ignore_first). But in such case the main
5876 executable does not have PT_DYNAMIC present and this function already
5877 exited above due to failed get_r_debug. */
5878 if (lm_prev == 0)
5879 {
5880 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5881 p = p + strlen (p);
5882 }
5883 else
5884 {
5885 /* Not checking for error because reading may stop before
5886 we've got PATH_MAX worth of characters. */
5887 libname[0] = '\0';
5888 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5889 libname[sizeof (libname) - 1] = '\0';
5890 if (libname[0] != '\0')
5891 {
5892 /* 6x the size for xml_escape_text below. */
5893 size_t len = 6 * strlen ((char *) libname);
5894 char *name;
5895
5896 if (!header_done)
5897 {
5898 /* Terminate `<library-list-svr4'. */
5899 *p++ = '>';
5900 header_done = 1;
5901 }
5902
5903 while (allocated < p - document + len + 200)
5904 {
5905 /* Expand to guarantee sufficient storage. */
5906 uintptr_t document_len = p - document;
5907
5908 document = xrealloc (document, 2 * allocated);
5909 allocated *= 2;
5910 p = document + document_len;
5911 }
5912
5913 name = xml_escape_text ((char *) libname);
5914 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5915 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5916 name, (unsigned long) lm_addr,
5917 (unsigned long) l_addr, (unsigned long) l_ld);
5918 free (name);
5919 }
5920 }
5921
5922 lm_prev = lm_addr;
5923 lm_addr = l_next;
5924 }
5925
5926 if (!header_done)
5927 {
5928 /* Empty list; terminate `<library-list-svr4'. */
5929 strcpy (p, "/>");
5930 }
5931 else
5932 strcpy (p, "</library-list-svr4>");
5933
5934 document_len = strlen (document);
5935 if (offset < document_len)
5936 document_len -= offset;
5937 else
5938 document_len = 0;
5939 if (len > document_len)
5940 len = document_len;
5941
5942 memcpy (readbuf, document + offset, len);
5943 xfree (document);
5944
5945 return len;
5946 }
5947
5948 #ifdef HAVE_LINUX_BTRACE
5949
5950 /* See to_enable_btrace target method. */
5951
5952 static struct btrace_target_info *
5953 linux_low_enable_btrace (ptid_t ptid)
5954 {
5955 struct btrace_target_info *tinfo;
5956
5957 tinfo = linux_enable_btrace (ptid);
5958
5959 if (tinfo != NULL)
5960 {
5961 struct thread_info *thread = find_thread_ptid (ptid);
5962 struct regcache *regcache = get_thread_regcache (thread, 0);
5963
5964 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5965 }
5966
5967 return tinfo;
5968 }
5969
5970 /* See to_disable_btrace target method. */
5971
5972 static int
5973 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5974 {
5975 enum btrace_error err;
5976
5977 err = linux_disable_btrace (tinfo);
5978 return (err == BTRACE_ERR_NONE ? 0 : -1);
5979 }
5980
5981 /* See to_read_btrace target method. */
5982
5983 static int
5984 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5985 int type)
5986 {
5987 VEC (btrace_block_s) *btrace;
5988 struct btrace_block *block;
5989 enum btrace_error err;
5990 int i;
5991
5992 btrace = NULL;
5993 err = linux_read_btrace (&btrace, tinfo, type);
5994 if (err != BTRACE_ERR_NONE)
5995 {
5996 if (err == BTRACE_ERR_OVERFLOW)
5997 buffer_grow_str0 (buffer, "E.Overflow.");
5998 else
5999 buffer_grow_str0 (buffer, "E.Generic Error.");
6000
6001 return -1;
6002 }
6003
6004 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6005 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6006
6007 for (i = 0; VEC_iterate (btrace_block_s, btrace, i, block); i++)
6008 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6009 paddress (block->begin), paddress (block->end));
6010
6011 buffer_grow_str0 (buffer, "</btrace>\n");
6012
6013 VEC_free (btrace_block_s, btrace);
6014
6015 return 0;
6016 }
6017 #endif /* HAVE_LINUX_BTRACE */
6018
6019 static struct target_ops linux_target_ops = {
6020 linux_create_inferior,
6021 linux_attach,
6022 linux_kill,
6023 linux_detach,
6024 linux_mourn,
6025 linux_join,
6026 linux_thread_alive,
6027 linux_resume,
6028 linux_wait,
6029 linux_fetch_registers,
6030 linux_store_registers,
6031 linux_prepare_to_access_memory,
6032 linux_done_accessing_memory,
6033 linux_read_memory,
6034 linux_write_memory,
6035 linux_look_up_symbols,
6036 linux_request_interrupt,
6037 linux_read_auxv,
6038 linux_supports_z_point_type,
6039 linux_insert_point,
6040 linux_remove_point,
6041 linux_stopped_by_watchpoint,
6042 linux_stopped_data_address,
6043 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6044 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6045 && defined(PT_TEXT_END_ADDR)
6046 linux_read_offsets,
6047 #else
6048 NULL,
6049 #endif
6050 #ifdef USE_THREAD_DB
6051 thread_db_get_tls_address,
6052 #else
6053 NULL,
6054 #endif
6055 linux_qxfer_spu,
6056 hostio_last_error_from_errno,
6057 linux_qxfer_osdata,
6058 linux_xfer_siginfo,
6059 linux_supports_non_stop,
6060 linux_async,
6061 linux_start_non_stop,
6062 linux_supports_multi_process,
6063 #ifdef USE_THREAD_DB
6064 thread_db_handle_monitor_command,
6065 #else
6066 NULL,
6067 #endif
6068 linux_common_core_of_thread,
6069 linux_read_loadmap,
6070 linux_process_qsupported,
6071 linux_supports_tracepoints,
6072 linux_read_pc,
6073 linux_write_pc,
6074 linux_thread_stopped,
6075 NULL,
6076 linux_pause_all,
6077 linux_unpause_all,
6078 linux_stabilize_threads,
6079 linux_install_fast_tracepoint_jump_pad,
6080 linux_emit_ops,
6081 linux_supports_disable_randomization,
6082 linux_get_min_fast_tracepoint_insn_len,
6083 linux_qxfer_libraries_svr4,
6084 linux_supports_agent,
6085 #ifdef HAVE_LINUX_BTRACE
6086 linux_supports_btrace,
6087 linux_low_enable_btrace,
6088 linux_low_disable_btrace,
6089 linux_low_read_btrace,
6090 #else
6091 NULL,
6092 NULL,
6093 NULL,
6094 NULL,
6095 #endif
6096 linux_supports_range_stepping,
6097 };
6098
6099 static void
6100 linux_init_signals ()
6101 {
6102 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6103 to find what the cancel signal actually is. */
6104 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6105 signal (__SIGRTMIN+1, SIG_IGN);
6106 #endif
6107 }
6108
6109 #ifdef HAVE_LINUX_REGSETS
6110 void
6111 initialize_regsets_info (struct regsets_info *info)
6112 {
6113 for (info->num_regsets = 0;
6114 info->regsets[info->num_regsets].size >= 0;
6115 info->num_regsets++)
6116 ;
6117 }
6118 #endif
6119
6120 void
6121 initialize_low (void)
6122 {
6123 struct sigaction sigchld_action;
6124 memset (&sigchld_action, 0, sizeof (sigchld_action));
6125 set_target_ops (&linux_target_ops);
6126 set_breakpoint_data (the_low_target.breakpoint,
6127 the_low_target.breakpoint_len);
6128 linux_init_signals ();
6129 linux_ptrace_init_warnings ();
6130
6131 sigchld_action.sa_handler = sigchld_handler;
6132 sigemptyset (&sigchld_action.sa_mask);
6133 sigchld_action.sa_flags = SA_RESTART;
6134 sigaction (SIGCHLD, &sigchld_action, NULL);
6135
6136 initialize_low_arch ();
6137 }
This page took 0.180396 seconds and 5 git commands to generate.