Make lwp_info.arch_private handling shared
deliverable/binutils-gdb.git: gdb/gdbserver/linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <sys/ptrace.h>
28 #include "nat/linux-ptrace.h"
29 #include "nat/linux-procfs.h"
30 #include "nat/linux-personality.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <unistd.h>
35 #include <sys/syscall.h>
36 #include <sched.h>
37 #include <ctype.h>
38 #include <pwd.h>
39 #include <sys/types.h>
40 #include <dirent.h>
41 #include <sys/stat.h>
42 #include <sys/vfs.h>
43 #include <sys/uio.h>
44 #include "filestuff.h"
45 #include "tracepoint.h"
46 #include "hostio.h"
47 #ifndef ELFMAG0
48 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52 #include <elf.h>
53 #endif
54
55 #ifndef SPUFS_MAGIC
56 #define SPUFS_MAGIC 0x23c9b64e
57 #endif
58
59 #ifdef HAVE_PERSONALITY
60 # include <sys/personality.h>
61 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
62 # define ADDR_NO_RANDOMIZE 0x0040000
63 # endif
64 #endif
65
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
69
70 #ifndef W_STOPCODE
71 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72 #endif
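/* A minimal sketch (illustrative, not part of the original file) of
   what the fallback above encodes: the stopping signal in bits 8..15
   over the 0x7f "stopped" marker, so the standard wait macros can
   decode it.  On most Linux targets SIGSTOP is 19, giving 0x137f.  */
#if 0
  int status = W_STOPCODE (SIGSTOP);

  gdb_assert (WIFSTOPPED (status));
  gdb_assert (WSTOPSIG (status) == SIGSTOP);
#endif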
73
74 /* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76 #ifndef __SIGRTMIN
77 #define __SIGRTMIN 32
78 #endif
79
80 /* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83 #if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86 #if defined(__mcoldfire__)
87 /* These are still undefined in 3.10 kernels. */
88 #define PT_TEXT_ADDR 49*4
89 #define PT_DATA_ADDR 50*4
90 #define PT_TEXT_END_ADDR 51*4
91 /* BFIN has defined these since at least 2.6.32 kernels. */
92 #elif defined(BFIN)
93 #define PT_TEXT_ADDR 220
94 #define PT_TEXT_END_ADDR 224
95 #define PT_DATA_ADDR 228
96 /* These are still undefined in 3.10 kernels. */
97 #elif defined(__TMS320C6X__)
98 #define PT_TEXT_ADDR (0x10000*4)
99 #define PT_DATA_ADDR (0x10004*4)
100 #define PT_TEXT_END_ADDR (0x10008*4)
101 #endif
102 #endif
103
104 #ifdef HAVE_LINUX_BTRACE
105 # include "nat/linux-btrace.h"
106 # include "btrace-common.h"
107 #endif
108
109 #ifndef HAVE_ELF32_AUXV_T
110 /* Copied from glibc's elf.h. */
111 typedef struct
112 {
113 uint32_t a_type; /* Entry type */
114 union
115 {
116 uint32_t a_val; /* Integer value */
117 /* We used to have pointer elements added here. We cannot do that,
118 though, since it does not work when using 32-bit definitions
119 on 64-bit platforms and vice versa. */
120 } a_un;
121 } Elf32_auxv_t;
122 #endif
123
124 #ifndef HAVE_ELF64_AUXV_T
125 /* Copied from glibc's elf.h. */
126 typedef struct
127 {
128 uint64_t a_type; /* Entry type */
129 union
130 {
131 uint64_t a_val; /* Integer value */
132 /* We used to have pointer elements added here. We cannot do that,
133 though, since it does not work when using 32-bit definitions
134 on 64-bit platforms and vice versa. */
135 } a_un;
136 } Elf64_auxv_t;
137 #endif
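/* A minimal sketch (an assumption-laden illustration, not gdbserver's
   actual auxv reader) of walking a 64-bit auxv blob with the fallback
   type above; AT_NULL terminates the vector, per <elf.h>.  */
#if 0
static uint64_t
find_auxv_entry_64 (const Elf64_auxv_t *auxv, uint64_t type)
{
  for (; auxv->a_type != AT_NULL; auxv++)
    if (auxv->a_type == type)
      return auxv->a_un.a_val;	/* E.g. AT_PHDR, AT_ENTRY.  */
  return 0;
}
#endif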
138
139 /* LWP accessors. */
140
141 /* See nat/linux-nat.h. */
142
143 ptid_t
144 ptid_of_lwp (struct lwp_info *lwp)
145 {
146 return ptid_of (get_lwp_thread (lwp));
147 }
148
149 /* See nat/linux-nat.h. */
150
151 void
152 lwp_set_arch_private_info (struct lwp_info *lwp,
153 struct arch_lwp_info *info)
154 {
155 lwp->arch_private = info;
156 }
157
158 /* See nat/linux-nat.h. */
159
160 struct arch_lwp_info *
161 lwp_arch_private_info (struct lwp_info *lwp)
162 {
163 return lwp->arch_private;
164 }
165
166 /* See nat/linux-nat.h. */
167
168 int
169 lwp_is_stopped (struct lwp_info *lwp)
170 {
171 return lwp->stopped;
172 }
173
174 /* See nat/linux-nat.h. */
175
176 enum target_stop_reason
177 lwp_stop_reason (struct lwp_info *lwp)
178 {
179 return lwp->stop_reason;
180 }
181
182 /* A list of all unknown processes which receive stop signals. Some
183 other process will presumably claim each of these as forked
184 children momentarily. */
185
186 struct simple_pid_list
187 {
188 /* The process ID. */
189 int pid;
190
191 /* The status as reported by waitpid. */
192 int status;
193
194 /* Next in chain. */
195 struct simple_pid_list *next;
196 };
197 struct simple_pid_list *stopped_pids;
198
199 /* Trivial list manipulation functions to keep track of a list of new
200 stopped processes. */
201
202 static void
203 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
204 {
205 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
206
207 new_pid->pid = pid;
208 new_pid->status = status;
209 new_pid->next = *listp;
210 *listp = new_pid;
211 }
212
213 static int
214 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
215 {
216 struct simple_pid_list **p;
217
218 for (p = listp; *p != NULL; p = &(*p)->next)
219 if ((*p)->pid == pid)
220 {
221 struct simple_pid_list *next = (*p)->next;
222
223 *statusp = (*p)->status;
224 xfree (*p);
225 *p = next;
226 return 1;
227 }
228 return 0;
229 }
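/* A usage sketch of the two helpers above (illustrative PID and
   status values, not gdbserver code): linux_low_filter_event parks
   stops for LWPs it does not know yet, and handle_extended_wait
   later claims them by PID.  */
#if 0
  struct simple_pid_list *list = NULL;
  int status;

  /* A stop from unknown LWP 1234 arrives first; record it.  */
  add_to_pid_list (&list, 1234, W_STOPCODE (SIGSTOP));

  /* The clone event naming LWP 1234 arrives; claim the parked stop.  */
  if (pull_pid_from_list (&list, 1234, &status))
    gdb_assert (WSTOPSIG (status) == SIGSTOP);
#endif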
230
231 enum stopping_threads_kind
232 {
233 /* Not stopping threads presently. */
234 NOT_STOPPING_THREADS,
235
236 /* Stopping threads. */
237 STOPPING_THREADS,
238
239 /* Stopping and suspending threads. */
240 STOPPING_AND_SUSPENDING_THREADS
241 };
242
243 /* This is set while stop_all_lwps is in effect. */
244 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
245
246 /* FIXME make into a target method? */
247 int using_threads = 1;
248
249 /* True if we're presently stabilizing threads (moving them out of
250 jump pads). */
251 static int stabilizing_threads;
252
253 static void linux_resume_one_lwp (struct lwp_info *lwp,
254 int step, int signal, siginfo_t *info);
255 static void linux_resume (struct thread_resume *resume_info, size_t n);
256 static void stop_all_lwps (int suspend, struct lwp_info *except);
257 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
258 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
259 int *wstat, int options);
260 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
261 static struct lwp_info *add_lwp (ptid_t ptid);
262 static int linux_stopped_by_watchpoint (void);
263 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
264 static void proceed_all_lwps (void);
265 static int finish_step_over (struct lwp_info *lwp);
266 static int kill_lwp (unsigned long lwpid, int signo);
267
268 /* When the event-loop is doing a step-over, this points at the thread
269 being stepped. */
270 ptid_t step_over_bkpt;
271
272 /* True if the low target can hardware single-step. Such targets
273 don't need a BREAKPOINT_REINSERT_ADDR callback. */
274
275 static int
276 can_hardware_single_step (void)
277 {
278 return (the_low_target.breakpoint_reinsert_addr == NULL);
279 }
280
281 /* True if the low target supports memory breakpoints. If so, we'll
282 have a GET_PC implementation. */
283
284 static int
285 supports_breakpoints (void)
286 {
287 return (the_low_target.get_pc != NULL);
288 }
289
290 /* Returns true if this target can support fast tracepoints. This
291 does not mean that the in-process agent has been loaded in the
292 inferior. */
293
294 static int
295 supports_fast_tracepoints (void)
296 {
297 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
298 }
299
300 /* True if LWP is stopped in its stepping range. */
301
302 static int
303 lwp_in_step_range (struct lwp_info *lwp)
304 {
305 CORE_ADDR pc = lwp->stop_pc;
306
307 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
308 }
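/* For example (illustrative values): with step_range_start == 0x1000
   and step_range_end == 0x1010, a stop at pc 0x100c is still inside
   the range, while a stop at pc 0x1010 is already outside; the end
   of the range is exclusive.  */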
309
310 struct pending_signals
311 {
312 int signal;
313 siginfo_t info;
314 struct pending_signals *prev;
315 };
316
317 /* The read/write ends of the pipe registered as waitable file in the
318 event loop. */
319 static int linux_event_pipe[2] = { -1, -1 };
320
321 /* True if we're currently in async mode. */
322 #define target_is_async_p() (linux_event_pipe[0] != -1)
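/* A sketch of the pattern behind the macro above (assuming POSIX pipe
   semantics; the actual marking is done by the async support later in
   this file): making the read end readable is what wakes the event
   loop.  */
#if 0
  if (pipe (linux_event_pipe) < 0)
    perror ("pipe");

  write (linux_event_pipe[1], "+", 1);	/* Mark an event pending.  */
  /* The event loop now wakes up on linux_event_pipe[0].  */
#endif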
323
324 static void send_sigstop (struct lwp_info *lwp);
325 static void wait_for_sigstop (void);
326
327 /* Return 1 if HEADER is a 64-bit ELF header, 0 if it is 32-bit, and -1 if it is not an ELF header at all. */
328
329 static int
330 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
331 {
332 if (header->e_ident[EI_MAG0] == ELFMAG0
333 && header->e_ident[EI_MAG1] == ELFMAG1
334 && header->e_ident[EI_MAG2] == ELFMAG2
335 && header->e_ident[EI_MAG3] == ELFMAG3)
336 {
337 *machine = header->e_machine;
338 return header->e_ident[EI_CLASS] == ELFCLASS64;
339
340 }
341 *machine = EM_NONE;
342 return -1;
343 }
344
345 /* Return 1 if FILE is a 64-bit ELF file,
346 zero if the file is a 32-bit ELF file or could not be read fully,
347 and -1 if the file is not accessible, doesn't exist, or isn't ELF. */
348
349 static int
350 elf_64_file_p (const char *file, unsigned int *machine)
351 {
352 Elf64_Ehdr header;
353 int fd;
354
355 fd = open (file, O_RDONLY);
356 if (fd < 0)
357 return -1;
358
359 if (read (fd, &header, sizeof (header)) != sizeof (header))
360 {
361 close (fd);
362 return 0;
363 }
364 close (fd);
365
366 return elf_64_header_p (&header, machine);
367 }
368
369 /* Accept an integer PID; return true if the executable that PID
370 is running is a 64-bit ELF file. */
371
372 int
373 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
374 {
375 char file[PATH_MAX];
376
377 sprintf (file, "/proc/%d/exe", pid);
378 return elf_64_file_p (file, machine);
379 }
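/* A hedged usage sketch (the PID value is illustrative): note the
   tri-state result, since the -1 "unreadable or not ELF" case is also
   non-zero and must be checked first.  */
#if 0
  unsigned int machine;
  int is64 = linux_pid_exe_is_elf_64_file (1234, &machine);

  if (is64 < 0)
    warning ("could not inspect /proc/1234/exe");
  else if (is64)
    /* 64-bit inferior; MACHINE holds e_machine, e.g. EM_X86_64.  */;
  else
    /* 32-bit inferior.  */;
#endif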
380
381 static void
382 delete_lwp (struct lwp_info *lwp)
383 {
384 struct thread_info *thr = get_lwp_thread (lwp);
385
386 if (debug_threads)
387 debug_printf ("deleting %ld\n", lwpid_of (thr));
388
389 remove_thread (thr);
390 free (lwp->arch_private);
391 free (lwp);
392 }
393
394 /* Add a process to the common process list, and set its private
395 data. */
396
397 static struct process_info *
398 linux_add_process (int pid, int attached)
399 {
400 struct process_info *proc;
401
402 proc = add_process (pid, attached);
403 proc->priv = xcalloc (1, sizeof (*proc->priv));
404
405 /* Set the arch when the first LWP stops. */
406 proc->priv->new_inferior = 1;
407
408 if (the_low_target.new_process != NULL)
409 proc->priv->arch_private = the_low_target.new_process ();
410
411 return proc;
412 }
413
414 static CORE_ADDR get_pc (struct lwp_info *lwp);
415
416 /* Handle a GNU/Linux extended wait response. If we see a clone
417 event, we need to add the new LWP to our list (and not report the
418 trap to higher layers). */
419
420 static void
421 handle_extended_wait (struct lwp_info *event_child, int wstat)
422 {
423 int event = linux_ptrace_get_extended_event (wstat);
424 struct thread_info *event_thr = get_lwp_thread (event_child);
425 struct lwp_info *new_lwp;
426
427 if (event == PTRACE_EVENT_CLONE)
428 {
429 ptid_t ptid;
430 unsigned long new_pid;
431 int ret, status;
432
433 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
434 &new_pid);
435
436 /* If we haven't already seen the new PID stop, wait for it now. */
437 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
438 {
439 /* The new child has a pending SIGSTOP. We can't affect it until it
440 hits the SIGSTOP, but we're already attached. */
441
442 ret = my_waitpid (new_pid, &status, __WALL);
443
444 if (ret == -1)
445 perror_with_name ("waiting for new child");
446 else if (ret != new_pid)
447 warning ("wait returned unexpected PID %d", ret);
448 else if (!WIFSTOPPED (status))
449 warning ("wait returned unexpected status 0x%x", status);
450 }
451
452 if (debug_threads)
453 debug_printf ("HEW: Got clone event "
454 "from LWP %ld, new child is LWP %ld\n",
455 lwpid_of (event_thr), new_pid);
456
457 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
458 new_lwp = add_lwp (ptid);
459
460 /* Either we're going to immediately resume the new thread
461 or leave it stopped. linux_resume_one_lwp is a nop if it
462 thinks the thread is currently running, so set this first
463 before calling linux_resume_one_lwp. */
464 new_lwp->stopped = 1;
465
466 /* If we're suspending all threads, leave this one suspended
467 too. */
468 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
469 new_lwp->suspended = 1;
470
471 /* Normally we will get the pending SIGSTOP. But in some cases
472 we might get another signal delivered to the group first.
473 If we do get another signal, be sure not to lose it. */
474 if (WSTOPSIG (status) != SIGSTOP)
475 {
476 new_lwp->stop_expected = 1;
477 new_lwp->status_pending_p = 1;
478 new_lwp->status_pending = status;
479 }
480 }
481 }
482
483 /* Return the PC as read from the regcache of LWP, without any
484 adjustment. */
485
486 static CORE_ADDR
487 get_pc (struct lwp_info *lwp)
488 {
489 struct thread_info *saved_thread;
490 struct regcache *regcache;
491 CORE_ADDR pc;
492
493 if (the_low_target.get_pc == NULL)
494 return 0;
495
496 saved_thread = current_thread;
497 current_thread = get_lwp_thread (lwp);
498
499 regcache = get_thread_regcache (current_thread, 1);
500 pc = (*the_low_target.get_pc) (regcache);
501
502 if (debug_threads)
503 debug_printf ("pc is 0x%lx\n", (long) pc);
504
505 current_thread = saved_thread;
506 return pc;
507 }
508
509 /* This function should only be called if LWP got a SIGTRAP.
510 The SIGTRAP could mean several things.
511
512 On i386, where decr_pc_after_break is non-zero:
513
514 If we were single-stepping this process using PTRACE_SINGLESTEP, we
515 will get only the one SIGTRAP. The value of $eip will be the next
516 instruction. If the instruction we stepped over was a breakpoint,
517 we need to decrement the PC.
518
519 If we continue the process using PTRACE_CONT, we will get a
520 SIGTRAP when we hit a breakpoint. The value of $eip will be
521 the instruction after the breakpoint (i.e. needs to be
522 decremented). If we report the SIGTRAP to GDB, we must also
523 report the undecremented PC. If the breakpoint is removed, we
524 must resume at the decremented PC.
525
526 On a non-decr_pc_after_break machine with hardware or kernel
527 single-step:
528
529 If we either single-step a breakpoint instruction, or continue and
530 hit a breakpoint instruction, our PC will point at the breakpoint
531 instruction. */
532
533 static int
534 check_stopped_by_breakpoint (struct lwp_info *lwp)
535 {
536 CORE_ADDR pc;
537 CORE_ADDR sw_breakpoint_pc;
538 struct thread_info *saved_thread;
539 #if USE_SIGTRAP_SIGINFO
540 siginfo_t siginfo;
541 #endif
542
543 if (the_low_target.get_pc == NULL)
544 return 0;
545
546 pc = get_pc (lwp);
547 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
548
549 /* breakpoint_at reads from the current thread. */
550 saved_thread = current_thread;
551 current_thread = get_lwp_thread (lwp);
552
553 #if USE_SIGTRAP_SIGINFO
554 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
555 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
556 {
557 if (siginfo.si_signo == SIGTRAP)
558 {
559 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
560 {
561 if (debug_threads)
562 {
563 struct thread_info *thr = get_lwp_thread (lwp);
564
565 debug_printf ("CSBB: Push back software breakpoint for %s\n",
566 target_pid_to_str (ptid_of (thr)));
567 }
568
569 /* Back up the PC if necessary. */
570 if (pc != sw_breakpoint_pc)
571 {
572 struct regcache *regcache
573 = get_thread_regcache (current_thread, 1);
574 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
575 }
576
577 lwp->stop_pc = sw_breakpoint_pc;
578 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
579 current_thread = saved_thread;
580 return 1;
581 }
582 else if (siginfo.si_code == TRAP_HWBKPT)
583 {
584 if (debug_threads)
585 {
586 struct thread_info *thr = get_lwp_thread (lwp);
587
588 debug_printf ("CSBB: Push back hardware "
589 "breakpoint/watchpoint for %s\n",
590 target_pid_to_str (ptid_of (thr)));
591 }
592
593 lwp->stop_pc = pc;
594 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
595 current_thread = saved_thread;
596 return 1;
597 }
598 }
599 }
600 #else
601 /* We may have just stepped a breakpoint instruction. E.g., in
602 non-stop mode, GDB first tells the thread A to step a range, and
603 then the user inserts a breakpoint inside the range. In that
604 case we need to report the breakpoint PC. */
605 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
606 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
607 {
608 if (debug_threads)
609 {
610 struct thread_info *thr = get_lwp_thread (lwp);
611
612 debug_printf ("CSBB: %s stopped by software breakpoint\n",
613 target_pid_to_str (ptid_of (thr)));
614 }
615
616 /* Back up the PC if necessary. */
617 if (pc != sw_breakpoint_pc)
618 {
619 struct regcache *regcache
620 = get_thread_regcache (current_thread, 1);
621 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
622 }
623
624 lwp->stop_pc = sw_breakpoint_pc;
625 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
626 current_thread = saved_thread;
627 return 1;
628 }
629
630 if (hardware_breakpoint_inserted_here (pc))
631 {
632 if (debug_threads)
633 {
634 struct thread_info *thr = get_lwp_thread (lwp);
635
636 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
637 target_pid_to_str (ptid_of (thr)));
638 }
639
640 lwp->stop_pc = pc;
641 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
642 current_thread = saved_thread;
643 return 1;
644 }
645 #endif
646
647 current_thread = saved_thread;
648 return 0;
649 }
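/* A worked example of the adjustment above (illustrative numbers,
   x86-style where decr_pc_after_break is 1): an int3 planted at
   0x400000 reports $pc == 0x400001 after the trap, so
   sw_breakpoint_pc = pc - 1 recovers 0x400000, the address that
   breakpoint_at is asked about and that the PC is rolled back to.  */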
650
651 static struct lwp_info *
652 add_lwp (ptid_t ptid)
653 {
654 struct lwp_info *lwp;
655
656 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
657 memset (lwp, 0, sizeof (*lwp));
658
659 if (the_low_target.new_thread != NULL)
660 the_low_target.new_thread (lwp);
661
662 lwp->thread = add_thread (ptid, lwp);
663
664 return lwp;
665 }
666
667 /* Start an inferior process and return its pid.
668 ALLARGS is a vector of program-name and args. */
669
670 static int
671 linux_create_inferior (char *program, char **allargs)
672 {
673 struct lwp_info *new_lwp;
674 int pid;
675 ptid_t ptid;
676 struct cleanup *restore_personality
677 = maybe_disable_address_space_randomization (disable_randomization);
678
679 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
680 pid = vfork ();
681 #else
682 pid = fork ();
683 #endif
684 if (pid < 0)
685 perror_with_name ("fork");
686
687 if (pid == 0)
688 {
689 close_most_fds ();
690 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
691
692 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
693 signal (__SIGRTMIN + 1, SIG_DFL);
694 #endif
695
696 setpgid (0, 0);
697
698 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
699 stdout to stderr so that inferior i/o doesn't corrupt the connection.
700 Also, redirect stdin to /dev/null. */
701 if (remote_connection_is_stdio ())
702 {
703 close (0);
704 open ("/dev/null", O_RDONLY);
705 dup2 (2, 1);
706 if (write (2, "stdin/stdout redirected\n",
707 sizeof ("stdin/stdout redirected\n") - 1) < 0)
708 {
709 /* Errors ignored. */;
710 }
711 }
712
713 execv (program, allargs);
714 if (errno == ENOENT)
715 execvp (program, allargs);
716
717 fprintf (stderr, "Cannot exec %s: %s.\n", program,
718 strerror (errno));
719 fflush (stderr);
720 _exit (0177);
721 }
722
723 do_cleanups (restore_personality);
724
725 linux_add_process (pid, 0);
726
727 ptid = ptid_build (pid, pid, 0);
728 new_lwp = add_lwp (ptid);
729 new_lwp->must_set_ptrace_flags = 1;
730
731 return pid;
732 }
733
734 /* Attach to an inferior process. Returns 0 on success, ERRNO on
735 error. */
736
737 int
738 linux_attach_lwp (ptid_t ptid)
739 {
740 struct lwp_info *new_lwp;
741 int lwpid = ptid_get_lwp (ptid);
742
743 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
744 != 0)
745 return errno;
746
747 new_lwp = add_lwp (ptid);
748
749 /* We need to wait for SIGSTOP before being able to make the next
750 ptrace call on this LWP. */
751 new_lwp->must_set_ptrace_flags = 1;
752
753 if (linux_proc_pid_is_stopped (lwpid))
754 {
755 if (debug_threads)
756 debug_printf ("Attached to a stopped process\n");
757
758 /* The process is definitely stopped. It is in a job control
759 stop, unless the kernel predates the TASK_STOPPED /
760 TASK_TRACED distinction, in which case it might be in a
761 ptrace stop. Make sure it is in a ptrace stop; from there we
762 can kill it, signal it, et cetera.
763
764 First make sure there is a pending SIGSTOP. Since we are
765 already attached, the process can not transition from stopped
766 to running without a PTRACE_CONT; so we know this signal will
767 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
768 probably already in the queue (unless this kernel is old
769 enough to use TASK_STOPPED for ptrace stops); but since
770 SIGSTOP is not an RT signal, it can only be queued once. */
771 kill_lwp (lwpid, SIGSTOP);
772
773 /* Finally, resume the stopped process. This will deliver the
774 SIGSTOP (or a higher priority signal, just like normal
775 PTRACE_ATTACH), which we'll catch later on. */
776 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
777 }
778
779 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
780 brings it to a halt.
781
782 There are several cases to consider here:
783
784 1) gdbserver has already attached to the process and is being notified
785 of a new thread that is being created.
786 In this case we should ignore that SIGSTOP and resume the
787 process. This is handled below by setting stop_expected = 1,
788 and the fact that add_thread sets last_resume_kind ==
789 resume_continue.
790
791 2) This is the first thread (the process thread), and we're attaching
792 to it via attach_inferior.
793 In this case we want the process thread to stop.
794 This is handled by having linux_attach set last_resume_kind ==
795 resume_stop after we return.
796
797 If the pid we are attaching to is also the tgid, we attach to and
798 stop all the existing threads. Otherwise, we attach to pid and
799 ignore any other threads in the same group as this pid.
800
801 3) GDB is connecting to gdbserver and is requesting an enumeration of all
802 existing threads.
803 In this case we want the thread to stop.
804 FIXME: This case is currently not properly handled.
805 We should wait for the SIGSTOP but don't. Things work apparently
806 because enough time passes between when we ptrace (ATTACH) and when
807 gdb makes the next ptrace call on the thread.
808
809 On the other hand, if we are currently trying to stop all threads, we
810 should treat the new thread as if we had sent it a SIGSTOP. This works
811 because we are guaranteed that the add_lwp call above added us to the
812 end of the list, and so the new thread has not yet reached
813 wait_for_sigstop (but will). */
814 new_lwp->stop_expected = 1;
815
816 return 0;
817 }
818
819 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
820 already attached. Returns true if a new LWP is found, false
821 otherwise. */
822
823 static int
824 attach_proc_task_lwp_callback (ptid_t ptid)
825 {
826 /* Is this a new thread? */
827 if (find_thread_ptid (ptid) == NULL)
828 {
829 int lwpid = ptid_get_lwp (ptid);
830 int err;
831
832 if (debug_threads)
833 debug_printf ("Found new lwp %d\n", lwpid);
834
835 err = linux_attach_lwp (ptid);
836
837 /* Be quiet if we simply raced with the thread exiting. EPERM
838 is returned if the thread's task still exists, and is marked
839 as exited or zombie, as well as other conditions, so in that
840 case, confirm the status in /proc/PID/status. */
841 if (err == ESRCH
842 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
843 {
844 if (debug_threads)
845 {
846 debug_printf ("Cannot attach to lwp %d: "
847 "thread is gone (%d: %s)\n",
848 lwpid, err, strerror (err));
849 }
850 }
851 else if (err != 0)
852 {
853 warning (_("Cannot attach to lwp %d: %s"),
854 lwpid,
855 linux_ptrace_attach_fail_reason_string (ptid, err));
856 }
857
858 return 1;
859 }
860 return 0;
861 }
862
863 /* Attach to PID. If PID is the tgid, attach to it and all
864 of its threads. */
865
866 static int
867 linux_attach (unsigned long pid)
868 {
869 ptid_t ptid = ptid_build (pid, pid, 0);
870 int err;
871
872 /* Attach to PID. We will check for other threads
873 soon. */
874 err = linux_attach_lwp (ptid);
875 if (err != 0)
876 error ("Cannot attach to process %ld: %s",
877 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
878
879 linux_add_process (pid, 1);
880
881 if (!non_stop)
882 {
883 struct thread_info *thread;
884
885 /* Don't ignore the initial SIGSTOP if we just attached to this
886 process. It will be collected by wait shortly. */
887 thread = find_thread_ptid (ptid_build (pid, pid, 0));
888 thread->last_resume_kind = resume_stop;
889 }
890
891 /* We must attach to every LWP. If /proc is mounted, use that to
892 find them now. On the one hand, the inferior may be using raw
893 clone instead of using pthreads. On the other hand, even if it
894 is using pthreads, GDB may not be connected yet (thread_db needs
895 to do symbol lookups, through qSymbol). Also, thread_db walks
896 structures in the inferior's address space to find the list of
897 threads/LWPs, and those structures may well be corrupted. Note
898 that once thread_db is loaded, we'll still use it to list threads
899 and associate pthread info with each LWP. */
900 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
901 return 0;
902 }
903
904 struct counter
905 {
906 int pid;
907 int count;
908 };
909
910 static int
911 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
912 {
913 struct counter *counter = args;
914
915 if (ptid_get_pid (entry->id) == counter->pid)
916 {
917 if (++counter->count > 1)
918 return 1;
919 }
920
921 return 0;
922 }
923
924 static int
925 last_thread_of_process_p (int pid)
926 {
927 struct counter counter = { pid, 0 };
928
929 return (find_inferior (&all_threads,
930 second_thread_of_pid_p, &counter) == NULL);
931 }
932
933 /* Kill LWP. */
934
935 static void
936 linux_kill_one_lwp (struct lwp_info *lwp)
937 {
938 struct thread_info *thr = get_lwp_thread (lwp);
939 int pid = lwpid_of (thr);
940
941 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
942 there is no signal context, and ptrace(PTRACE_KILL) (or
943 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
944 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
945 alternative is to kill with SIGKILL. We only need one SIGKILL
946 per process, not one for each thread. But since we still support
947 linuxthreads, and we also support debugging programs using raw
948 clone without CLONE_THREAD, we send one for each thread. For
949 years, we used PTRACE_KILL only, so out of paranoia about some
950 old kernels where PTRACE_KILL might conceivably work better
951 (dubious that any such exist, but that's what paranoia is for),
952 we try SIGKILL first and PTRACE_KILL second. That way we're
953 fine everywhere. */
954
955 errno = 0;
956 kill_lwp (pid, SIGKILL);
957 if (debug_threads)
958 {
959 int save_errno = errno;
960
961 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
962 target_pid_to_str (ptid_of (thr)),
963 save_errno ? strerror (save_errno) : "OK");
964 }
965
966 errno = 0;
967 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
968 if (debug_threads)
969 {
970 int save_errno = errno;
971
972 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
973 target_pid_to_str (ptid_of (thr)),
974 save_errno ? strerror (save_errno) : "OK");
975 }
976 }
977
978 /* Kill LWP and wait for it to die. */
979
980 static void
981 kill_wait_lwp (struct lwp_info *lwp)
982 {
983 struct thread_info *thr = get_lwp_thread (lwp);
984 int pid = ptid_get_pid (ptid_of (thr));
985 int lwpid = ptid_get_lwp (ptid_of (thr));
986 int wstat;
987 int res;
988
989 if (debug_threads)
990 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
991
992 do
993 {
994 linux_kill_one_lwp (lwp);
995
996 /* Make sure it died. Notes:
997
998 - The loop is most likely unnecessary.
999
1000 - We don't use linux_wait_for_event as that could delete lwps
1001 while we're iterating over them. We're not interested in
1002 any pending status at this point, only in making sure all
1003 wait status on the kernel side are collected until the
1004 process is reaped.
1005
1006 - We don't use __WALL here as the __WALL emulation relies on
1007 SIGCHLD, and killing a stopped process doesn't generate
1008 one, nor an exit status.
1009 */
1010 res = my_waitpid (lwpid, &wstat, 0);
1011 if (res == -1 && errno == ECHILD)
1012 res = my_waitpid (lwpid, &wstat, __WCLONE);
1013 } while (res > 0 && WIFSTOPPED (wstat));
1014
1015 gdb_assert (res > 0);
1016 }
1017
1018 /* Callback for `find_inferior'. Kills an lwp of a given process,
1019 except the leader. */
1020
1021 static int
1022 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1023 {
1024 struct thread_info *thread = (struct thread_info *) entry;
1025 struct lwp_info *lwp = get_thread_lwp (thread);
1026 int pid = * (int *) args;
1027
1028 if (ptid_get_pid (entry->id) != pid)
1029 return 0;
1030
1031 /* We avoid killing the first thread here, because of a Linux kernel (at
1032 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1033 the children get a chance to be reaped, it will remain a zombie
1034 forever. */
1035
1036 if (lwpid_of (thread) == pid)
1037 {
1038 if (debug_threads)
1039 debug_printf ("lkop: is last of process %s\n",
1040 target_pid_to_str (entry->id));
1041 return 0;
1042 }
1043
1044 kill_wait_lwp (lwp);
1045 return 0;
1046 }
1047
1048 static int
1049 linux_kill (int pid)
1050 {
1051 struct process_info *process;
1052 struct lwp_info *lwp;
1053
1054 process = find_process_pid (pid);
1055 if (process == NULL)
1056 return -1;
1057
1058 /* If we're killing a running inferior, make sure it is stopped
1059 first, as PTRACE_KILL will not work otherwise. */
1060 stop_all_lwps (0, NULL);
1061
1062 find_inferior (&all_threads, kill_one_lwp_callback, &pid);
1063
1064 /* See the comment in linux_kill_one_lwp. We did not kill the first
1065 thread in the list, so do so now. */
1066 lwp = find_lwp_pid (pid_to_ptid (pid));
1067
1068 if (lwp == NULL)
1069 {
1070 if (debug_threads)
1071 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1072 pid);
1073 }
1074 else
1075 kill_wait_lwp (lwp);
1076
1077 the_target->mourn (process);
1078
1079 /* Since we presently can only stop all lwps of all processes, we
1080 need to unstop lwps of other processes. */
1081 unstop_all_lwps (0, NULL);
1082 return 0;
1083 }
1084
1085 /* Get pending signal of THREAD, for detaching purposes. This is the
1086 signal the thread last stopped for, which we need to deliver to the
1087 thread when detaching; otherwise, it'd be suppressed/lost. */
1088
1089 static int
1090 get_detach_signal (struct thread_info *thread)
1091 {
1092 enum gdb_signal signo = GDB_SIGNAL_0;
1093 int status;
1094 struct lwp_info *lp = get_thread_lwp (thread);
1095
1096 if (lp->status_pending_p)
1097 status = lp->status_pending;
1098 else
1099 {
1100 /* If the thread had been suspended by gdbserver, and it stopped
1101 cleanly, then it'll have stopped with SIGSTOP. But we don't
1102 want to deliver that SIGSTOP. */
1103 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1104 || thread->last_status.value.sig == GDB_SIGNAL_0)
1105 return 0;
1106
1107 /* Otherwise, we may need to deliver the signal we
1108 intercepted. */
1109 status = lp->last_status;
1110 }
1111
1112 if (!WIFSTOPPED (status))
1113 {
1114 if (debug_threads)
1115 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1116 target_pid_to_str (ptid_of (thread)));
1117 return 0;
1118 }
1119
1120 /* Extended wait statuses aren't real SIGTRAPs. */
1121 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1122 {
1123 if (debug_threads)
1124 debug_printf ("GPS: lwp %s had stopped with extended "
1125 "status: no pending signal\n",
1126 target_pid_to_str (ptid_of (thread)));
1127 return 0;
1128 }
1129
1130 signo = gdb_signal_from_host (WSTOPSIG (status));
1131
1132 if (program_signals_p && !program_signals[signo])
1133 {
1134 if (debug_threads)
1135 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1136 target_pid_to_str (ptid_of (thread)),
1137 gdb_signal_to_string (signo));
1138 return 0;
1139 }
1140 else if (!program_signals_p
1141 /* If we have no way to know which signals GDB does not
1142 want to have passed to the program, assume
1143 SIGTRAP/SIGINT, which is GDB's default. */
1144 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1145 {
1146 if (debug_threads)
1147 debug_printf ("GPS: lwp %s had signal %s, "
1148 "but we don't know if we should pass it. "
1149 "Default to not.\n",
1150 target_pid_to_str (ptid_of (thread)),
1151 gdb_signal_to_string (signo));
1152 return 0;
1153 }
1154 else
1155 {
1156 if (debug_threads)
1157 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1158 target_pid_to_str (ptid_of (thread)),
1159 gdb_signal_to_string (signo));
1160
1161 return WSTOPSIG (status);
1162 }
1163 }
1164
1165 static int
1166 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1167 {
1168 struct thread_info *thread = (struct thread_info *) entry;
1169 struct lwp_info *lwp = get_thread_lwp (thread);
1170 int pid = * (int *) args;
1171 int sig;
1172
1173 if (ptid_get_pid (entry->id) != pid)
1174 return 0;
1175
1176 /* If there is a pending SIGSTOP, get rid of it. */
1177 if (lwp->stop_expected)
1178 {
1179 if (debug_threads)
1180 debug_printf ("Sending SIGCONT to %s\n",
1181 target_pid_to_str (ptid_of (thread)));
1182
1183 kill_lwp (lwpid_of (thread), SIGCONT);
1184 lwp->stop_expected = 0;
1185 }
1186
1187 /* Flush any pending changes to the process's registers. */
1188 regcache_invalidate_thread (thread);
1189
1190 /* Pass on any pending signal for this thread. */
1191 sig = get_detach_signal (thread);
1192
1193 /* Finally, let it resume. */
1194 if (the_low_target.prepare_to_resume != NULL)
1195 the_low_target.prepare_to_resume (lwp);
1196 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1197 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1198 error (_("Can't detach %s: %s"),
1199 target_pid_to_str (ptid_of (thread)),
1200 strerror (errno));
1201
1202 delete_lwp (lwp);
1203 return 0;
1204 }
1205
1206 static int
1207 linux_detach (int pid)
1208 {
1209 struct process_info *process;
1210
1211 process = find_process_pid (pid);
1212 if (process == NULL)
1213 return -1;
1214
1215 /* Stop all threads before detaching. First, ptrace requires that
1216 the thread is stopped to successfully detach. Second, thread_db
1217 may need to uninstall thread event breakpoints from memory, which
1218 only works with a stopped process anyway. */
1219 stop_all_lwps (0, NULL);
1220
1221 #ifdef USE_THREAD_DB
1222 thread_db_detach (process);
1223 #endif
1224
1225 /* Stabilize threads (move out of jump pads). */
1226 stabilize_threads ();
1227
1228 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1229
1230 the_target->mourn (process);
1231
1232 /* Since we presently can only stop all lwps of all processes, we
1233 need to unstop lwps of other processes. */
1234 unstop_all_lwps (0, NULL);
1235 return 0;
1236 }
1237
1238 /* Remove all LWPs that belong to process PROC from the lwp list. */
1239
1240 static int
1241 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1242 {
1243 struct thread_info *thread = (struct thread_info *) entry;
1244 struct lwp_info *lwp = get_thread_lwp (thread);
1245 struct process_info *process = proc;
1246
1247 if (pid_of (thread) == pid_of (process))
1248 delete_lwp (lwp);
1249
1250 return 0;
1251 }
1252
1253 static void
1254 linux_mourn (struct process_info *process)
1255 {
1256 struct process_info_private *priv;
1257
1258 #ifdef USE_THREAD_DB
1259 thread_db_mourn (process);
1260 #endif
1261
1262 find_inferior (&all_threads, delete_lwp_callback, process);
1263
1264 /* Freeing all private data. */
1265 priv = process->priv;
1266 free (priv->arch_private);
1267 free (priv);
1268 process->priv = NULL;
1269
1270 remove_process (process);
1271 }
1272
1273 static void
1274 linux_join (int pid)
1275 {
1276 int status, ret;
1277
1278 do {
1279 ret = my_waitpid (pid, &status, 0);
1280 if (WIFEXITED (status) || WIFSIGNALED (status))
1281 break;
1282 } while (ret != -1 || errno != ECHILD);
1283 }
1284
1285 /* Return nonzero if the given thread is still alive. */
1286 static int
1287 linux_thread_alive (ptid_t ptid)
1288 {
1289 struct lwp_info *lwp = find_lwp_pid (ptid);
1290
1291 /* We assume we always know if a thread exits. If a whole process
1292 exited but we still haven't been able to report it to GDB, we'll
1293 hold on to the last lwp of the dead process. */
1294 if (lwp != NULL)
1295 return !lwp->dead;
1296 else
1297 return 0;
1298 }
1299
1300 /* Return 1 if this lwp still has an interesting status pending. If
1301 not (e.g., it had stopped for a breakpoint that is gone), return
1302 false. */
1303
1304 static int
1305 thread_still_has_status_pending_p (struct thread_info *thread)
1306 {
1307 struct lwp_info *lp = get_thread_lwp (thread);
1308
1309 if (!lp->status_pending_p)
1310 return 0;
1311
1312 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1313 report any status pending the LWP may have. */
1314 if (thread->last_resume_kind == resume_stop
1315 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1316 return 0;
1317
1318 if (thread->last_resume_kind != resume_stop
1319 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1320 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1321 {
1322 struct thread_info *saved_thread;
1323 CORE_ADDR pc;
1324 int discard = 0;
1325
1326 gdb_assert (lp->last_status != 0);
1327
1328 pc = get_pc (lp);
1329
1330 saved_thread = current_thread;
1331 current_thread = thread;
1332
1333 if (pc != lp->stop_pc)
1334 {
1335 if (debug_threads)
1336 debug_printf ("PC of %ld changed\n",
1337 lwpid_of (thread));
1338 discard = 1;
1339 }
1340
1341 #if !USE_SIGTRAP_SIGINFO
1342 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1343 && !(*the_low_target.breakpoint_at) (pc))
1344 {
1345 if (debug_threads)
1346 debug_printf ("previous SW breakpoint of %ld gone\n",
1347 lwpid_of (thread));
1348 discard = 1;
1349 }
1350 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1351 && !hardware_breakpoint_inserted_here (pc))
1352 {
1353 if (debug_threads)
1354 debug_printf ("previous HW breakpoint of %ld gone\n",
1355 lwpid_of (thread));
1356 discard = 1;
1357 }
1358 #endif
1359
1360 current_thread = saved_thread;
1361
1362 if (discard)
1363 {
1364 if (debug_threads)
1365 debug_printf ("discarding pending breakpoint status\n");
1366 lp->status_pending_p = 0;
1367 return 0;
1368 }
1369 }
1370
1371 return 1;
1372 }
1373
1374 /* Return 1 if this lwp has an interesting status pending. */
1375 static int
1376 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1377 {
1378 struct thread_info *thread = (struct thread_info *) entry;
1379 struct lwp_info *lp = get_thread_lwp (thread);
1380 ptid_t ptid = * (ptid_t *) arg;
1381
1382 /* Check if we're only interested in events from a specific process
1383 or a specific LWP. */
1384 if (!ptid_match (ptid_of (thread), ptid))
1385 return 0;
1386
1387 if (lp->status_pending_p
1388 && !thread_still_has_status_pending_p (thread))
1389 {
1390 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1391 return 0;
1392 }
1393
1394 return lp->status_pending_p;
1395 }
1396
1397 static int
1398 same_lwp (struct inferior_list_entry *entry, void *data)
1399 {
1400 ptid_t ptid = *(ptid_t *) data;
1401 int lwp;
1402
1403 if (ptid_get_lwp (ptid) != 0)
1404 lwp = ptid_get_lwp (ptid);
1405 else
1406 lwp = ptid_get_pid (ptid);
1407
1408 if (ptid_get_lwp (entry->id) == lwp)
1409 return 1;
1410
1411 return 0;
1412 }
1413
1414 struct lwp_info *
1415 find_lwp_pid (ptid_t ptid)
1416 {
1417 struct inferior_list_entry *thread
1418 = find_inferior (&all_threads, same_lwp, &ptid);
1419
1420 if (thread == NULL)
1421 return NULL;
1422
1423 return get_thread_lwp ((struct thread_info *) thread);
1424 }
1425
1426 /* Return the number of known LWPs in the tgid given by PID. */
1427
1428 static int
1429 num_lwps (int pid)
1430 {
1431 struct inferior_list_entry *inf, *tmp;
1432 int count = 0;
1433
1434 ALL_INFERIORS (&all_threads, inf, tmp)
1435 {
1436 if (ptid_get_pid (inf->id) == pid)
1437 count++;
1438 }
1439
1440 return count;
1441 }
1442
1443 /* The arguments passed to iterate_over_lwps. */
1444
1445 struct iterate_over_lwps_args
1446 {
1447 /* The FILTER argument passed to iterate_over_lwps. */
1448 ptid_t filter;
1449
1450 /* The CALLBACK argument passed to iterate_over_lwps. */
1451 iterate_over_lwps_ftype *callback;
1452
1453 /* The DATA argument passed to iterate_over_lwps. */
1454 void *data;
1455 };
1456
1457 /* Callback for find_inferior used by iterate_over_lwps to filter
1458 calls to the callback supplied to that function. Returning a
1459 nonzero value causes find_inferior to stop iterating and return
1460 the current inferior_list_entry. Returning zero indicates that
1461 find_inferior should continue iterating. */
1462
1463 static int
1464 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1465 {
1466 struct iterate_over_lwps_args *args
1467 = (struct iterate_over_lwps_args *) args_p;
1468
1469 if (ptid_match (entry->id, args->filter))
1470 {
1471 struct thread_info *thr = (struct thread_info *) entry;
1472 struct lwp_info *lwp = get_thread_lwp (thr);
1473
1474 return (*args->callback) (lwp, args->data);
1475 }
1476
1477 return 0;
1478 }
1479
1480 /* See nat/linux-nat.h. */
1481
1482 struct lwp_info *
1483 iterate_over_lwps (ptid_t filter,
1484 iterate_over_lwps_ftype callback,
1485 void *data)
1486 {
1487 struct iterate_over_lwps_args args = {filter, callback, data};
1488 struct inferior_list_entry *entry;
1489
1490 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1491 if (entry == NULL)
1492 return NULL;
1493
1494 return get_thread_lwp ((struct thread_info *) entry);
1495 }
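/* A minimal usage sketch (hypothetical callback, not gdbserver code):
   count the stopped LWPs of process PID.  Returning zero from the
   callback keeps the iteration going, as described in the filter
   comment above.  */
#if 0
static int
count_stopped_lwps (struct lwp_info *lwp, void *data)
{
  int *count = data;

  if (lwp->stopped)
    (*count)++;
  return 0;	/* Continue iterating.  */
}

/* ... int n = 0;
   iterate_over_lwps (pid_to_ptid (pid), count_stopped_lwps, &n); ... */
#endif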
1496
1497 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1498 their exits until all other threads in the group have exited. */
1499
1500 static void
1501 check_zombie_leaders (void)
1502 {
1503 struct process_info *proc, *tmp;
1504
1505 ALL_PROCESSES (proc, tmp)
1506 {
1507 pid_t leader_pid = pid_of (proc);
1508 struct lwp_info *leader_lp;
1509
1510 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1511
1512 if (debug_threads)
1513 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1514 "num_lwps=%d, zombie=%d\n",
1515 leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1516 linux_proc_pid_is_zombie (leader_pid));
1517
1518 if (leader_lp != NULL
1519 /* Check if there are other threads in the group, as we may
1520 have raced with the inferior simply exiting. */
1521 && !last_thread_of_process_p (leader_pid)
1522 && linux_proc_pid_is_zombie (leader_pid))
1523 {
1524 /* A leader zombie can mean one of two things:
1525
1526 - It exited, and there's an exit status pending
1527 available, or only the leader exited (not the whole
1528 program). In the latter case, we can't waitpid the
1529 leader's exit status until all other threads are gone.
1530
1531 - There are 3 or more threads in the group, and a thread
1532 other than the leader exec'd. On an exec, the Linux
1533 kernel destroys all other threads (except the execing
1534 one) in the thread group, and resets the execing thread's
1535 tid to the tgid. No exit notification is sent for the
1536 execing thread -- from the ptracer's perspective, it
1537 appears as though the execing thread just vanishes.
1538 Until we reap all other threads except the leader and the
1539 execing thread, the leader will be zombie, and the
1540 execing thread will be in `D (disc sleep)'. As soon as
1541 all other threads are reaped, the execing thread changes
1542 its tid to the tgid, and the previous (zombie) leader
1543 vanishes, giving place to the "new" leader. We could try
1544 distinguishing the exit and exec cases, by waiting once
1545 more, and seeing if something comes out, but it doesn't
1546 sound useful. The previous leader _does_ go away, and
1547 we'll re-add the new one once we see the exec event
1548 (which is just the same as what would happen if the
1549 previous leader did exit voluntarily before some other
1550 thread execs). */
1551
1552 if (debug_threads)
1553 fprintf (stderr,
1554 "CZL: Thread group leader %d zombie "
1555 "(it exited, or another thread execd).\n",
1556 leader_pid);
1557
1558 delete_lwp (leader_lp);
1559 }
1560 }
1561 }
1562
1563 /* Callback for `find_inferior'. Returns the first LWP that is not
1564 stopped. ARG is a PTID filter. */
1565
1566 static int
1567 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1568 {
1569 struct thread_info *thr = (struct thread_info *) entry;
1570 struct lwp_info *lwp;
1571 ptid_t filter = *(ptid_t *) arg;
1572
1573 if (!ptid_match (ptid_of (thr), filter))
1574 return 0;
1575
1576 lwp = get_thread_lwp (thr);
1577 if (!lwp->stopped)
1578 return 1;
1579
1580 return 0;
1581 }
1582
1583 /* This function should only be called if the LWP got a SIGTRAP.
1584
1585 Handle any tracepoint steps or hits. Return true if a tracepoint
1586 event was handled, 0 otherwise. */
1587
1588 static int
1589 handle_tracepoints (struct lwp_info *lwp)
1590 {
1591 struct thread_info *tinfo = get_lwp_thread (lwp);
1592 int tpoint_related_event = 0;
1593
1594 gdb_assert (lwp->suspended == 0);
1595
1596 /* If this tracepoint hit causes a tracing stop, we'll immediately
1597 uninsert tracepoints. To do this, we temporarily pause all
1598 threads, unpatch away, and then unpause threads. We need to make
1599 sure the unpausing doesn't resume LWP too. */
1600 lwp->suspended++;
1601
1602 /* And we need to be sure that any all-threads-stopping doesn't try
1603 to move threads out of the jump pads, as it could deadlock the
1604 inferior (LWP could be in the jump pad, maybe even holding the
1605 lock.) */
1606
1607 /* Do any necessary step collect actions. */
1608 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1609
1610 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1611
1612 /* See if we just hit a tracepoint and do its main collect
1613 actions. */
1614 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1615
1616 lwp->suspended--;
1617
1618 gdb_assert (lwp->suspended == 0);
1619 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1620
1621 if (tpoint_related_event)
1622 {
1623 if (debug_threads)
1624 debug_printf ("got a tracepoint event\n");
1625 return 1;
1626 }
1627
1628 return 0;
1629 }
1630
1631 /* Convenience wrapper. Returns true if LWP is presently collecting a
1632 fast tracepoint. */
1633
1634 static int
1635 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1636 struct fast_tpoint_collect_status *status)
1637 {
1638 CORE_ADDR thread_area;
1639 struct thread_info *thread = get_lwp_thread (lwp);
1640
1641 if (the_low_target.get_thread_area == NULL)
1642 return 0;
1643
1644 /* Get the thread area address. This is used to recognize which
1645 thread is which when tracing with the in-process agent library.
1646 We don't read anything from the address, and treat it as opaque;
1647 it's the address itself that we assume is unique per-thread. */
1648 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1649 return 0;
1650
1651 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1652 }
1653
1654 /* The reason we resume in the caller is that we want to be able
1655 to pass lwp->status_pending as WSTAT, and we need to clear
1656 status_pending_p before resuming; otherwise, linux_resume_one_lwp
1657 refuses to resume. */
1658
1659 static int
1660 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1661 {
1662 struct thread_info *saved_thread;
1663
1664 saved_thread = current_thread;
1665 current_thread = get_lwp_thread (lwp);
1666
1667 if ((wstat == NULL
1668 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1669 && supports_fast_tracepoints ()
1670 && agent_loaded_p ())
1671 {
1672 struct fast_tpoint_collect_status status;
1673 int r;
1674
1675 if (debug_threads)
1676 debug_printf ("Checking whether LWP %ld needs to move out of the "
1677 "jump pad.\n",
1678 lwpid_of (current_thread));
1679
1680 r = linux_fast_tracepoint_collecting (lwp, &status);
1681
1682 if (wstat == NULL
1683 || (WSTOPSIG (*wstat) != SIGILL
1684 && WSTOPSIG (*wstat) != SIGFPE
1685 && WSTOPSIG (*wstat) != SIGSEGV
1686 && WSTOPSIG (*wstat) != SIGBUS))
1687 {
1688 lwp->collecting_fast_tracepoint = r;
1689
1690 if (r != 0)
1691 {
1692 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1693 {
1694 /* Haven't executed the original instruction yet.
1695 Set breakpoint there, and wait till it's hit,
1696 then single-step until exiting the jump pad. */
1697 lwp->exit_jump_pad_bkpt
1698 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1699 }
1700
1701 if (debug_threads)
1702 debug_printf ("Checking whether LWP %ld needs to move out of "
1703 "the jump pad...it does\n",
1704 lwpid_of (current_thread));
1705 current_thread = saved_thread;
1706
1707 return 1;
1708 }
1709 }
1710 else
1711 {
1712 /* If we get a synchronous signal while collecting, *and*
1713 while executing the (relocated) original instruction,
1714 reset the PC to point at the tpoint address, before
1715 reporting to GDB. Otherwise, it's an IPA lib bug: just
1716 report the signal to GDB, and pray for the best. */
1717
1718 lwp->collecting_fast_tracepoint = 0;
1719
1720 if (r != 0
1721 && (status.adjusted_insn_addr <= lwp->stop_pc
1722 && lwp->stop_pc < status.adjusted_insn_addr_end))
1723 {
1724 siginfo_t info;
1725 struct regcache *regcache;
1726
1727 /* The si_addr on a few signals references the address
1728 of the faulting instruction. Adjust that as
1729 well. */
1730 if ((WSTOPSIG (*wstat) == SIGILL
1731 || WSTOPSIG (*wstat) == SIGFPE
1732 || WSTOPSIG (*wstat) == SIGBUS
1733 || WSTOPSIG (*wstat) == SIGSEGV)
1734 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1735 (PTRACE_TYPE_ARG3) 0, &info) == 0
1736 /* Final check just to make sure we don't clobber
1737 the siginfo of non-kernel-sent signals. */
1738 && (uintptr_t) info.si_addr == lwp->stop_pc)
1739 {
1740 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1741 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1742 (PTRACE_TYPE_ARG3) 0, &info);
1743 }
1744
1745 regcache = get_thread_regcache (current_thread, 1);
1746 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1747 lwp->stop_pc = status.tpoint_addr;
1748
1749 /* Cancel any fast tracepoint lock this thread was
1750 holding. */
1751 force_unlock_trace_buffer ();
1752 }
1753
1754 if (lwp->exit_jump_pad_bkpt != NULL)
1755 {
1756 if (debug_threads)
1757 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1758 "stopping all threads momentarily.\n");
1759
1760 stop_all_lwps (1, lwp);
1761
1762 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1763 lwp->exit_jump_pad_bkpt = NULL;
1764
1765 unstop_all_lwps (1, lwp);
1766
1767 gdb_assert (lwp->suspended >= 0);
1768 }
1769 }
1770 }
1771
1772 if (debug_threads)
1773 debug_printf ("Checking whether LWP %ld needs to move out of the "
1774 "jump pad...no\n",
1775 lwpid_of (current_thread));
1776
1777 current_thread = saved_thread;
1778 return 0;
1779 }
1780
1781 /* Enqueue one signal in the "signals to report later when out of the
1782 jump pad" list. */
1783
1784 static void
1785 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1786 {
1787 struct pending_signals *p_sig;
1788 struct thread_info *thread = get_lwp_thread (lwp);
1789
1790 if (debug_threads)
1791 debug_printf ("Deferring signal %d for LWP %ld.\n",
1792 WSTOPSIG (*wstat), lwpid_of (thread));
1793
1794 if (debug_threads)
1795 {
1796 struct pending_signals *sig;
1797
1798 for (sig = lwp->pending_signals_to_report;
1799 sig != NULL;
1800 sig = sig->prev)
1801 debug_printf (" Already queued %d\n",
1802 sig->signal);
1803
1804 debug_printf (" (no more currently queued signals)\n");
1805 }
1806
1807 /* Don't enqueue non-RT signals if they are already in the deferred
1808 queue. (SIGSTOP being the easiest signal to see ending up here
1809 twice.) */
1810 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1811 {
1812 struct pending_signals *sig;
1813
1814 for (sig = lwp->pending_signals_to_report;
1815 sig != NULL;
1816 sig = sig->prev)
1817 {
1818 if (sig->signal == WSTOPSIG (*wstat))
1819 {
1820 if (debug_threads)
1821 debug_printf ("Not requeuing already queued non-RT signal %d"
1822 " for LWP %ld\n",
1823 sig->signal,
1824 lwpid_of (thread));
1825 return;
1826 }
1827 }
1828 }
1829
1830 p_sig = xmalloc (sizeof (*p_sig));
1831 p_sig->prev = lwp->pending_signals_to_report;
1832 p_sig->signal = WSTOPSIG (*wstat);
1833 memset (&p_sig->info, 0, sizeof (siginfo_t));
1834 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1835 &p_sig->info);
1836
1837 lwp->pending_signals_to_report = p_sig;
1838 }
1839
1840 /* Dequeue one signal from the "signals to report later when out of
1841 the jump pad" list. */
1842
1843 static int
1844 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1845 {
1846 struct thread_info *thread = get_lwp_thread (lwp);
1847
1848 if (lwp->pending_signals_to_report != NULL)
1849 {
1850 struct pending_signals **p_sig;
1851
1852 p_sig = &lwp->pending_signals_to_report;
1853 while ((*p_sig)->prev != NULL)
1854 p_sig = &(*p_sig)->prev;
1855
1856 *wstat = W_STOPCODE ((*p_sig)->signal);
1857 if ((*p_sig)->info.si_signo != 0)
1858 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1859 &(*p_sig)->info);
1860 free (*p_sig);
1861 *p_sig = NULL;
1862
1863 if (debug_threads)
1864 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1865 WSTOPSIG (*wstat), lwpid_of (thread));
1866
1867 if (debug_threads)
1868 {
1869 struct pending_signals *sig;
1870
1871 for (sig = lwp->pending_signals_to_report;
1872 sig != NULL;
1873 sig = sig->prev)
1874 debug_printf (" Still queued %d\n",
1875 sig->signal);
1876
1877 debug_printf (" (no more queued signals)\n");
1878 }
1879
1880 return 1;
1881 }
1882
1883 return 0;
1884 }
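/* A sketch of the defer/replay cycle the two functions above
   implement (illustrative; assumes LWP is a thread stopped inside a
   jump pad): signals are parked while the thread is moved out of the
   pad, then replayed oldest-first via the prev-chain walk above.  */
#if 0
  int wstat = W_STOPCODE (SIGUSR1);

  enqueue_one_deferred_signal (lwp, &wstat);	/* While in the pad.  */
  /* ... move the LWP out of the jump pad ... */
  while (dequeue_one_deferred_signal (lwp, &wstat))
    ;	/* Each WSTOPSIG (wstat) is delivered with the next resume.  */
#endif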
1885
1886 /* Fetch the possibly triggered data watchpoint info and store it in
1887 CHILD.
1888
1889 On some archs, like x86, that use debug registers to set
1890 watchpoints, it's possible that the way to know which watched
1891 address trapped is to check the register that is used to select
1892 which address to watch. Problem is, between setting the watchpoint
1893 and reading back which data address trapped, the user may change
1894 the set of watchpoints, and, as a consequence, GDB changes the
1895 debug registers in the inferior. To avoid reading back a stale
1896 stopped-data-address when that happens, we cache in LP the fact
1897 that a watchpoint trapped, and the corresponding data address, as
1898 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1899 registers meanwhile, we have the cached data we can rely on. */
1900
1901 static int
1902 check_stopped_by_watchpoint (struct lwp_info *child)
1903 {
1904 if (the_low_target.stopped_by_watchpoint != NULL)
1905 {
1906 struct thread_info *saved_thread;
1907
1908 saved_thread = current_thread;
1909 current_thread = get_lwp_thread (child);
1910
1911 if (the_low_target.stopped_by_watchpoint ())
1912 {
1913 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
1914
1915 if (the_low_target.stopped_data_address != NULL)
1916 child->stopped_data_address
1917 = the_low_target.stopped_data_address ();
1918 else
1919 child->stopped_data_address = 0;
1920 }
1921
1922 current_thread = saved_thread;
1923 }
1924
1925 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
1926 }
1927
1928 /* Do low-level handling of the event, and check if we should go on
1929    and pass it to the caller code.  Return the affected lwp if we
1930    should, or NULL otherwise.  */
1931
1932 static struct lwp_info *
1933 linux_low_filter_event (int lwpid, int wstat)
1934 {
1935 struct lwp_info *child;
1936 struct thread_info *thread;
1937 int have_stop_pc = 0;
1938
1939 child = find_lwp_pid (pid_to_ptid (lwpid));
1940
1941 /* If we didn't find a process, one of two things presumably happened:
1942 - A process we started and then detached from has exited. Ignore it.
1943 - A process we are controlling has forked and the new child's stop
1944 was reported to us by the kernel. Save its PID. */
1945 if (child == NULL && WIFSTOPPED (wstat))
1946 {
1947 add_to_pid_list (&stopped_pids, lwpid, wstat);
1948 return NULL;
1949 }
1950 else if (child == NULL)
1951 return NULL;
1952
1953 thread = get_lwp_thread (child);
1954
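  /* Record that the LWP is now stopped, and stash the raw wait
     status; the checks below decide whether it is reported to the
     caller or filtered out.  */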
1955 child->stopped = 1;
1956
1957 child->last_status = wstat;
1958
1959 /* Check if the thread has exited. */
1960 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1961 {
1962 if (debug_threads)
1963 debug_printf ("LLFE: %d exited.\n", lwpid);
1964 if (num_lwps (pid_of (thread)) > 1)
1965 {
1966
1967 /* If there is at least one more LWP, then the exit signal was
1968 not the end of the debugged application and should be
1969 ignored. */
1970 delete_lwp (child);
1971 return NULL;
1972 }
1973 else
1974 {
1975 	  /* This was the last lwp in the process.  Since events are
1976 	     serialized to the GDB core, we can't report this one right
1977 	     now; but the GDB core and the other target layers will want
1978 	     to be notified about the exit code/signal.  So leave the
1979 	     status pending for the next time we're able to report
1980 	     it.  */
1981 mark_lwp_dead (child, wstat);
1982 return child;
1983 }
1984 }
1985
1986 gdb_assert (WIFSTOPPED (wstat));
1987
1988 if (WIFSTOPPED (wstat))
1989 {
1990 struct process_info *proc;
1991
1992 /* Architecture-specific setup after inferior is running. This
1993 needs to happen after we have attached to the inferior and it
1994 is stopped for the first time, but before we access any
1995 inferior registers. */
1996 proc = find_process_pid (pid_of (thread));
1997 if (proc->priv->new_inferior)
1998 {
1999 struct thread_info *saved_thread;
2000
2001 saved_thread = current_thread;
2002 current_thread = thread;
2003
2004 the_low_target.arch_setup ();
2005
2006 current_thread = saved_thread;
2007
2008 proc->priv->new_inferior = 0;
2009 }
2010 }
2011
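  /* PTRACE_SETOPTIONS only works on a stopped tracee, which is why
     setting the ptrace event flags is deferred until the first stop
     we observe.  */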
2012 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2013 {
2014 struct process_info *proc = find_process_pid (pid_of (thread));
2015
2016 linux_enable_event_reporting (lwpid, proc->attached);
2017 child->must_set_ptrace_flags = 0;
2018 }
2019
2020 /* Be careful to not overwrite stop_pc until
2021 check_stopped_by_breakpoint is called. */
2022 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2023 && linux_is_extended_waitstatus (wstat))
2024 {
2025 child->stop_pc = get_pc (child);
2026 handle_extended_wait (child, wstat);
2027 return NULL;
2028 }
2029
2030 /* Check first whether this was a SW/HW breakpoint before checking
2031 watchpoints, because at least s390 can't tell the data address of
2032 hardware watchpoint hits, and returns stopped-by-watchpoint as
2033 long as there's a watchpoint set. */
2034 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2035 {
2036 if (check_stopped_by_breakpoint (child))
2037 have_stop_pc = 1;
2038 }
2039
2040 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2041 or hardware watchpoint. Check which is which if we got
2042 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2043 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2044 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2045 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2046 check_stopped_by_watchpoint (child);
2047
2048 if (!have_stop_pc)
2049 child->stop_pc = get_pc (child);
2050
2051 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2052 && child->stop_expected)
2053 {
2054 if (debug_threads)
2055 debug_printf ("Expected stop.\n");
2056 child->stop_expected = 0;
2057
2058 if (thread->last_resume_kind == resume_stop)
2059 {
2060 /* We want to report the stop to the core. Treat the
2061 SIGSTOP as a normal event. */
2062 }
2063 else if (stopping_threads != NOT_STOPPING_THREADS)
2064 {
2065 /* Stopping threads. We don't want this SIGSTOP to end up
2066 pending. */
2067 return NULL;
2068 }
2069 else
2070 {
2071 /* Filter out the event. */
2072 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2073 return NULL;
2074 }
2075 }
2076
2077 child->status_pending_p = 1;
2078 child->status_pending = wstat;
2079 return child;
2080 }
2081
2082 /* Resume LWPs that are currently stopped without any pending status
2083 to report, but are resumed from the core's perspective. */
2084
2085 static void
2086 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2087 {
2088 struct thread_info *thread = (struct thread_info *) entry;
2089 struct lwp_info *lp = get_thread_lwp (thread);
2090
2091 if (lp->stopped
2092 && !lp->status_pending_p
2093 && thread->last_resume_kind != resume_stop
2094 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2095 {
2096 int step = thread->last_resume_kind == resume_step;
2097
2098 if (debug_threads)
2099 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2100 target_pid_to_str (ptid_of (thread)),
2101 paddress (lp->stop_pc),
2102 step);
2103
2104 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2105 }
2106 }
2107
2108 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2109 match FILTER_PTID (leaving others pending). The PTIDs can be:
2110 minus_one_ptid, to specify any child; a pid PTID, specifying all
2111 lwps of a thread group; or a PTID representing a single lwp. Store
2112 the stop status through the status pointer WSTAT. OPTIONS is
2113 passed to the waitpid call. Return 0 if no event was found and
2114 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2115    were found.  Return the PID of the stopped child otherwise.  */
2116
2117 static int
2118 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2119 int *wstatp, int options)
2120 {
2121 struct thread_info *event_thread;
2122 struct lwp_info *event_child, *requested_child;
2123 sigset_t block_mask, prev_mask;
2124
2125 retry:
2126 /* N.B. event_thread points to the thread_info struct that contains
2127 event_child. Keep them in sync. */
2128 event_thread = NULL;
2129 event_child = NULL;
2130 requested_child = NULL;
2131
2132 /* Check for a lwp with a pending status. */
2133
2134 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2135 {
2136 event_thread = (struct thread_info *)
2137 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2138 if (event_thread != NULL)
2139 event_child = get_thread_lwp (event_thread);
2140 if (debug_threads && event_thread)
2141 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2142 }
2143 else if (!ptid_equal (filter_ptid, null_ptid))
2144 {
2145 requested_child = find_lwp_pid (filter_ptid);
2146
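      /* If the requested LWP is midway through a fast tracepoint
         collection, defer its pending signal and let it run until it
         is out of the jump pad; the deferred status is picked up
         again later, once it is safe to stop the thread there.  */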
2147 if (stopping_threads == NOT_STOPPING_THREADS
2148 && requested_child->status_pending_p
2149 && requested_child->collecting_fast_tracepoint)
2150 {
2151 enqueue_one_deferred_signal (requested_child,
2152 &requested_child->status_pending);
2153 requested_child->status_pending_p = 0;
2154 requested_child->status_pending = 0;
2155 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2156 }
2157
2158 if (requested_child->suspended
2159 && requested_child->status_pending_p)
2160 {
2161 internal_error (__FILE__, __LINE__,
2162 "requesting an event out of a"
2163 " suspended child?");
2164 }
2165
2166 if (requested_child->status_pending_p)
2167 {
2168 event_child = requested_child;
2169 event_thread = get_lwp_thread (event_child);
2170 }
2171 }
2172
2173 if (event_child != NULL)
2174 {
2175 if (debug_threads)
2176 debug_printf ("Got an event from pending child %ld (%04x)\n",
2177 lwpid_of (event_thread), event_child->status_pending);
2178 *wstatp = event_child->status_pending;
2179 event_child->status_pending_p = 0;
2180 event_child->status_pending = 0;
2181 current_thread = event_thread;
2182 return lwpid_of (event_thread);
2183 }
2184
2185 /* But if we don't find a pending event, we'll have to wait.
2186
2187 We only enter this loop if no process has a pending wait status.
2188 Thus any action taken in response to a wait status inside this
2189 loop is responding as soon as we detect the status, not after any
2190 pending events. */
2191
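  /* This is the classic block/check/sigsuspend pattern.  In rough
     outline:

       sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
       while (waitpid (-1, ..., WNOHANG) finds nothing interesting)
         sigsuspend (&prev_mask);
       sigprocmask (SIG_SETMASK, &prev_mask, NULL);

     If SIGCHLD were left unblocked, it could be delivered between a
     failed waitpid and the sigsuspend, and we'd block forever having
     missed the wakeup.  sigsuspend atomically unblocks SIGCHLD and
     waits, closing that window.  */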
2192 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2193 all signals while here. */
2194 sigfillset (&block_mask);
2195 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2196
2197 /* Always pull all events out of the kernel. We'll randomly select
2198 an event LWP out of all that have events, to prevent
2199 starvation. */
2200 while (event_child == NULL)
2201 {
2202 pid_t ret = 0;
2203
2204       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2205 quirks:
2206
2207 - If the thread group leader exits while other threads in the
2208 thread group still exist, waitpid(TGID, ...) hangs. That
2209 waitpid won't return an exit status until the other threads
2210 in the group are reaped.
2211
2212 - When a non-leader thread execs, that thread just vanishes
2213 without reporting an exit (so we'd hang if we waited for it
2214 explicitly in that case). The exec event is reported to
2215 the TGID pid (although we don't currently enable exec
2216 events). */
2217 errno = 0;
2218 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2219
2220 if (debug_threads)
2221 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2222 ret, errno ? strerror (errno) : "ERRNO-OK");
2223
2224 if (ret > 0)
2225 {
2226 if (debug_threads)
2227 {
2228 debug_printf ("LLW: waitpid %ld received %s\n",
2229 (long) ret, status_to_str (*wstatp));
2230 }
2231
2232 /* Filter all events. IOW, leave all events pending. We'll
2233 randomly select an event LWP out of all that have events
2234 below. */
2235 linux_low_filter_event (ret, *wstatp);
2236 /* Retry until nothing comes out of waitpid. A single
2237 SIGCHLD can indicate more than one child stopped. */
2238 continue;
2239 }
2240
2241 /* Now that we've pulled all events out of the kernel, resume
2242 LWPs that don't have an interesting event to report. */
2243 if (stopping_threads == NOT_STOPPING_THREADS)
2244 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2245
2246 /* ... and find an LWP with a status to report to the core, if
2247 any. */
2248 event_thread = (struct thread_info *)
2249 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2250 if (event_thread != NULL)
2251 {
2252 event_child = get_thread_lwp (event_thread);
2253 *wstatp = event_child->status_pending;
2254 event_child->status_pending_p = 0;
2255 event_child->status_pending = 0;
2256 break;
2257 }
2258
2259 /* Check for zombie thread group leaders. Those can't be reaped
2260 until all other threads in the thread group are. */
2261 check_zombie_leaders ();
2262
2263 /* If there are no resumed children left in the set of LWPs we
2264 want to wait for, bail. We can't just block in
2265 waitpid/sigsuspend, because lwps might have been left stopped
2266 in trace-stop state, and we'd be stuck forever waiting for
2267 their status to change (which would only happen if we resumed
2268 them). Even if WNOHANG is set, this return code is preferred
2269 over 0 (below), as it is more detailed. */
2270 if ((find_inferior (&all_threads,
2271 not_stopped_callback,
2272 &wait_ptid) == NULL))
2273 {
2274 if (debug_threads)
2275 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2276 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2277 return -1;
2278 }
2279
2280 /* No interesting event to report to the caller. */
2281 if ((options & WNOHANG))
2282 {
2283 if (debug_threads)
2284 debug_printf ("WNOHANG set, no event found\n");
2285
2286 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2287 return 0;
2288 }
2289
2290 /* Block until we get an event reported with SIGCHLD. */
2291 if (debug_threads)
2292 debug_printf ("sigsuspend'ing\n");
2293
2294 sigsuspend (&prev_mask);
2295 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2296 goto retry;
2297 }
2298
2299 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2300
2301 current_thread = event_thread;
2302
2303 /* Check for thread exit. */
2304 if (! WIFSTOPPED (*wstatp))
2305 {
2306 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2307
2308 if (debug_threads)
2309 	debug_printf ("LWP %ld is the last lwp of process.  "
2310 		      "Process %d exiting.\n",
2311 		      lwpid_of (event_thread), pid_of (event_thread));
2312 return lwpid_of (event_thread);
2313 }
2314
2315 return lwpid_of (event_thread);
2316 }
2317
2318 /* Wait for an event from child(ren) PTID. PTIDs can be:
2319 minus_one_ptid, to specify any child; a pid PTID, specifying all
2320 lwps of a thread group; or a PTID representing a single lwp. Store
2321 the stop status through the status pointer WSTAT. OPTIONS is
2322 passed to the waitpid call. Return 0 if no event was found and
2323 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2324    were found.  Return the PID of the stopped child otherwise.  */
2325
2326 static int
2327 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2328 {
2329 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2330 }
2331
2332 /* Count the LWPs that have had events.  */
2333
2334 static int
2335 count_events_callback (struct inferior_list_entry *entry, void *data)
2336 {
2337 struct thread_info *thread = (struct thread_info *) entry;
2338 struct lwp_info *lp = get_thread_lwp (thread);
2339 int *count = data;
2340
2341 gdb_assert (count != NULL);
2342
2343 /* Count only resumed LWPs that have an event pending. */
2344 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2345 && lp->status_pending_p)
2346 (*count)++;
2347
2348 return 0;
2349 }
2350
2351 /* Select the LWP (if any) that is currently being single-stepped. */
2352
2353 static int
2354 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2355 {
2356 struct thread_info *thread = (struct thread_info *) entry;
2357 struct lwp_info *lp = get_thread_lwp (thread);
2358
2359 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2360 && thread->last_resume_kind == resume_step
2361 && lp->status_pending_p)
2362 return 1;
2363 else
2364 return 0;
2365 }
2366
2367 /* Select the Nth LWP that has had an event. */
2368
2369 static int
2370 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2371 {
2372 struct thread_info *thread = (struct thread_info *) entry;
2373 struct lwp_info *lp = get_thread_lwp (thread);
2374 int *selector = data;
2375
2376 gdb_assert (selector != NULL);
2377
2378 /* Select only resumed LWPs that have an event pending. */
2379 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2380 && lp->status_pending_p)
2381 if ((*selector)-- == 0)
2382 return 1;
2383
2384 return 0;
2385 }
2386
2387 /* Select one LWP out of those that have events pending. */
2388
2389 static void
2390 select_event_lwp (struct lwp_info **orig_lp)
2391 {
2392 int num_events = 0;
2393 int random_selector;
2394 struct thread_info *event_thread = NULL;
2395
2396 /* In all-stop, give preference to the LWP that is being
2397 single-stepped. There will be at most one, and it's the LWP that
2398 the core is most interested in. If we didn't do this, then we'd
2399 have to handle pending step SIGTRAPs somehow in case the core
2400 later continues the previously-stepped thread, otherwise we'd
2401 report the pending SIGTRAP, and the core, not having stepped the
2402 thread, wouldn't understand what the trap was for, and therefore
2403 would report it to the user as a random signal. */
2404 if (!non_stop)
2405 {
2406 event_thread
2407 = (struct thread_info *) find_inferior (&all_threads,
2408 select_singlestep_lwp_callback,
2409 NULL);
2410 if (event_thread != NULL)
2411 {
2412 if (debug_threads)
2413 debug_printf ("SEL: Select single-step %s\n",
2414 target_pid_to_str (ptid_of (event_thread)));
2415 }
2416 }
2417 if (event_thread == NULL)
2418 {
2419 /* No single-stepping LWP. Select one at random, out of those
2420 which have had events. */
2421
2422 /* First see how many events we have. */
2423 find_inferior (&all_threads, count_events_callback, &num_events);
2424 gdb_assert (num_events > 0);
2425
2426 /* Now randomly pick a LWP out of those that have had
2427 events. */
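      /* E.g., with num_events == 3, rand () in [0, RAND_MAX] maps
         (almost) uniformly onto selector values 0, 1 and 2; the
         callback below then decrements the selector once per pending
         LWP until it reaches zero.  */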
2428 random_selector = (int)
2429 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2430
2431 if (debug_threads && num_events > 1)
2432 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2433 num_events, random_selector);
2434
2435 event_thread
2436 = (struct thread_info *) find_inferior (&all_threads,
2437 select_event_lwp_callback,
2438 &random_selector);
2439 }
2440
2441 if (event_thread != NULL)
2442 {
2443 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2444
2445 /* Switch the event LWP. */
2446 *orig_lp = event_lp;
2447 }
2448 }
2449
2450 /* Decrement the suspend count of an LWP. */
2451
2452 static int
2453 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2454 {
2455 struct thread_info *thread = (struct thread_info *) entry;
2456 struct lwp_info *lwp = get_thread_lwp (thread);
2457
2458 /* Ignore EXCEPT. */
2459 if (lwp == except)
2460 return 0;
2461
2462 lwp->suspended--;
2463
2464 gdb_assert (lwp->suspended >= 0);
2465 return 0;
2466 }
2467
2468 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2469    non-NULL.  */
2470
2471 static void
2472 unsuspend_all_lwps (struct lwp_info *except)
2473 {
2474 find_inferior (&all_threads, unsuspend_one_lwp, except);
2475 }
2476
2477 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2478 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2479 void *data);
2480 static int lwp_running (struct inferior_list_entry *entry, void *data);
2481 static ptid_t linux_wait_1 (ptid_t ptid,
2482 struct target_waitstatus *ourstatus,
2483 int target_options);
2484
2485 /* Stabilize threads (move out of jump pads).
2486
2487 If a thread is midway collecting a fast tracepoint, we need to
2488 finish the collection and move it out of the jump pad before
2489 reporting the signal.
2490
2491 This avoids recursion while collecting (when a signal arrives
2492 midway, and the signal handler itself collects), which would trash
2493 the trace buffer. In case the user set a breakpoint in a signal
2494    handler, this avoids the backtrace showing the jump pad, etc.
2495 Most importantly, there are certain things we can't do safely if
2496    threads are stopped in a jump pad (or in its callees).  For
2497 example:
2498
2499    - starting a new trace run.  A thread still collecting the
2500    previous run could trash the trace buffer when resumed.  The trace
2501    buffer control structures would have been reset, but the thread
2502    had no way to tell.  The thread could even be midway through
2503    memcpy'ing to the buffer, which would mean that, when resumed, it
2504    would clobber the trace buffer that had been set up for the new run.
2505
2506 - we can't rewrite/reuse the jump pads for new tracepoints
2507    safely.  Say you do tstart while a thread is stopped midway
2508    through a collection.  When the thread is later resumed, it
2509    finishes the collection, and returns to the jump pad, to execute
2510    the original instruction that was under the tracepoint jump at the
2511    time the older run was started.  If the jump pad had since been
2512    rewritten for something else in the new run, the thread would now
2513    execute the wrong/random instructions.  */
2514
2515 static void
2516 linux_stabilize_threads (void)
2517 {
2518 struct thread_info *saved_thread;
2519 struct thread_info *thread_stuck;
2520
2521 thread_stuck
2522 = (struct thread_info *) find_inferior (&all_threads,
2523 stuck_in_jump_pad_callback,
2524 NULL);
2525 if (thread_stuck != NULL)
2526 {
2527 if (debug_threads)
2528 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2529 lwpid_of (thread_stuck));
2530 return;
2531 }
2532
2533 saved_thread = current_thread;
2534
2535 stabilizing_threads = 1;
2536
2537 /* Kick 'em all. */
2538 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2539
2540 /* Loop until all are stopped out of the jump pads. */
2541 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2542 {
2543 struct target_waitstatus ourstatus;
2544 struct lwp_info *lwp;
2545 int wstat;
2546
2547       /* Note that we go through the full wait event loop.  While
2548 	 moving threads out of the jump pad, we need to be able to
2549 	 step over internal breakpoints and such.  */
2550 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2551
2552 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2553 {
2554 lwp = get_thread_lwp (current_thread);
2555
2556 /* Lock it. */
2557 lwp->suspended++;
2558
2559 if (ourstatus.value.sig != GDB_SIGNAL_0
2560 || current_thread->last_resume_kind == resume_stop)
2561 {
2562 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2563 enqueue_one_deferred_signal (lwp, &wstat);
2564 }
2565 }
2566 }
2567
2568 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2569
2570 stabilizing_threads = 0;
2571
2572 current_thread = saved_thread;
2573
2574 if (debug_threads)
2575 {
2576 thread_stuck
2577 = (struct thread_info *) find_inferior (&all_threads,
2578 stuck_in_jump_pad_callback,
2579 NULL);
2580 if (thread_stuck != NULL)
2581 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2582 lwpid_of (thread_stuck));
2583 }
2584 }
2585
2586 static void async_file_mark (void);
2587
2588 /* Convenience function that is called when the kernel reports an
2589 event that is not passed out to GDB. */
2590
2591 static ptid_t
2592 ignore_event (struct target_waitstatus *ourstatus)
2593 {
2594 /* If we got an event, there may still be others, as a single
2595 SIGCHLD can indicate more than one child stopped. This forces
2596 another target_wait call. */
2597 async_file_mark ();
2598
2599 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2600 return null_ptid;
2601 }
2602
2603 /* Wait for a process, and return its status.  */
2604
2605 static ptid_t
2606 linux_wait_1 (ptid_t ptid,
2607 struct target_waitstatus *ourstatus, int target_options)
2608 {
2609 int w;
2610 struct lwp_info *event_child;
2611 int options;
2612 int pid;
2613 int step_over_finished;
2614 int bp_explains_trap;
2615 int maybe_internal_trap;
2616 int report_to_gdb;
2617 int trace_event;
2618 int in_step_range;
2619
2620 if (debug_threads)
2621 {
2622 debug_enter ();
2623 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2624 }
2625
2626 /* Translate generic target options into linux options. */
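  /* __WALL asks the kernel to report both "clone" children (LWPs
     created with CLONE_THREAD) and regular children, regardless of
     their exit signal.  */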
2627 options = __WALL;
2628 if (target_options & TARGET_WNOHANG)
2629 options |= WNOHANG;
2630
2631 bp_explains_trap = 0;
2632 trace_event = 0;
2633 in_step_range = 0;
2634 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2635
2636 if (ptid_equal (step_over_bkpt, null_ptid))
2637 pid = linux_wait_for_event (ptid, &w, options);
2638 else
2639 {
2640 if (debug_threads)
2641 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2642 target_pid_to_str (step_over_bkpt));
2643 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2644 }
2645
2646 if (pid == 0)
2647 {
2648 gdb_assert (target_options & TARGET_WNOHANG);
2649
2650 if (debug_threads)
2651 {
2652 debug_printf ("linux_wait_1 ret = null_ptid, "
2653 "TARGET_WAITKIND_IGNORE\n");
2654 debug_exit ();
2655 }
2656
2657 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2658 return null_ptid;
2659 }
2660 else if (pid == -1)
2661 {
2662 if (debug_threads)
2663 {
2664 debug_printf ("linux_wait_1 ret = null_ptid, "
2665 "TARGET_WAITKIND_NO_RESUMED\n");
2666 debug_exit ();
2667 }
2668
2669 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2670 return null_ptid;
2671 }
2672
2673 event_child = get_thread_lwp (current_thread);
2674
2675 /* linux_wait_for_event only returns an exit status for the last
2676 child of a process. Report it. */
2677 if (WIFEXITED (w) || WIFSIGNALED (w))
2678 {
2679 if (WIFEXITED (w))
2680 {
2681 ourstatus->kind = TARGET_WAITKIND_EXITED;
2682 ourstatus->value.integer = WEXITSTATUS (w);
2683
2684 if (debug_threads)
2685 {
2686 debug_printf ("linux_wait_1 ret = %s, exited with "
2687 "retcode %d\n",
2688 target_pid_to_str (ptid_of (current_thread)),
2689 WEXITSTATUS (w));
2690 debug_exit ();
2691 }
2692 }
2693 else
2694 {
2695 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2696 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2697
2698 if (debug_threads)
2699 {
2700 debug_printf ("linux_wait_1 ret = %s, terminated with "
2701 "signal %d\n",
2702 target_pid_to_str (ptid_of (current_thread)),
2703 WTERMSIG (w));
2704 debug_exit ();
2705 }
2706 }
2707
2708 return ptid_of (current_thread);
2709 }
2710
2711 /* If step-over executes a breakpoint instruction, it means a
2712 gdb/gdbserver breakpoint had been planted on top of a permanent
2713 breakpoint. The PC has been adjusted by
2714 check_stopped_by_breakpoint to point at the breakpoint address.
2715 Advance the PC manually past the breakpoint, otherwise the
2716 program would keep trapping the permanent breakpoint forever. */
2717 if (!ptid_equal (step_over_bkpt, null_ptid)
2718 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2719 {
2720 unsigned int increment_pc = the_low_target.breakpoint_len;
2721
2722 if (debug_threads)
2723 {
2724 debug_printf ("step-over for %s executed software breakpoint\n",
2725 target_pid_to_str (ptid_of (current_thread)));
2726 }
2727
2728 if (increment_pc != 0)
2729 {
2730 struct regcache *regcache
2731 = get_thread_regcache (current_thread, 1);
2732
2733 event_child->stop_pc += increment_pc;
2734 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2735
2736 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2737 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2738 }
2739 }
2740
2741 /* If this event was not handled before, and is not a SIGTRAP, we
2742 report it. SIGILL and SIGSEGV are also treated as traps in case
2743 a breakpoint is inserted at the current PC. If this target does
2744 not support internal breakpoints at all, we also report the
2745 SIGTRAP without further processing; it's of no concern to us. */
2746 maybe_internal_trap
2747 = (supports_breakpoints ()
2748 && (WSTOPSIG (w) == SIGTRAP
2749 || ((WSTOPSIG (w) == SIGILL
2750 || WSTOPSIG (w) == SIGSEGV)
2751 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2752
2753 if (maybe_internal_trap)
2754 {
2755 /* Handle anything that requires bookkeeping before deciding to
2756 report the event or continue waiting. */
2757
2758 /* First check if we can explain the SIGTRAP with an internal
2759 breakpoint, or if we should possibly report the event to GDB.
2760 Do this before anything that may remove or insert a
2761 breakpoint. */
2762 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2763
2764 /* We have a SIGTRAP, possibly a step-over dance has just
2765 finished. If so, tweak the state machine accordingly,
2766 reinsert breakpoints and delete any reinsert (software
2767 single-step) breakpoints. */
2768 step_over_finished = finish_step_over (event_child);
2769
2770 /* Now invoke the callbacks of any internal breakpoints there. */
2771 check_breakpoints (event_child->stop_pc);
2772
2773 /* Handle tracepoint data collecting. This may overflow the
2774 trace buffer, and cause a tracing stop, removing
2775 breakpoints. */
2776 trace_event = handle_tracepoints (event_child);
2777
2778 if (bp_explains_trap)
2779 {
2780 /* If we stepped or ran into an internal breakpoint, we've
2781 already handled it. So next time we resume (from this
2782 PC), we should step over it. */
2783 if (debug_threads)
2784 debug_printf ("Hit a gdbserver breakpoint.\n");
2785
2786 if (breakpoint_here (event_child->stop_pc))
2787 event_child->need_step_over = 1;
2788 }
2789 }
2790 else
2791 {
2792 /* We have some other signal, possibly a step-over dance was in
2793 progress, and it should be cancelled too. */
2794 step_over_finished = finish_step_over (event_child);
2795 }
2796
2797 /* We have all the data we need. Either report the event to GDB, or
2798 resume threads and keep waiting for more. */
2799
2800 /* If we're collecting a fast tracepoint, finish the collection and
2801 move out of the jump pad before delivering a signal. See
2802 linux_stabilize_threads. */
2803
2804 if (WIFSTOPPED (w)
2805 && WSTOPSIG (w) != SIGTRAP
2806 && supports_fast_tracepoints ()
2807 && agent_loaded_p ())
2808 {
2809 if (debug_threads)
2810 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2811 "to defer or adjust it.\n",
2812 WSTOPSIG (w), lwpid_of (current_thread));
2813
2814 /* Allow debugging the jump pad itself. */
2815 if (current_thread->last_resume_kind != resume_step
2816 && maybe_move_out_of_jump_pad (event_child, &w))
2817 {
2818 enqueue_one_deferred_signal (event_child, &w);
2819
2820 if (debug_threads)
2821 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2822 WSTOPSIG (w), lwpid_of (current_thread));
2823
2824 linux_resume_one_lwp (event_child, 0, 0, NULL);
2825
2826 return ignore_event (ourstatus);
2827 }
2828 }
2829
2830 if (event_child->collecting_fast_tracepoint)
2831 {
2832 if (debug_threads)
2833 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2834 "Check if we're already there.\n",
2835 lwpid_of (current_thread),
2836 event_child->collecting_fast_tracepoint);
2837
2838 trace_event = 1;
2839
2840 event_child->collecting_fast_tracepoint
2841 = linux_fast_tracepoint_collecting (event_child, NULL);
2842
2843 if (event_child->collecting_fast_tracepoint != 1)
2844 {
2845 /* No longer need this breakpoint. */
2846 if (event_child->exit_jump_pad_bkpt != NULL)
2847 {
2848 if (debug_threads)
2849 		    debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2850 				  "stopping all threads momentarily.\n");
2851
2852 /* Other running threads could hit this breakpoint.
2853 We don't handle moribund locations like GDB does,
2854 instead we always pause all threads when removing
2855 breakpoints, so that any step-over or
2856 decr_pc_after_break adjustment is always taken
2857 care of while the breakpoint is still
2858 inserted. */
2859 stop_all_lwps (1, event_child);
2860
2861 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2862 event_child->exit_jump_pad_bkpt = NULL;
2863
2864 unstop_all_lwps (1, event_child);
2865
2866 gdb_assert (event_child->suspended >= 0);
2867 }
2868 }
2869
2870 if (event_child->collecting_fast_tracepoint == 0)
2871 {
2872 if (debug_threads)
2873 debug_printf ("fast tracepoint finished "
2874 "collecting successfully.\n");
2875
2876 /* We may have a deferred signal to report. */
2877 if (dequeue_one_deferred_signal (event_child, &w))
2878 {
2879 if (debug_threads)
2880 debug_printf ("dequeued one signal.\n");
2881 }
2882 else
2883 {
2884 if (debug_threads)
2885 debug_printf ("no deferred signals.\n");
2886
2887 if (stabilizing_threads)
2888 {
2889 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2890 ourstatus->value.sig = GDB_SIGNAL_0;
2891
2892 if (debug_threads)
2893 {
2894 debug_printf ("linux_wait_1 ret = %s, stopped "
2895 "while stabilizing threads\n",
2896 target_pid_to_str (ptid_of (current_thread)));
2897 debug_exit ();
2898 }
2899
2900 return ptid_of (current_thread);
2901 }
2902 }
2903 }
2904 }
2905
2906 /* Check whether GDB would be interested in this event. */
2907
2908 /* If GDB is not interested in this signal, don't stop other
2909 threads, and don't report it to GDB. Just resume the inferior
2910 right away. We do this for threading-related signals as well as
2911 any that GDB specifically requested we ignore. But never ignore
2912 SIGSTOP if we sent it ourselves, and do not ignore signals when
2913 stepping - they may require special handling to skip the signal
2914 handler. Also never ignore signals that could be caused by a
2915 breakpoint. */
2916 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2917 thread library? */
2918 if (WIFSTOPPED (w)
2919 && current_thread->last_resume_kind != resume_step
2920 && (
2921 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2922 (current_process ()->priv->thread_db != NULL
2923 && (WSTOPSIG (w) == __SIGRTMIN
2924 || WSTOPSIG (w) == __SIGRTMIN + 1))
2925 ||
2926 #endif
2927 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2928 && !(WSTOPSIG (w) == SIGSTOP
2929 && current_thread->last_resume_kind == resume_stop)
2930 && !linux_wstatus_maybe_breakpoint (w))))
2931 {
2932 siginfo_t info, *info_p;
2933
2934 if (debug_threads)
2935 debug_printf ("Ignored signal %d for LWP %ld.\n",
2936 WSTOPSIG (w), lwpid_of (current_thread));
2937
2938 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2939 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2940 info_p = &info;
2941 else
2942 info_p = NULL;
2943 linux_resume_one_lwp (event_child, event_child->stepping,
2944 WSTOPSIG (w), info_p);
2945 return ignore_event (ourstatus);
2946 }
2947
2948 /* Note that all addresses are always "out of the step range" when
2949 there's no range to begin with. */
2950 in_step_range = lwp_in_step_range (event_child);
2951
2952 /* If GDB wanted this thread to single step, and the thread is out
2953 of the step range, we always want to report the SIGTRAP, and let
2954 GDB handle it. Watchpoints should always be reported. So should
2955 signals we can't explain. A SIGTRAP we can't explain could be a
2956      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2957      we do, we'd be able to handle GDB breakpoints on top of internal
2958 breakpoints, by handling the internal breakpoint and still
2959 reporting the event to GDB. If we don't, we're out of luck, GDB
2960 won't see the breakpoint hit. */
2961 report_to_gdb = (!maybe_internal_trap
2962 || (current_thread->last_resume_kind == resume_step
2963 && !in_step_range)
2964 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
2965 || (!step_over_finished && !in_step_range
2966 && !bp_explains_trap && !trace_event)
2967 || (gdb_breakpoint_here (event_child->stop_pc)
2968 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2969 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2970
2971 run_breakpoint_commands (event_child->stop_pc);
2972
2973 /* We found no reason GDB would want us to stop. We either hit one
2974 of our own breakpoints, or finished an internal step GDB
2975 shouldn't know about. */
2976 if (!report_to_gdb)
2977 {
2978 if (debug_threads)
2979 {
2980 if (bp_explains_trap)
2981 debug_printf ("Hit a gdbserver breakpoint.\n");
2982 if (step_over_finished)
2983 debug_printf ("Step-over finished.\n");
2984 if (trace_event)
2985 debug_printf ("Tracepoint event.\n");
2986 if (lwp_in_step_range (event_child))
2987 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2988 paddress (event_child->stop_pc),
2989 paddress (event_child->step_range_start),
2990 paddress (event_child->step_range_end));
2991 }
2992
2993 /* We're not reporting this breakpoint to GDB, so apply the
2994 decr_pc_after_break adjustment to the inferior's regcache
2995 ourselves. */
2996
2997 if (the_low_target.set_pc != NULL)
2998 {
2999 struct regcache *regcache
3000 = get_thread_regcache (current_thread, 1);
3001 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3002 }
3003
3004 /* We may have finished stepping over a breakpoint. If so,
3005 we've stopped and suspended all LWPs momentarily except the
3006 stepping one. This is where we resume them all again. We're
3007 going to keep waiting, so use proceed, which handles stepping
3008 over the next breakpoint. */
3009 if (debug_threads)
3010 debug_printf ("proceeding all threads.\n");
3011
3012 if (step_over_finished)
3013 unsuspend_all_lwps (event_child);
3014
3015 proceed_all_lwps ();
3016 return ignore_event (ourstatus);
3017 }
3018
3019 if (debug_threads)
3020 {
3021 if (current_thread->last_resume_kind == resume_step)
3022 {
3023 if (event_child->step_range_start == event_child->step_range_end)
3024 debug_printf ("GDB wanted to single-step, reporting event.\n");
3025 else if (!lwp_in_step_range (event_child))
3026 debug_printf ("Out of step range, reporting event.\n");
3027 }
3028 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3029 debug_printf ("Stopped by watchpoint.\n");
3030 else if (gdb_breakpoint_here (event_child->stop_pc))
3031 debug_printf ("Stopped by GDB breakpoint.\n");
3032       debug_printf ("Hit a non-gdbserver trap event.\n");
3034 }
3035
3036 /* Alright, we're going to report a stop. */
3037
3038 if (!stabilizing_threads)
3039 {
3040 /* In all-stop, stop all threads. */
3041 if (!non_stop)
3042 stop_all_lwps (0, NULL);
3043
3044 /* If we're not waiting for a specific LWP, choose an event LWP
3045 from among those that have had events. Giving equal priority
3046 to all LWPs that have had events helps prevent
3047 starvation. */
3048 if (ptid_equal (ptid, minus_one_ptid))
3049 {
3050 event_child->status_pending_p = 1;
3051 event_child->status_pending = w;
3052
3053 select_event_lwp (&event_child);
3054
3055 /* current_thread and event_child must stay in sync. */
3056 current_thread = get_lwp_thread (event_child);
3057
3058 event_child->status_pending_p = 0;
3059 w = event_child->status_pending;
3060 }
3061
3062 if (step_over_finished)
3063 {
3064 if (!non_stop)
3065 {
3066 /* If we were doing a step-over, all other threads but
3067 the stepping one had been paused in start_step_over,
3068 with their suspend counts incremented. We don't want
3069 to do a full unstop/unpause, because we're in
3070 all-stop mode (so we want threads stopped), but we
3071 still need to unsuspend the other threads, to
3072 decrement their `suspended' count back. */
3073 unsuspend_all_lwps (event_child);
3074 }
3075 else
3076 {
3077 /* If we just finished a step-over, then all threads had
3078 been momentarily paused. In all-stop, that's fine,
3079 we want threads stopped by now anyway. In non-stop,
3080 we need to re-resume threads that GDB wanted to be
3081 running. */
3082 unstop_all_lwps (1, event_child);
3083 }
3084 }
3085
3086 /* Stabilize threads (move out of jump pads). */
3087 if (!non_stop)
3088 stabilize_threads ();
3089 }
3090 else
3091 {
3092 /* If we just finished a step-over, then all threads had been
3093 momentarily paused. In all-stop, that's fine, we want
3094 threads stopped by now anyway. In non-stop, we need to
3095 re-resume threads that GDB wanted to be running. */
3096 if (step_over_finished)
3097 unstop_all_lwps (1, event_child);
3098 }
3099
3100 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3101
3102 /* Now that we've selected our final event LWP, un-adjust its PC if
3103 it was a software breakpoint, and the client doesn't know we can
3104 adjust the breakpoint ourselves. */
3105 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3106 && !swbreak_feature)
3107 {
3108 int decr_pc = the_low_target.decr_pc_after_break;
3109
3110 if (decr_pc != 0)
3111 {
3112 struct regcache *regcache
3113 = get_thread_regcache (current_thread, 1);
3114 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3115 }
3116 }
3117
3118 if (current_thread->last_resume_kind == resume_stop
3119 && WSTOPSIG (w) == SIGSTOP)
3120 {
3121       /* A thread that has been requested to stop by GDB with vCont;t
3122 	 stopped cleanly, so report it as SIG0.  The use of SIGSTOP is
3123 	 an implementation detail.  */
3124 ourstatus->value.sig = GDB_SIGNAL_0;
3125 }
3126 else if (current_thread->last_resume_kind == resume_stop
3127 && WSTOPSIG (w) != SIGSTOP)
3128 {
3129       /* A thread that has been requested to stop by GDB with vCont;t,
3130 	 but which stopped for some other reason.  */
3131 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3132 }
3133 else
3134 {
3135 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3136 }
3137
3138 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3139
3140 if (debug_threads)
3141 {
3142 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3143 target_pid_to_str (ptid_of (current_thread)),
3144 ourstatus->kind, ourstatus->value.sig);
3145 debug_exit ();
3146 }
3147
3148 return ptid_of (current_thread);
3149 }
3150
3151 /* Get rid of any pending event in the pipe. */
3152 static void
3153 async_file_flush (void)
3154 {
3155 int ret;
3156 char buf;
3157
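  /* The event pipe is assumed to be non-blocking (it is expected to
     be marked O_NONBLOCK when async mode is enabled), so once
     drained the read below fails with EAGAIN instead of blocking.  */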
3158 do
3159 ret = read (linux_event_pipe[0], &buf, 1);
3160 while (ret >= 0 || (ret == -1 && errno == EINTR));
3161 }
3162
3163 /* Put something in the pipe, so the event loop wakes up. */
3164 static void
3165 async_file_mark (void)
3166 {
3167 int ret;
3168
3169 async_file_flush ();
3170
3171 do
3172 ret = write (linux_event_pipe[1], "+", 1);
3173 while (ret == 0 || (ret == -1 && errno == EINTR));
3174
3175 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3176 be awakened anyway. */
3177 }
3178
3179 static ptid_t
3180 linux_wait (ptid_t ptid,
3181 struct target_waitstatus *ourstatus, int target_options)
3182 {
3183 ptid_t event_ptid;
3184
3185 /* Flush the async file first. */
3186 if (target_is_async_p ())
3187 async_file_flush ();
3188
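  /* In synchronous mode (no TARGET_WNOHANG), keep calling
     linux_wait_1 until it reports a real event; a null ptid with
     TARGET_WAITKIND_IGNORE means nothing interesting happened
     yet.  */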
3189 do
3190 {
3191 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3192 }
3193 while ((target_options & TARGET_WNOHANG) == 0
3194 && ptid_equal (event_ptid, null_ptid)
3195 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3196
3197 /* If at least one stop was reported, there may be more. A single
3198 SIGCHLD can signal more than one child stop. */
3199 if (target_is_async_p ()
3200 && (target_options & TARGET_WNOHANG) != 0
3201 && !ptid_equal (event_ptid, null_ptid))
3202 async_file_mark ();
3203
3204 return event_ptid;
3205 }
3206
3207 /* Send a signal to an LWP. */
3208
3209 static int
3210 kill_lwp (unsigned long lwpid, int signo)
3211 {
3212   /* Use tkill, if possible, in case we are using NPTL threads.  If tkill
3213      fails, then we are not using NPTL threads and we should be using kill.  */
3214
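  /* tkill directs the signal at exactly one kernel thread (LWP),
     whereas kill addresses a whole thread group; with NPTL, where
     all threads share one PID, tkill is the only way to stop a
     specific LWP.  */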
3215 #ifdef __NR_tkill
3216 {
3217 static int tkill_failed;
3218
3219 if (!tkill_failed)
3220 {
3221 int ret;
3222
3223 errno = 0;
3224 ret = syscall (__NR_tkill, lwpid, signo);
3225 if (errno != ENOSYS)
3226 return ret;
3227 tkill_failed = 1;
3228 }
3229 }
3230 #endif
3231
3232 return kill (lwpid, signo);
3233 }
3234
3235 void
3236 linux_stop_lwp (struct lwp_info *lwp)
3237 {
3238 send_sigstop (lwp);
3239 }
3240
3241 static void
3242 send_sigstop (struct lwp_info *lwp)
3243 {
3244 int pid;
3245
3246 pid = lwpid_of (get_lwp_thread (lwp));
3247
3248   /* If we already have a pending stop signal for this LWP, don't
3249      send another.  */
3250 if (lwp->stop_expected)
3251 {
3252 if (debug_threads)
3253 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3254
3255 return;
3256 }
3257
3258 if (debug_threads)
3259 debug_printf ("Sending sigstop to lwp %d\n", pid);
3260
3261 lwp->stop_expected = 1;
3262 kill_lwp (pid, SIGSTOP);
3263 }
3264
3265 static int
3266 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3267 {
3268 struct thread_info *thread = (struct thread_info *) entry;
3269 struct lwp_info *lwp = get_thread_lwp (thread);
3270
3271 /* Ignore EXCEPT. */
3272 if (lwp == except)
3273 return 0;
3274
3275 if (lwp->stopped)
3276 return 0;
3277
3278 send_sigstop (lwp);
3279 return 0;
3280 }
3281
3282 /* Increment the suspend count of an LWP, and stop it, if not stopped
3283 yet. */
3284 static int
3285 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3286 void *except)
3287 {
3288 struct thread_info *thread = (struct thread_info *) entry;
3289 struct lwp_info *lwp = get_thread_lwp (thread);
3290
3291 /* Ignore EXCEPT. */
3292 if (lwp == except)
3293 return 0;
3294
3295 lwp->suspended++;
3296
3297 return send_sigstop_callback (entry, except);
3298 }
3299
3300 static void
3301 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3302 {
3303 /* It's dead, really. */
3304 lwp->dead = 1;
3305
3306 /* Store the exit status for later. */
3307 lwp->status_pending_p = 1;
3308 lwp->status_pending = wstat;
3309
3310 /* Prevent trying to stop it. */
3311 lwp->stopped = 1;
3312
3313 /* No further stops are expected from a dead lwp. */
3314 lwp->stop_expected = 0;
3315 }
3316
3317 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3318
3319 static void
3320 wait_for_sigstop (void)
3321 {
3322 struct thread_info *saved_thread;
3323 ptid_t saved_tid;
3324 int wstat;
3325 int ret;
3326
3327 saved_thread = current_thread;
3328 if (saved_thread != NULL)
3329 saved_tid = saved_thread->entry.id;
3330 else
3331 saved_tid = null_ptid; /* avoid bogus unused warning */
3332
3333 if (debug_threads)
3334 debug_printf ("wait_for_sigstop: pulling events\n");
3335
3336 /* Passing NULL_PTID as filter indicates we want all events to be
3337 left pending. Eventually this returns when there are no
3338 unwaited-for children left. */
3339 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3340 &wstat, __WALL);
3341 gdb_assert (ret == -1);
3342
3343 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3344 current_thread = saved_thread;
3345 else
3346 {
3347 if (debug_threads)
3348 debug_printf ("Previously current thread died.\n");
3349
3350 if (non_stop)
3351 {
3352 /* We can't change the current inferior behind GDB's back,
3353 otherwise, a subsequent command may apply to the wrong
3354 process. */
3355 current_thread = NULL;
3356 }
3357 else
3358 {
3359 /* Set a valid thread as current. */
3360 set_desired_thread (0);
3361 }
3362 }
3363 }
3364
3365 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3366 move it out, because we need to report the stop event to GDB. For
3367 example, if the user puts a breakpoint in the jump pad, it's
3368 because she wants to debug it. */
3369
3370 static int
3371 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3372 {
3373 struct thread_info *thread = (struct thread_info *) entry;
3374 struct lwp_info *lwp = get_thread_lwp (thread);
3375
3376 gdb_assert (lwp->suspended == 0);
3377 gdb_assert (lwp->stopped);
3378
3379   /* Allow debugging the jump pad, gdb_collect, etc.  */
3380 return (supports_fast_tracepoints ()
3381 && agent_loaded_p ()
3382 && (gdb_breakpoint_here (lwp->stop_pc)
3383 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3384 || thread->last_resume_kind == resume_step)
3385 && linux_fast_tracepoint_collecting (lwp, NULL));
3386 }
3387
3388 static void
3389 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3390 {
3391 struct thread_info *thread = (struct thread_info *) entry;
3392 struct lwp_info *lwp = get_thread_lwp (thread);
3393 int *wstat;
3394
3395 gdb_assert (lwp->suspended == 0);
3396 gdb_assert (lwp->stopped);
3397
3398 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3399
3400 /* Allow debugging the jump pad, gdb_collect, etc. */
3401 if (!gdb_breakpoint_here (lwp->stop_pc)
3402 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3403 && thread->last_resume_kind != resume_step
3404 && maybe_move_out_of_jump_pad (lwp, wstat))
3405 {
3406 if (debug_threads)
3407 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3408 lwpid_of (thread));
3409
3410 if (wstat)
3411 {
3412 lwp->status_pending_p = 0;
3413 enqueue_one_deferred_signal (lwp, wstat);
3414
3415 if (debug_threads)
3416 debug_printf ("Signal %d for LWP %ld deferred "
3417 "(in jump pad)\n",
3418 WSTOPSIG (*wstat), lwpid_of (thread));
3419 }
3420
3421 linux_resume_one_lwp (lwp, 0, 0, NULL);
3422 }
3423 else
3424 lwp->suspended++;
3425 }
3426
3427 static int
3428 lwp_running (struct inferior_list_entry *entry, void *data)
3429 {
3430 struct thread_info *thread = (struct thread_info *) entry;
3431 struct lwp_info *lwp = get_thread_lwp (thread);
3432
3433 if (lwp->dead)
3434 return 0;
3435 if (lwp->stopped)
3436 return 0;
3437 return 1;
3438 }
3439
3440 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3441 If SUSPEND, then also increase the suspend count of every LWP,
3442 except EXCEPT. */
3443
3444 static void
3445 stop_all_lwps (int suspend, struct lwp_info *except)
3446 {
3447 /* Should not be called recursively. */
3448 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3449
3450 if (debug_threads)
3451 {
3452 debug_enter ();
3453 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3454 suspend ? "stop-and-suspend" : "stop",
3455 except != NULL
3456 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3457 : "none");
3458 }
3459
3460 stopping_threads = (suspend
3461 ? STOPPING_AND_SUSPENDING_THREADS
3462 : STOPPING_THREADS);
3463
3464 if (suspend)
3465 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3466 else
3467 find_inferior (&all_threads, send_sigstop_callback, except);
3468 wait_for_sigstop ();
3469 stopping_threads = NOT_STOPPING_THREADS;
3470
3471 if (debug_threads)
3472 {
3473 debug_printf ("stop_all_lwps done, setting stopping_threads "
3474 "back to !stopping\n");
3475 debug_exit ();
3476 }
3477 }
3478
3479 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3480 SIGNAL is nonzero, give it that signal. */
3481
3482 static void
3483 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3484 int step, int signal, siginfo_t *info)
3485 {
3486 struct thread_info *thread = get_lwp_thread (lwp);
3487 struct thread_info *saved_thread;
3488 int fast_tp_collecting;
3489
3490 if (lwp->stopped == 0)
3491 return;
3492
3493 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3494
3495 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3496
3497 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3498 user used the "jump" command, or "set $pc = foo"). */
3499 if (lwp->stop_pc != get_pc (lwp))
3500 {
3501 /* Collecting 'while-stepping' actions doesn't make sense
3502 anymore. */
3503 release_while_stepping_state_list (thread);
3504 }
3505
3506 /* If we have pending signals or status, and a new signal, enqueue the
3507 signal. Also enqueue the signal if we are waiting to reinsert a
3508 breakpoint; it will be picked up again below. */
3509 if (signal != 0
3510 && (lwp->status_pending_p
3511 || lwp->pending_signals != NULL
3512 || lwp->bp_reinsert != 0
3513 || fast_tp_collecting))
3514 {
3515 struct pending_signals *p_sig;
3516 p_sig = xmalloc (sizeof (*p_sig));
3517 p_sig->prev = lwp->pending_signals;
3518 p_sig->signal = signal;
3519 if (info == NULL)
3520 memset (&p_sig->info, 0, sizeof (siginfo_t));
3521 else
3522 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3523 lwp->pending_signals = p_sig;
3524 }
3525
3526 if (lwp->status_pending_p)
3527 {
3528 if (debug_threads)
3529 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3530 " has pending status\n",
3531 lwpid_of (thread), step ? "step" : "continue", signal,
3532 lwp->stop_expected ? "expected" : "not expected");
3533 return;
3534 }
3535
3536 saved_thread = current_thread;
3537 current_thread = thread;
3538
3539 if (debug_threads)
3540 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3541 lwpid_of (thread), step ? "step" : "continue", signal,
3542 lwp->stop_expected ? "expected" : "not expected");
3543
3544 /* This bit needs some thinking about. If we get a signal that
3545 we must report while a single-step reinsert is still pending,
3546 we often end up resuming the thread. It might be better to
3547 (ew) allow a stack of pending events; then we could be sure that
3548 the reinsert happened right away and not lose any signals.
3549
3550 Making this stack would also shrink the window in which breakpoints are
3551 uninserted (see comment in linux_wait_for_lwp) but not enough for
3552 complete correctness, so it won't solve that problem. It may be
3553 worthwhile just to solve this one, however. */
3554 if (lwp->bp_reinsert != 0)
3555 {
3556 if (debug_threads)
3557 debug_printf (" pending reinsert at 0x%s\n",
3558 paddress (lwp->bp_reinsert));
3559
3560 if (can_hardware_single_step ())
3561 {
3562 if (fast_tp_collecting == 0)
3563 {
3564 if (step == 0)
3565 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3566 if (lwp->suspended)
3567 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3568 lwp->suspended);
3569 }
3570
3571 step = 1;
3572 }
3573
3574 /* Postpone any pending signal. It was enqueued above. */
3575 signal = 0;
3576 }
3577
3578 if (fast_tp_collecting == 1)
3579 {
3580 if (debug_threads)
3581 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3582 " (exit-jump-pad-bkpt)\n",
3583 lwpid_of (thread));
3584
3585 /* Postpone any pending signal. It was enqueued above. */
3586 signal = 0;
3587 }
3588 else if (fast_tp_collecting == 2)
3589 {
3590 if (debug_threads)
3591 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3592 " single-stepping\n",
3593 lwpid_of (thread));
3594
3595 if (can_hardware_single_step ())
3596 step = 1;
3597 else
3598 {
3599 internal_error (__FILE__, __LINE__,
3600 "moving out of jump pad single-stepping"
3601 " not implemented on this target");
3602 }
3603
3604 /* Postpone any pending signal. It was enqueued above. */
3605 signal = 0;
3606 }
3607
3608   /* If we have while-stepping actions in this thread, set it stepping.
3609      If we have a signal to deliver, it may or may not be set to
3610      SIG_IGN; we don't know.  Assume so, and allow collecting
3611 while-stepping into a signal handler. A possible smart thing to
3612 do would be to set an internal breakpoint at the signal return
3613 address, continue, and carry on catching this while-stepping
3614 action only when that breakpoint is hit. A future
3615 enhancement. */
3616 if (thread->while_stepping != NULL
3617 && can_hardware_single_step ())
3618 {
3619 if (debug_threads)
3620 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3621 lwpid_of (thread));
3622 step = 1;
3623 }
3624
3625 if (the_low_target.get_pc != NULL)
3626 {
3627 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3628
3629 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3630
3631 if (debug_threads)
3632 {
3633 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3634 (long) lwp->stop_pc);
3635 }
3636 }
3637
3638 /* If we have pending signals, consume one unless we are trying to
3639 reinsert a breakpoint or we're trying to finish a fast tracepoint
3640 collect. */
3641 if (lwp->pending_signals != NULL
3642 && lwp->bp_reinsert == 0
3643 && fast_tp_collecting == 0)
3644 {
3645 struct pending_signals **p_sig;
3646
3647 p_sig = &lwp->pending_signals;
3648 while ((*p_sig)->prev != NULL)
3649 p_sig = &(*p_sig)->prev;
3650
3651 signal = (*p_sig)->signal;
3652 if ((*p_sig)->info.si_signo != 0)
3653 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3654 &(*p_sig)->info);
3655
3656 free (*p_sig);
3657 *p_sig = NULL;
3658 }
3659
3660 if (the_low_target.prepare_to_resume != NULL)
3661 the_low_target.prepare_to_resume (lwp);
3662
3663 regcache_invalidate_thread (thread);
3664 errno = 0;
3665 lwp->stepping = step;
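  /* Resume the LWP.  The data argument of PTRACE_CONT and
     PTRACE_SINGLESTEP is the signal to deliver to the tracee upon
     resumption; zero means deliver no signal.  */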
3666 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3667 (PTRACE_TYPE_ARG3) 0,
3668 /* Coerce to a uintptr_t first to avoid potential gcc warning
3669 of coercing an 8 byte integer to a 4 byte pointer. */
3670 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3671
3672 current_thread = saved_thread;
3673 if (errno)
3674 perror_with_name ("resuming thread");
3675
3676 /* Successfully resumed. Clear state that no longer makes sense,
3677 and mark the LWP as running. Must not do this before resuming
3678 otherwise if that fails other code will be confused. E.g., we'd
3679 later try to stop the LWP and hang forever waiting for a stop
3680 status. Note that we must not throw after this is cleared,
3681 otherwise handle_zombie_lwp_error would get confused. */
3682 lwp->stopped = 0;
3683 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3684 }
3685
3686 /* Called when we try to resume a stopped LWP and that errors out. If
3687 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3688    or about to become one), discard the error, clear any pending status
3689 the LWP may have, and return true (we'll collect the exit status
3690 soon enough). Otherwise, return false. */
3691
3692 static int
3693 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3694 {
3695 struct thread_info *thread = get_lwp_thread (lp);
3696
3697 /* If we get an error after resuming the LWP successfully, we'd
3698 confuse !T state for the LWP being gone. */
3699 gdb_assert (lp->stopped);
3700
3701 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3702 because even if ptrace failed with ESRCH, the tracee may be "not
3703 yet fully dead", but already refusing ptrace requests. In that
3704 case the tracee has 'R (Running)' state for a little bit
3705 (observed in Linux 3.18). See also the note on ESRCH in the
3706 ptrace(2) man page. Instead, check whether the LWP has any state
3707 other than ptrace-stopped. */
3708
3709 /* Don't assume anything if /proc/PID/status can't be read. */
3710 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3711 {
3712 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3713 lp->status_pending_p = 0;
3714 return 1;
3715 }
3716 return 0;
3717 }
3718
3719 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3720 disappears while we try to resume it. */
3721
3722 static void
3723 linux_resume_one_lwp (struct lwp_info *lwp,
3724 int step, int signal, siginfo_t *info)
3725 {
3726 TRY
3727 {
3728 linux_resume_one_lwp_throw (lwp, step, signal, info);
3729 }
3730 CATCH (ex, RETURN_MASK_ERROR)
3731 {
3732 if (!check_ptrace_stopped_lwp_gone (lwp))
3733 throw_exception (ex);
3734 }
3735 END_CATCH
3736 }
3737
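/* A minimal sketch of the TRY/CATCH/END_CATCH idiom used above (the
   macros come from gdb's common exception support): code that may
   throw is bracketed, and RETURN_MASK_ERROR catches errors while
   letting quits propagate. OPERATION_THAT_MAY_THROW and CAN_RECOVER
   are hypothetical placeholders:

     TRY
       {
         operation_that_may_throw ();
       }
     CATCH (ex, RETURN_MASK_ERROR)
       {
         if (!can_recover ())
           throw_exception (ex);
       }
     END_CATCH  */
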
3738 struct thread_resume_array
3739 {
3740 struct thread_resume *resume;
3741 size_t n;
3742 };
3743
3744 /* This function is called once per thread via find_inferior.
3745 ARG is a pointer to a thread_resume_array struct.
3746 We look up the thread specified by ENTRY in ARG, and mark the thread
3747 with a pointer to the appropriate resume request.
3748
3749 This algorithm is O(threads * resume elements), but the number of
3750 resume elements is small (and will remain small at least until GDB
3751 supports thread suspension). */
3752
3753 static int
3754 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3755 {
3756 struct thread_info *thread = (struct thread_info *) entry;
3757 struct lwp_info *lwp = get_thread_lwp (thread);
3758 int ndx;
3759 struct thread_resume_array *r;
3760
3761 r = arg;
3762
3763 for (ndx = 0; ndx < r->n; ndx++)
3764 {
3765 ptid_t ptid = r->resume[ndx].thread;
3766 if (ptid_equal (ptid, minus_one_ptid)
3767 || ptid_equal (ptid, entry->id)
3768 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3769 of PID'. */
3770 || (ptid_get_pid (ptid) == pid_of (thread)
3771 && (ptid_is_pid (ptid)
3772 || ptid_get_lwp (ptid) == -1)))
3773 {
3774 if (r->resume[ndx].kind == resume_stop
3775 && thread->last_resume_kind == resume_stop)
3776 {
3777 if (debug_threads)
3778 debug_printf ("already %s LWP %ld at GDB's request\n",
3779 (thread->last_status.kind
3780 == TARGET_WAITKIND_STOPPED)
3781 ? "stopped"
3782 : "stopping",
3783 lwpid_of (thread));
3784
3785 continue;
3786 }
3787
3788 lwp->resume = &r->resume[ndx];
3789 thread->last_resume_kind = lwp->resume->kind;
3790
3791 lwp->step_range_start = lwp->resume->step_range_start;
3792 lwp->step_range_end = lwp->resume->step_range_end;
3793
3794 /* If we had a deferred signal to report, dequeue one now.
3795 This can happen if LWP gets more than one signal while
3796 trying to get out of a jump pad. */
3797 if (lwp->stopped
3798 && !lwp->status_pending_p
3799 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3800 {
3801 lwp->status_pending_p = 1;
3802
3803 if (debug_threads)
3804 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3805 "leaving status pending.\n",
3806 WSTOPSIG (lwp->status_pending),
3807 lwpid_of (thread));
3808 }
3809
3810 return 0;
3811 }
3812 }
3813
3814 /* No resume action for this thread. */
3815 lwp->resume = NULL;
3816
3817 return 0;
3818 }
3819
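/* A minimal sketch of the find_inferior callback convention used by
   linux_set_resume_request above and by the other callbacks in this
   file: the callback is invoked once per thread with a user-supplied
   pointer, and returns zero to keep iterating or non-zero to stop
   the walk. COUNT_STOPPED is a hypothetical example, not part of
   gdbserver:

     static int
     count_stopped (struct inferior_list_entry *entry, void *counter)
     {
       struct thread_info *thread = (struct thread_info *) entry;

       if (get_thread_lwp (thread)->stopped)
         ++*(int *) counter;
       return 0;   -- keep iterating
     }

     int nstopped = 0;
     find_inferior (&all_threads, count_stopped, &nstopped);  */
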
3820 /* find_inferior callback for linux_resume.
3821 Set *FLAG_P if this lwp has an interesting status pending. */
3822
3823 static int
3824 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3825 {
3826 struct thread_info *thread = (struct thread_info *) entry;
3827 struct lwp_info *lwp = get_thread_lwp (thread);
3828
3829 /* LWPs which will not be resumed are not interesting, because
3830 we might not wait for them next time through linux_wait. */
3831 if (lwp->resume == NULL)
3832 return 0;
3833
3834 if (thread_still_has_status_pending_p (thread))
3835 * (int *) flag_p = 1;
3836
3837 return 0;
3838 }
3839
3840 /* Return 1 if this lwp that GDB wants running is stopped at an
3841 internal breakpoint that we need to step over. It assumes that any
3842 required STOP_PC adjustment has already been propagated to the
3843 inferior's regcache. */
3844
3845 static int
3846 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3847 {
3848 struct thread_info *thread = (struct thread_info *) entry;
3849 struct lwp_info *lwp = get_thread_lwp (thread);
3850 struct thread_info *saved_thread;
3851 CORE_ADDR pc;
3852
3853 /* LWPs which will not be resumed are not interesting, because we
3854 might not wait for them next time through linux_wait. */
3855
3856 if (!lwp->stopped)
3857 {
3858 if (debug_threads)
3859 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3860 lwpid_of (thread));
3861 return 0;
3862 }
3863
3864 if (thread->last_resume_kind == resume_stop)
3865 {
3866 if (debug_threads)
3867 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3868 " stopped\n",
3869 lwpid_of (thread));
3870 return 0;
3871 }
3872
3873 gdb_assert (lwp->suspended >= 0);
3874
3875 if (lwp->suspended)
3876 {
3877 if (debug_threads)
3878 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3879 lwpid_of (thread));
3880 return 0;
3881 }
3882
3883 if (!lwp->need_step_over)
3884 {
3885 if (debug_threads)
3886 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3887 }
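  /* Note: no early return here; whether a step-over is really needed
     is still double-checked against the breakpoint table below.  */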
3888
3889 if (lwp->status_pending_p)
3890 {
3891 if (debug_threads)
3892 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3893 " status.\n",
3894 lwpid_of (thread));
3895 return 0;
3896 }
3897
3898 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3899 or we have. */
3900 pc = get_pc (lwp);
3901
3902 /* If the PC has changed since we stopped, then don't do anything,
3903 and let the breakpoint/tracepoint be hit. This happens if, for
3904 instance, GDB handled the decr_pc_after_break subtraction itself,
3905 GDB is OOL stepping this thread, or the user has issued a "jump"
3906 command, or poked the thread's registers herself. */
3907 if (pc != lwp->stop_pc)
3908 {
3909 if (debug_threads)
3910 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3911 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3912 lwpid_of (thread),
3913 paddress (lwp->stop_pc), paddress (pc));
3914
3915 lwp->need_step_over = 0;
3916 return 0;
3917 }
3918
3919 saved_thread = current_thread;
3920 current_thread = thread;
3921
3922 /* We can only step over breakpoints we know about. */
3923 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3924 {
3925 /* Don't step over a breakpoint that GDB expects to hit,
3926 though. If the condition is being evaluated on the target's side
3927 and it evaluates to false, step over this breakpoint as well. */
3928 if (gdb_breakpoint_here (pc)
3929 && gdb_condition_true_at_breakpoint (pc)
3930 && gdb_no_commands_at_breakpoint (pc))
3931 {
3932 if (debug_threads)
3933 debug_printf ("Need step over [LWP %ld]? yes, but found"
3934 " GDB breakpoint at 0x%s; skipping step over\n",
3935 lwpid_of (thread), paddress (pc));
3936
3937 current_thread = saved_thread;
3938 return 0;
3939 }
3940 else
3941 {
3942 if (debug_threads)
3943 debug_printf ("Need step over [LWP %ld]? yes, "
3944 "found breakpoint at 0x%s\n",
3945 lwpid_of (thread), paddress (pc));
3946
3947 /* We've found an lwp that needs stepping over --- return 1 so
3948 that find_inferior stops looking. */
3949 current_thread = saved_thread;
3950
3951 /* If the step over is cancelled, this is set again. */
3952 lwp->need_step_over = 0;
3953 return 1;
3954 }
3955 }
3956
3957 current_thread = saved_thread;
3958
3959 if (debug_threads)
3960 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3961 " at 0x%s\n",
3962 lwpid_of (thread), paddress (pc));
3963
3964 return 0;
3965 }
3966
3967 /* Start a step-over operation on LWP. When LWP stopped at a
3968 breakpoint, to make progress, we need to move the breakpoint out
3969 of the way. If we let other threads run while we do that, they may
3970 pass by the breakpoint location and miss hitting it. To avoid
3971 that, a step-over momentarily stops all threads while LWP is
3972 single-stepped with the breakpoint temporarily uninserted from
3973 the inferior. When the single-step finishes, we reinsert the
3974 breakpoint, and let all threads that are supposed to be running
3975 run again.
3976
3977 On targets that don't support hardware single-step, we don't
3978 currently support full software single-stepping. Instead, we only
3979 support stepping over the thread event breakpoint, by asking the
3980 low target where to place a reinsert breakpoint. Since this
3981 routine assumes the breakpoint being stepped over is a thread event
3982 breakpoint, the return address of the current function is usually
3983 a good enough place to set the reinsert breakpoint. */
3984
3985 static int
3986 start_step_over (struct lwp_info *lwp)
3987 {
3988 struct thread_info *thread = get_lwp_thread (lwp);
3989 struct thread_info *saved_thread;
3990 CORE_ADDR pc;
3991 int step;
3992
3993 if (debug_threads)
3994 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3995 lwpid_of (thread));
3996
3997 stop_all_lwps (1, lwp);
3998 gdb_assert (lwp->suspended == 0);
3999
4000 if (debug_threads)
4001 debug_printf ("Done stopping all threads for step-over.\n");
4002
4003 /* Note that we should always reach here with an already-adjusted PC,
4004 either by GDB (if we're resuming due to GDB's request), or by our
4005 caller, if we just finished handling an internal breakpoint GDB
4006 shouldn't care about. */
4007 pc = get_pc (lwp);
4008
4009 saved_thread = current_thread;
4010 current_thread = thread;
4011
4012 lwp->bp_reinsert = pc;
4013 uninsert_breakpoints_at (pc);
4014 uninsert_fast_tracepoint_jumps_at (pc);
4015
4016 if (can_hardware_single_step ())
4017 {
4018 step = 1;
4019 }
4020 else
4021 {
4022 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4023 set_reinsert_breakpoint (raddr);
4024 step = 0;
4025 }
4026
4027 current_thread = saved_thread;
4028
4029 linux_resume_one_lwp (lwp, step, 0, NULL);
4030
4031 /* Require next event from this LWP. */
4032 step_over_bkpt = thread->entry.id;
4033 return 1;
4034 }
4035
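/* The overall step-over sequence implemented by start_step_over
   above and finish_step_over below can be summarized as the
   following sketch (error handling elided, hardware single-step
   assumed; every function named is defined in this file):

     stop_all_lwps (1, lwp);                  -- suspend everyone else
     lwp->bp_reinsert = pc;                   -- remember the location
     uninsert_breakpoints_at (pc);            -- take the trap out
     linux_resume_one_lwp (lwp, 1, 0, NULL);  -- single-step LWP
     ... wait for the step to finish ...
     reinsert_breakpoints_at (lwp->bp_reinsert);
     lwp->bp_reinsert = 0;
     unstop_all_lwps (1, lwp);                -- let the others go  */
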
4036 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4037 start_step_over, if still there, and delete any reinsert
4038 breakpoints we've set, on non hardware single-step targets. */
4039
4040 static int
4041 finish_step_over (struct lwp_info *lwp)
4042 {
4043 if (lwp->bp_reinsert != 0)
4044 {
4045 if (debug_threads)
4046 debug_printf ("Finished step over.\n");
4047
4048 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4049 may be no breakpoint to reinsert there by now. */
4050 reinsert_breakpoints_at (lwp->bp_reinsert);
4051 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4052
4053 lwp->bp_reinsert = 0;
4054
4055 /* Delete any software-single-step reinsert breakpoints. No
4056 longer needed. We don't have to worry about other threads
4057 hitting this trap, and later not being able to explain it,
4058 because we were stepping over a breakpoint, and we hold all
4059 threads but LWP stopped while doing that. */
4060 if (!can_hardware_single_step ())
4061 delete_reinsert_breakpoints ();
4062
4063 step_over_bkpt = null_ptid;
4064 return 1;
4065 }
4066 else
4067 return 0;
4068 }
4069
4070 /* This function is called once per thread. We check the thread's resume
4071 request, which will tell us whether to resume, step, or leave the thread
4072 stopped; and what signal, if any, it should be sent.
4073
4074 For threads for which we aren't explicitly told otherwise, we preserve
4075 the stepping flag; this is used for stepping over gdbserver-placed
4076 breakpoints.
4077
4078 If pending_flags was set in any thread, we queue any needed
4079 signals, since we won't actually resume. We already have a pending
4080 event to report, so we don't need to preserve any step requests;
4081 they should be re-issued if necessary. */
4082
4083 static int
4084 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4085 {
4086 struct thread_info *thread = (struct thread_info *) entry;
4087 struct lwp_info *lwp = get_thread_lwp (thread);
4088 int step;
4089 int leave_all_stopped = * (int *) arg;
4090 int leave_pending;
4091
4092 if (lwp->resume == NULL)
4093 return 0;
4094
4095 if (lwp->resume->kind == resume_stop)
4096 {
4097 if (debug_threads)
4098 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4099
4100 if (!lwp->stopped)
4101 {
4102 if (debug_threads)
4103 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4104
4105 /* Stop the thread, and wait for the event asynchronously,
4106 through the event loop. */
4107 send_sigstop (lwp);
4108 }
4109 else
4110 {
4111 if (debug_threads)
4112 debug_printf ("already stopped LWP %ld\n",
4113 lwpid_of (thread));
4114
4115 /* The LWP may have been stopped in an internal event that
4116 was not meant to be notified back to GDB (e.g., gdbserver
4117 breakpoint), so we should be reporting a stop event in
4118 this case too. */
4119
4120 /* If the thread already has a pending SIGSTOP, this is a
4121 no-op. Otherwise, something later will presumably resume
4122 the thread and this will cause it to cancel any pending
4123 operation, due to last_resume_kind == resume_stop. If
4124 the thread already has a pending status to report, we
4125 will still report it the next time we wait - see
4126 status_pending_p_callback. */
4127
4128 /* If we already have a pending signal to report, then
4129 there's no need to queue a SIGSTOP, as this means we're
4130 midway through moving the LWP out of the jumppad, and we
4131 will report the pending signal as soon as that is
4132 finished. */
4133 if (lwp->pending_signals_to_report == NULL)
4134 send_sigstop (lwp);
4135 }
4136
4137 /* For stop requests, we're done. */
4138 lwp->resume = NULL;
4139 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4140 return 0;
4141 }
4142
4143 /* If this thread, which is about to be resumed, has a pending
4144 status, then don't resume any threads - we can just report the
4145 pending status. Make sure to queue any signals that would
4146 otherwise be sent. In all-stop mode, we base this decision on
4147 whether *any* thread has a pending status. If there's a thread
4148 that needs the step-over-breakpoint dance, then don't resume any
4149 other thread but that particular one. */
4150 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4151
4152 if (!leave_pending)
4153 {
4154 if (debug_threads)
4155 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4156
4157 step = (lwp->resume->kind == resume_step);
4158 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4159 }
4160 else
4161 {
4162 if (debug_threads)
4163 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4164
4165 /* If we have a new signal, enqueue the signal. */
4166 if (lwp->resume->sig != 0)
4167 {
4168 struct pending_signals *p_sig;
4169 p_sig = xmalloc (sizeof (*p_sig));
4170 p_sig->prev = lwp->pending_signals;
4171 p_sig->signal = lwp->resume->sig;
4172 memset (&p_sig->info, 0, sizeof (siginfo_t));
4173
4174 /* If this is the same signal we were previously stopped by,
4175 make sure to queue its siginfo. We can ignore the return
4176 value of ptrace; if it fails, we'll skip
4177 PTRACE_SETSIGINFO. */
4178 if (WIFSTOPPED (lwp->last_status)
4179 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4180 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4181 &p_sig->info);
4182
4183 lwp->pending_signals = p_sig;
4184 }
4185 }
4186
4187 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4188 lwp->resume = NULL;
4189 return 0;
4190 }
4191
4192 static void
4193 linux_resume (struct thread_resume *resume_info, size_t n)
4194 {
4195 struct thread_resume_array array = { resume_info, n };
4196 struct thread_info *need_step_over = NULL;
4197 int any_pending;
4198 int leave_all_stopped;
4199
4200 if (debug_threads)
4201 {
4202 debug_enter ();
4203 debug_printf ("linux_resume:\n");
4204 }
4205
4206 find_inferior (&all_threads, linux_set_resume_request, &array);
4207
4208 /* If there is a thread which would otherwise be resumed, which has
4209 a pending status, then don't resume any threads - we can just
4210 report the pending status. Make sure to queue any signals that
4211 would otherwise be sent. In non-stop mode, we'll apply this
4212 logic to each thread individually. We consume all pending events
4213 before considering starting a step-over (in all-stop). */
4214 any_pending = 0;
4215 if (!non_stop)
4216 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4217
4218 /* If there is a thread which would otherwise be resumed, which is
4219 stopped at a breakpoint that needs stepping over, then don't
4220 resume any threads - have it step over the breakpoint with all
4221 other threads stopped, then resume all threads again. Make sure
4222 to queue any signals that would otherwise be delivered or
4223 queued. */
4224 if (!any_pending && supports_breakpoints ())
4225 need_step_over
4226 = (struct thread_info *) find_inferior (&all_threads,
4227 need_step_over_p, NULL);
4228
4229 leave_all_stopped = (need_step_over != NULL || any_pending);
4230
4231 if (debug_threads)
4232 {
4233 if (need_step_over != NULL)
4234 debug_printf ("Not resuming all, need step over\n");
4235 else if (any_pending)
4236 debug_printf ("Not resuming, all-stop and found "
4237 "an LWP with pending status\n");
4238 else
4239 debug_printf ("Resuming, no pending status or step over needed\n");
4240 }
4241
4242 /* Even if we're leaving threads stopped, queue all signals we'd
4243 otherwise deliver. */
4244 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4245
4246 if (need_step_over)
4247 start_step_over (get_thread_lwp (need_step_over));
4248
4249 if (debug_threads)
4250 {
4251 debug_printf ("linux_resume done\n");
4252 debug_exit ();
4253 }
4254 }
4255
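/* A minimal sketch of how a caller might build the RESUME_INFO array
   that linux_resume consumes, e.g. to single-step one thread and
   continue all others (STEP_PTID is a hypothetical ptid_t naming the
   thread to step):

     struct thread_resume r[2];

     memset (r, 0, sizeof (r));
     r[0].thread = step_ptid;
     r[0].kind = resume_step;
     r[1].thread = minus_one_ptid;   -- matches any remaining thread
     r[1].kind = resume_continue;

     linux_resume (r, 2);

   Earlier entries take precedence: linux_set_resume_request scans
   the array in order and uses the first entry that matches a given
   thread.  */
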
4256 /* This function is called once per thread. We check the thread's
4257 last resume request, which will tell us whether to resume, step, or
4258 leave the thread stopped. Any signal the client requested to be
4259 delivered has already been enqueued at this point.
4260
4261 If any thread that GDB wants running is stopped at an internal
4262 breakpoint that needs stepping over, we start a step-over operation
4263 on that particular thread, and leave all others stopped. */
4264
4265 static int
4266 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4267 {
4268 struct thread_info *thread = (struct thread_info *) entry;
4269 struct lwp_info *lwp = get_thread_lwp (thread);
4270 int step;
4271
4272 if (lwp == except)
4273 return 0;
4274
4275 if (debug_threads)
4276 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4277
4278 if (!lwp->stopped)
4279 {
4280 if (debug_threads)
4281 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4282 return 0;
4283 }
4284
4285 if (thread->last_resume_kind == resume_stop
4286 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4287 {
4288 if (debug_threads)
4289 debug_printf (" client wants LWP %ld to remain stopped\n",
4290 lwpid_of (thread));
4291 return 0;
4292 }
4293
4294 if (lwp->status_pending_p)
4295 {
4296 if (debug_threads)
4297 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4298 lwpid_of (thread));
4299 return 0;
4300 }
4301
4302 gdb_assert (lwp->suspended >= 0);
4303
4304 if (lwp->suspended)
4305 {
4306 if (debug_threads)
4307 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4308 return 0;
4309 }
4310
4311 if (thread->last_resume_kind == resume_stop
4312 && lwp->pending_signals_to_report == NULL
4313 && lwp->collecting_fast_tracepoint == 0)
4314 {
4315 /* We haven't reported this LWP as stopped yet (otherwise, the
4316 last_status.kind check above would catch it, and we wouldn't
4317 reach here). This LWP may have been momentarily paused by a
4318 stop_all_lwps call while handling, for example, another LWP's
4319 step-over. In that case, the pending expected SIGSTOP signal
4320 that was queued at vCont;t handling time will have already
4321 been consumed by wait_for_sigstop, and so we need to requeue
4322 another one here. Note that if the LWP already has a SIGSTOP
4323 pending, this is a no-op. */
4324
4325 if (debug_threads)
4326 debug_printf ("Client wants LWP %ld to stop. "
4327 "Making sure it has a SIGSTOP pending\n",
4328 lwpid_of (thread));
4329
4330 send_sigstop (lwp);
4331 }
4332
4333 step = thread->last_resume_kind == resume_step;
4334 linux_resume_one_lwp (lwp, step, 0, NULL);
4335 return 0;
4336 }
4337
4338 static int
4339 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4340 {
4341 struct thread_info *thread = (struct thread_info *) entry;
4342 struct lwp_info *lwp = get_thread_lwp (thread);
4343
4344 if (lwp == except)
4345 return 0;
4346
4347 lwp->suspended--;
4348 gdb_assert (lwp->suspended >= 0);
4349
4350 return proceed_one_lwp (entry, except);
4351 }
4352
4353 /* When we finish a step-over, set threads running again. If there's
4354 another thread that may need a step-over, now's the time to start
4355 it. Eventually, we'll move all threads past their breakpoints. */
4356
4357 static void
4358 proceed_all_lwps (void)
4359 {
4360 struct thread_info *need_step_over;
4361
4362 /* If there is a thread which would otherwise be resumed, which is
4363 stopped at a breakpoint that needs stepping over, then don't
4364 resume any threads - have it step over the breakpoint with all
4365 other threads stopped, then resume all threads again. */
4366
4367 if (supports_breakpoints ())
4368 {
4369 need_step_over
4370 = (struct thread_info *) find_inferior (&all_threads,
4371 need_step_over_p, NULL);
4372
4373 if (need_step_over != NULL)
4374 {
4375 if (debug_threads)
4376 debug_printf ("proceed_all_lwps: found "
4377 "thread %ld needing a step-over\n",
4378 lwpid_of (need_step_over));
4379
4380 start_step_over (get_thread_lwp (need_step_over));
4381 return;
4382 }
4383 }
4384
4385 if (debug_threads)
4386 debug_printf ("Proceeding, no step-over needed\n");
4387
4388 find_inferior (&all_threads, proceed_one_lwp, NULL);
4389 }
4390
4391 /* Stopped LWPs that the client wanted to be running, that don't have
4392 pending statuses, are set to run again, except for EXCEPT, if not
4393 NULL. This undoes a stop_all_lwps call. */
4394
4395 static void
4396 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4397 {
4398 if (debug_threads)
4399 {
4400 debug_enter ();
4401 if (except)
4402 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4403 lwpid_of (get_lwp_thread (except)));
4404 else
4405 debug_printf ("unstopping all lwps\n");
4406 }
4407
4408 if (unsuspend)
4409 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4410 else
4411 find_inferior (&all_threads, proceed_one_lwp, except);
4412
4413 if (debug_threads)
4414 {
4415 debug_printf ("unstop_all_lwps done\n");
4416 debug_exit ();
4417 }
4418 }
4419
4420
4421 #ifdef HAVE_LINUX_REGSETS
4422
4423 #define use_linux_regsets 1
4424
4425 /* Returns true if REGSET has been disabled. */
4426
4427 static int
4428 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4429 {
4430 return (info->disabled_regsets != NULL
4431 && info->disabled_regsets[regset - info->regsets]);
4432 }
4433
4434 /* Disable REGSET. */
4435
4436 static void
4437 disable_regset (struct regsets_info *info, struct regset_info *regset)
4438 {
4439 int dr_offset;
4440
4441 dr_offset = regset - info->regsets;
4442 if (info->disabled_regsets == NULL)
4443 info->disabled_regsets = xcalloc (1, info->num_regsets);
4444 info->disabled_regsets[dr_offset] = 1;
4445 }
4446
4447 static int
4448 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4449 struct regcache *regcache)
4450 {
4451 struct regset_info *regset;
4452 int saw_general_regs = 0;
4453 int pid;
4454 struct iovec iov;
4455
4456 pid = lwpid_of (current_thread);
4457 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4458 {
4459 void *buf, *data;
4460 int nt_type, res;
4461
4462 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4463 continue;
4464
4465 buf = xmalloc (regset->size);
4466
4467 nt_type = regset->nt_type;
4468 if (nt_type)
4469 {
4470 iov.iov_base = buf;
4471 iov.iov_len = regset->size;
4472 data = (void *) &iov;
4473 }
4474 else
4475 data = buf;
4476
4477 #ifndef __sparc__
4478 res = ptrace (regset->get_request, pid,
4479 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4480 #else
4481 res = ptrace (regset->get_request, pid, data, nt_type);
4482 #endif
4483 if (res < 0)
4484 {
4485 if (errno == EIO)
4486 {
4487 /* If we get EIO on a regset, do not try it again for
4488 this process mode. */
4489 disable_regset (regsets_info, regset);
4490 }
4491 else if (errno == ENODATA)
4492 {
4493 /* ENODATA may be returned if the regset is currently
4494 not "active". This can happen in normal operation,
4495 so suppress the warning in this case. */
4496 }
4497 else
4498 {
4499 char s[256];
4500 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4501 pid);
4502 perror (s);
4503 }
4504 }
4505 else
4506 {
4507 if (regset->type == GENERAL_REGS)
4508 saw_general_regs = 1;
4509 regset->store_function (regcache, buf);
4510 }
4511 free (buf);
4512 }
4513 if (saw_general_regs)
4514 return 0;
4515 else
4516 return 1;
4517 }
4518
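/* For reference, the iovec-based branch above corresponds to the
   Linux PTRACE_GETREGSET interface. A standalone sketch (assuming a
   kernel that supports PTRACE_GETREGSET and NT_PRSTATUS from
   <elf.h>):

     #include <sys/ptrace.h>
     #include <sys/uio.h>
     #include <sys/user.h>
     #include <elf.h>

     struct user_regs_struct regs;
     struct iovec iov;

     iov.iov_base = &regs;
     iov.iov_len = sizeof (regs);
     if (ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov) == 0)
       ;   -- iov.iov_len now holds the bytes the kernel filled in

   The kernel shrinks IOV.IOV_LEN if its regset is smaller than the
   supplied buffer, which is why an iovec rather than a bare buffer
   is passed whenever REGSET->NT_TYPE is non-zero.  */
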
4519 static int
4520 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4521 struct regcache *regcache)
4522 {
4523 struct regset_info *regset;
4524 int saw_general_regs = 0;
4525 int pid;
4526 struct iovec iov;
4527
4528 pid = lwpid_of (current_thread);
4529 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4530 {
4531 void *buf, *data;
4532 int nt_type, res;
4533
4534 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4535 || regset->fill_function == NULL)
4536 continue;
4537
4538 buf = xmalloc (regset->size);
4539
4540 /* First fill the buffer with the current register set contents,
4541 in case there are any items in the kernel's regset that are
4542 not in gdbserver's regcache. */
4543
4544 nt_type = regset->nt_type;
4545 if (nt_type)
4546 {
4547 iov.iov_base = buf;
4548 iov.iov_len = regset->size;
4549 data = (void *) &iov;
4550 }
4551 else
4552 data = buf;
4553
4554 #ifndef __sparc__
4555 res = ptrace (regset->get_request, pid,
4556 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4557 #else
4558 res = ptrace (regset->get_request, pid, data, nt_type);
4559 #endif
4560
4561 if (res == 0)
4562 {
4563 /* Then overlay our cached registers on that. */
4564 regset->fill_function (regcache, buf);
4565
4566 /* Only now do we write the register set. */
4567 #ifndef __sparc__
4568 res = ptrace (regset->set_request, pid,
4569 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4570 #else
4571 res = ptrace (regset->set_request, pid, data, nt_type);
4572 #endif
4573 }
4574
4575 if (res < 0)
4576 {
4577 if (errno == EIO)
4578 {
4579 /* If we get EIO on a regset, do not try it again for
4580 this process mode. */
4581 disable_regset (regsets_info, regset);
4582 }
4583 else if (errno == ESRCH)
4584 {
4585 /* At this point, ESRCH should mean the process is
4586 already gone, in which case we simply ignore attempts
4587 to change its registers. See also the related
4588 comment in linux_resume_one_lwp. */
4589 free (buf);
4590 return 0;
4591 }
4592 else
4593 {
4594 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4595 }
4596 }
4597 else if (regset->type == GENERAL_REGS)
4598 saw_general_regs = 1;
4599 free (buf);
4600 }
4601 if (saw_general_regs)
4602 return 0;
4603 else
4604 return 1;
4605 }
4606
4607 #else /* !HAVE_LINUX_REGSETS */
4608
4609 #define use_linux_regsets 0
4610 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4611 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4612
4613 #endif
4614
4615 /* Return 1 if register REGNO is supported by one of the regset ptrace
4616 calls or 0 if it has to be transferred individually. */
4617
4618 static int
4619 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4620 {
4621 unsigned char mask = 1 << (regno % 8);
4622 size_t index = regno / 8;
4623
4624 return (use_linux_regsets
4625 && (regs_info->regset_bitmap == NULL
4626 || (regs_info->regset_bitmap[index] & mask) != 0));
4627 }
4628
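/* For example, for REGNO 10 the test above computes INDEX 1 and MASK
   0x04, so register 10 is handled by a regset iff bit 2 of
   regset_bitmap[1] is set (or the bitmap is NULL, which means "all
   registers").  */
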
4629 #ifdef HAVE_LINUX_USRREGS
4630
4631 int
4632 register_addr (const struct usrregs_info *usrregs, int regnum)
4633 {
4634 int addr;
4635
4636 if (regnum < 0 || regnum >= usrregs->num_regs)
4637 error ("Invalid register number %d.", regnum);
4638
4639 addr = usrregs->regmap[regnum];
4640
4641 return addr;
4642 }
4643
4644 /* Fetch one register. */
4645 static void
4646 fetch_register (const struct usrregs_info *usrregs,
4647 struct regcache *regcache, int regno)
4648 {
4649 CORE_ADDR regaddr;
4650 int i, size;
4651 char *buf;
4652 int pid;
4653
4654 if (regno >= usrregs->num_regs)
4655 return;
4656 if ((*the_low_target.cannot_fetch_register) (regno))
4657 return;
4658
4659 regaddr = register_addr (usrregs, regno);
4660 if (regaddr == -1)
4661 return;
4662
4663 size = ((register_size (regcache->tdesc, regno)
4664 + sizeof (PTRACE_XFER_TYPE) - 1)
4665 & -sizeof (PTRACE_XFER_TYPE));
4666 buf = alloca (size);
4667
4668 pid = lwpid_of (current_thread);
4669 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4670 {
4671 errno = 0;
4672 *(PTRACE_XFER_TYPE *) (buf + i) =
4673 ptrace (PTRACE_PEEKUSER, pid,
4674 /* Coerce to a uintptr_t first to avoid potential gcc warning
4675 of coercing an 8 byte integer to a 4 byte pointer. */
4676 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4677 regaddr += sizeof (PTRACE_XFER_TYPE);
4678 if (errno != 0)
4679 error ("reading register %d: %s", regno, strerror (errno));
4680 }
4681
4682 if (the_low_target.supply_ptrace_register)
4683 the_low_target.supply_ptrace_register (regcache, regno, buf);
4684 else
4685 supply_register (regcache, regno, buf);
4686 }
4687
4688 /* Store one register. */
4689 static void
4690 store_register (const struct usrregs_info *usrregs,
4691 struct regcache *regcache, int regno)
4692 {
4693 CORE_ADDR regaddr;
4694 int i, size;
4695 char *buf;
4696 int pid;
4697
4698 if (regno >= usrregs->num_regs)
4699 return;
4700 if ((*the_low_target.cannot_store_register) (regno))
4701 return;
4702
4703 regaddr = register_addr (usrregs, regno);
4704 if (regaddr == -1)
4705 return;
4706
4707 size = ((register_size (regcache->tdesc, regno)
4708 + sizeof (PTRACE_XFER_TYPE) - 1)
4709 & -sizeof (PTRACE_XFER_TYPE));
4710 buf = alloca (size);
4711 memset (buf, 0, size);
4712
4713 if (the_low_target.collect_ptrace_register)
4714 the_low_target.collect_ptrace_register (regcache, regno, buf);
4715 else
4716 collect_register (regcache, regno, buf);
4717
4718 pid = lwpid_of (current_thread);
4719 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4720 {
4721 errno = 0;
4722 ptrace (PTRACE_POKEUSER, pid,
4723 /* Coerce to a uintptr_t first to avoid potential gcc warning
4724 about coercing an 8 byte integer to a 4 byte pointer. */
4725 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4726 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4727 if (errno != 0)
4728 {
4729 /* At this point, ESRCH should mean the process is
4730 already gone, in which case we simply ignore attempts
4731 to change its registers. See also the related
4732 comment in linux_resume_one_lwp. */
4733 if (errno == ESRCH)
4734 return;
4735
4736 if ((*the_low_target.cannot_store_register) (regno) == 0)
4737 error ("writing register %d: %s", regno, strerror (errno));
4738 }
4739 regaddr += sizeof (PTRACE_XFER_TYPE);
4740 }
4741 }
4742
4743 /* Fetch all registers, or just one, from the child process.
4744 If REGNO is -1, do this for all registers, skipping any that are
4745 assumed to have been retrieved by regsets_fetch_inferior_registers,
4746 unless ALL is non-zero.
4747 Otherwise, REGNO specifies which register (so we can save time). */
4748 static void
4749 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4750 struct regcache *regcache, int regno, int all)
4751 {
4752 struct usrregs_info *usr = regs_info->usrregs;
4753
4754 if (regno == -1)
4755 {
4756 for (regno = 0; regno < usr->num_regs; regno++)
4757 if (all || !linux_register_in_regsets (regs_info, regno))
4758 fetch_register (usr, regcache, regno);
4759 }
4760 else
4761 fetch_register (usr, regcache, regno);
4762 }
4763
4764 /* Store our register values back into the inferior.
4765 If REGNO is -1, do this for all registers, skipping any that are
4766 assumed to have been saved by regsets_store_inferior_registers,
4767 unless ALL is non-zero.
4768 Otherwise, REGNO specifies which register (so we can save time). */
4769 static void
4770 usr_store_inferior_registers (const struct regs_info *regs_info,
4771 struct regcache *regcache, int regno, int all)
4772 {
4773 struct usrregs_info *usr = regs_info->usrregs;
4774
4775 if (regno == -1)
4776 {
4777 for (regno = 0; regno < usr->num_regs; regno++)
4778 if (all || !linux_register_in_regsets (regs_info, regno))
4779 store_register (usr, regcache, regno);
4780 }
4781 else
4782 store_register (usr, regcache, regno);
4783 }
4784
4785 #else /* !HAVE_LINUX_USRREGS */
4786
4787 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4788 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4789
4790 #endif
4791
4792
4793 void
4794 linux_fetch_registers (struct regcache *regcache, int regno)
4795 {
4796 int use_regsets;
4797 int all = 0;
4798 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4799
4800 if (regno == -1)
4801 {
4802 if (the_low_target.fetch_register != NULL
4803 && regs_info->usrregs != NULL)
4804 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4805 (*the_low_target.fetch_register) (regcache, regno);
4806
4807 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4808 if (regs_info->usrregs != NULL)
4809 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4810 }
4811 else
4812 {
4813 if (the_low_target.fetch_register != NULL
4814 && (*the_low_target.fetch_register) (regcache, regno))
4815 return;
4816
4817 use_regsets = linux_register_in_regsets (regs_info, regno);
4818 if (use_regsets)
4819 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4820 regcache);
4821 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4822 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4823 }
4824 }
4825
4826 void
4827 linux_store_registers (struct regcache *regcache, int regno)
4828 {
4829 int use_regsets;
4830 int all = 0;
4831 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4832
4833 if (regno == -1)
4834 {
4835 all = regsets_store_inferior_registers (regs_info->regsets_info,
4836 regcache);
4837 if (regs_info->usrregs != NULL)
4838 usr_store_inferior_registers (regs_info, regcache, regno, all);
4839 }
4840 else
4841 {
4842 use_regsets = linux_register_in_regsets (regs_info, regno);
4843 if (use_regsets)
4844 all = regsets_store_inferior_registers (regs_info->regsets_info,
4845 regcache);
4846 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4847 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4848 }
4849 }
4850
4851
4852 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4853 to debugger memory starting at MYADDR. */
4854
4855 static int
4856 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4857 {
4858 int pid = lwpid_of (current_thread);
4859 register PTRACE_XFER_TYPE *buffer;
4860 register CORE_ADDR addr;
4861 register int count;
4862 char filename[64];
4863 register int i;
4864 int ret;
4865 int fd;
4866
4867 /* Try using /proc. Don't bother for one word. */
4868 if (len >= 3 * sizeof (long))
4869 {
4870 int bytes;
4871
4872 /* We could keep this file open and cache it - possibly one per
4873 thread. That requires some juggling, but is even faster. */
4874 sprintf (filename, "/proc/%d/mem", pid);
4875 fd = open (filename, O_RDONLY | O_LARGEFILE);
4876 if (fd == -1)
4877 goto no_proc;
4878
4879 /* If pread64 is available, use it. It's faster if the kernel
4880 supports it (only one syscall), and it's 64-bit safe even on
4881 32-bit platforms (for instance, SPARC debugging a SPARC64
4882 application). */
4883 #ifdef HAVE_PREAD64
4884 bytes = pread64 (fd, myaddr, len, memaddr);
4885 #else
4886 bytes = -1;
4887 if (lseek (fd, memaddr, SEEK_SET) != -1)
4888 bytes = read (fd, myaddr, len);
4889 #endif
4890
4891 close (fd);
4892 if (bytes == len)
4893 return 0;
4894
4895 /* Some data was read; we'll try to get the rest with ptrace. */
4896 if (bytes > 0)
4897 {
4898 memaddr += bytes;
4899 myaddr += bytes;
4900 len -= bytes;
4901 }
4902 }
4903
4904 no_proc:
4905 /* Round starting address down to longword boundary. */
4906 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4907 /* Round ending address up; get number of longwords that makes. */
4908 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4909 / sizeof (PTRACE_XFER_TYPE));
4910 /* Allocate buffer of that many longwords. */
4911 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4912
4913 /* Read all the longwords. */
4914 errno = 0;
4915 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4916 {
4917 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4918 about coercing an 8 byte integer to a 4 byte pointer. */
4919 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4920 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4921 (PTRACE_TYPE_ARG4) 0);
4922 if (errno)
4923 break;
4924 }
4925 ret = errno;
4926
4927 /* Copy appropriate bytes out of the buffer. */
4928 if (i > 0)
4929 {
4930 i *= sizeof (PTRACE_XFER_TYPE);
4931 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4932 memcpy (myaddr,
4933 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4934 i < len ? i : len);
4935 }
4936
4937 return ret;
4938 }
4939
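/* A standalone sketch of the /proc/PID/mem fast path used above
   (error handling elided; without pread64, lseek followed by read
   achieves the same, as in the #ifdef above):

     char filename[64];
     int fd;
     ssize_t bytes;

     sprintf (filename, "/proc/%d/mem", pid);
     fd = open (filename, O_RDONLY | O_LARGEFILE);
     bytes = pread64 (fd, myaddr, len, memaddr);
     close (fd);

   A short read is not an error here: the caller falls back to
   PTRACE_PEEKTEXT for whatever /proc could not supply.  */
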
4940 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4941 memory at MEMADDR. On failure (cannot write to the inferior)
4942 returns the value of errno. Always succeeds if LEN is zero. */
4943
4944 static int
4945 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4946 {
4947 register int i;
4948 /* Round starting address down to longword boundary. */
4949 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4950 /* Round ending address up; get number of longwords that makes. */
4951 register int count
4952 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4953 / sizeof (PTRACE_XFER_TYPE);
4954
4955 /* Allocate buffer of that many longwords. */
4956 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4957 alloca (count * sizeof (PTRACE_XFER_TYPE));
4958
4959 int pid = lwpid_of (current_thread);
4960
4961 if (len == 0)
4962 {
4963 /* Zero length write always succeeds. */
4964 return 0;
4965 }
4966
4967 if (debug_threads)
4968 {
4969 /* Dump up to four bytes. */
4970 unsigned int val = * (unsigned int *) myaddr;
4971 if (len == 1)
4972 val = val & 0xff;
4973 else if (len == 2)
4974 val = val & 0xffff;
4975 else if (len == 3)
4976 val = val & 0xffffff;
4977 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4978 val, (long)memaddr);
4979 }
4980
4981 /* Fill start and end extra bytes of buffer with existing memory data. */
4982
4983 errno = 0;
4984 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4985 about coercing an 8 byte integer to a 4 byte pointer. */
4986 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4987 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4988 (PTRACE_TYPE_ARG4) 0);
4989 if (errno)
4990 return errno;
4991
4992 if (count > 1)
4993 {
4994 errno = 0;
4995 buffer[count - 1]
4996 = ptrace (PTRACE_PEEKTEXT, pid,
4997 /* Coerce to a uintptr_t first to avoid potential gcc warning
4998 about coercing an 8 byte integer to a 4 byte pointer. */
4999 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5000 * sizeof (PTRACE_XFER_TYPE)),
5001 (PTRACE_TYPE_ARG4) 0);
5002 if (errno)
5003 return errno;
5004 }
5005
5006 /* Copy data to be written over corresponding part of buffer. */
5007
5008 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5009 myaddr, len);
5010
5011 /* Write the entire buffer. */
5012
5013 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5014 {
5015 errno = 0;
5016 ptrace (PTRACE_POKETEXT, pid,
5017 /* Coerce to a uintptr_t first to avoid potential gcc warning
5018 about coercing an 8 byte integer to a 4 byte pointer. */
5019 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5020 (PTRACE_TYPE_ARG4) buffer[i]);
5021 if (errno)
5022 return errno;
5023 }
5024
5025 return 0;
5026 }
5027
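/* Worked example of the rounding arithmetic above, assuming an
   8-byte PTRACE_XFER_TYPE: for MEMADDR 0x1003 and LEN 6, ADDR rounds
   down to 0x1000 and COUNT is ((0x1009 - 0x1000) + 7) / 8 = 2, so
   the read-modify-write cycle covers the two words at 0x1000 and
   0x1008.  */
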
5028 static void
5029 linux_look_up_symbols (void)
5030 {
5031 #ifdef USE_THREAD_DB
5032 struct process_info *proc = current_process ();
5033
5034 if (proc->priv->thread_db != NULL)
5035 return;
5036
5037 /* If the kernel supports tracing clones, then we don't need to
5038 use the magic thread event breakpoint to learn about
5039 threads. */
5040 thread_db_init (!linux_supports_traceclone ());
5041 #endif
5042 }
5043
5044 static void
5045 linux_request_interrupt (void)
5046 {
5047 extern unsigned long signal_pid;
5048
5049 /* Send a SIGINT to the process group. This acts just as if the user
5050 had typed a ^C on the controlling terminal. */
5051 kill (-signal_pid, SIGINT);
5052 }
5053
5054 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5055 to debugger memory starting at MYADDR. */
5056
5057 static int
5058 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5059 {
5060 char filename[PATH_MAX];
5061 int fd, n;
5062 int pid = lwpid_of (current_thread);
5063
5064 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5065
5066 fd = open (filename, O_RDONLY);
5067 if (fd < 0)
5068 return -1;
5069
5070 if (offset != (CORE_ADDR) 0
5071 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5072 n = -1;
5073 else
5074 n = read (fd, myaddr, len);
5075
5076 close (fd);
5077
5078 return n;
5079 }
5080
5081 /* These breakpoint and watchpoint related wrapper functions simply
5082 pass on the function call if the target has registered a
5083 corresponding function. */
5084
5085 static int
5086 linux_supports_z_point_type (char z_type)
5087 {
5088 return (the_low_target.supports_z_point_type != NULL
5089 && the_low_target.supports_z_point_type (z_type));
5090 }
5091
5092 static int
5093 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5094 int size, struct raw_breakpoint *bp)
5095 {
5096 if (the_low_target.insert_point != NULL)
5097 return the_low_target.insert_point (type, addr, size, bp);
5098 else
5099 /* Unsupported (see target.h). */
5100 return 1;
5101 }
5102
5103 static int
5104 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5105 int size, struct raw_breakpoint *bp)
5106 {
5107 if (the_low_target.remove_point != NULL)
5108 return the_low_target.remove_point (type, addr, size, bp);
5109 else
5110 /* Unsupported (see target.h). */
5111 return 1;
5112 }
5113
5114 /* Implement the to_stopped_by_sw_breakpoint target_ops
5115 method. */
5116
5117 static int
5118 linux_stopped_by_sw_breakpoint (void)
5119 {
5120 struct lwp_info *lwp = get_thread_lwp (current_thread);
5121
5122 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5123 }
5124
5125 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5126 method. */
5127
5128 static int
5129 linux_supports_stopped_by_sw_breakpoint (void)
5130 {
5131 return USE_SIGTRAP_SIGINFO;
5132 }
5133
5134 /* Implement the to_stopped_by_hw_breakpoint target_ops
5135 method. */
5136
5137 static int
5138 linux_stopped_by_hw_breakpoint (void)
5139 {
5140 struct lwp_info *lwp = get_thread_lwp (current_thread);
5141
5142 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5143 }
5144
5145 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5146 method. */
5147
5148 static int
5149 linux_supports_stopped_by_hw_breakpoint (void)
5150 {
5151 return USE_SIGTRAP_SIGINFO;
5152 }
5153
5154 static int
5155 linux_stopped_by_watchpoint (void)
5156 {
5157 struct lwp_info *lwp = get_thread_lwp (current_thread);
5158
5159 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5160 }
5161
5162 static CORE_ADDR
5163 linux_stopped_data_address (void)
5164 {
5165 struct lwp_info *lwp = get_thread_lwp (current_thread);
5166
5167 return lwp->stopped_data_address;
5168 }
5169
5170 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5171 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5172 && defined(PT_TEXT_END_ADDR)
5173
5174 /* This is only used for targets that define PT_TEXT_ADDR,
5175 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined,
5176 presumably the target has different ways of acquiring this
5177 information, like loadmaps. */
5178
5179 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5180 to tell gdb about. */
5181
5182 static int
5183 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5184 {
5185 unsigned long text, text_end, data;
5186 int pid = lwpid_of (get_thread_lwp (current_thread));
5187
5188 errno = 0;
5189
5190 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5191 (PTRACE_TYPE_ARG4) 0);
5192 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5193 (PTRACE_TYPE_ARG4) 0);
5194 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5195 (PTRACE_TYPE_ARG4) 0);
5196
5197 if (errno == 0)
5198 {
5199 /* Both text and data offsets produced at compile-time (and so
5200 used by gdb) are relative to the beginning of the program,
5201 with the data segment immediately following the text segment.
5202 However, the actual runtime layout in memory may put the data
5203 somewhere else, so when we send gdb a data base-address, we
5204 use the real data base address and subtract the compile-time
5205 data base-address from it (which is just the length of the
5206 text segment). BSS immediately follows data in both
5207 cases. */
5208 *text_p = text;
5209 *data_p = data - (text_end - text);
5210
5211 return 1;
5212 }
5213 return 0;
5214 }
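
/* Worked example of the computation above: if the text segment was
   loaded at 0x100000 with text_end 0x108000, and the data segment at
   0x200000, then *TEXT_P is 0x100000 and *DATA_P is 0x200000 - 0x8000
   = 0x1f8000, so that gdb's compile-time data offsets (which assume
   data immediately follows text) land on the correct runtime
   addresses.  */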
5215 #endif
5216
5217 static int
5218 linux_qxfer_osdata (const char *annex,
5219 unsigned char *readbuf, unsigned const char *writebuf,
5220 CORE_ADDR offset, int len)
5221 {
5222 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5223 }
5224
5225 /* Convert a native/host siginfo object, into/from the siginfo in the
5226 layout of the inferiors' architecture. */
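/* (DIRECTION is 0 to convert from the native SIGINFO into
   INF_SIGINFO, and 1 to convert from INF_SIGINFO back into the
   native SIGINFO.) */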
5227
5228 static void
5229 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5230 {
5231 int done = 0;
5232
5233 if (the_low_target.siginfo_fixup != NULL)
5234 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5235
5236 /* If there was no callback, or the callback didn't do anything,
5237 then just do a straight memcpy. */
5238 if (!done)
5239 {
5240 if (direction == 1)
5241 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5242 else
5243 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5244 }
5245 }
5246
5247 static int
5248 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5249 unsigned const char *writebuf, CORE_ADDR offset, int len)
5250 {
5251 int pid;
5252 siginfo_t siginfo;
5253 char inf_siginfo[sizeof (siginfo_t)];
5254
5255 if (current_thread == NULL)
5256 return -1;
5257
5258 pid = lwpid_of (current_thread);
5259
5260 if (debug_threads)
5261 debug_printf ("%s siginfo for lwp %d.\n",
5262 readbuf != NULL ? "Reading" : "Writing",
5263 pid);
5264
5265 if (offset >= sizeof (siginfo))
5266 return -1;
5267
5268 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5269 return -1;
5270
5271 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5272 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5273 inferior with a 64-bit GDBSERVER should look the same as debugging it
5274 with a 32-bit GDBSERVER, we need to convert it. */
5275 siginfo_fixup (&siginfo, inf_siginfo, 0);
5276
5277 if (offset + len > sizeof (siginfo))
5278 len = sizeof (siginfo) - offset;
5279
5280 if (readbuf != NULL)
5281 memcpy (readbuf, inf_siginfo + offset, len);
5282 else
5283 {
5284 memcpy (inf_siginfo + offset, writebuf, len);
5285
5286 /* Convert back to ptrace layout before flushing it out. */
5287 siginfo_fixup (&siginfo, inf_siginfo, 1);
5288
5289 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5290 return -1;
5291 }
5292
5293 return len;
5294 }
5295
5296 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5297 it lets us notice when children change state; and it acts as the
5298 handler for the sigsuspend in my_waitpid. */
5299
5300 static void
5301 sigchld_handler (int signo)
5302 {
5303 int old_errno = errno;
5304
5305 if (debug_threads)
5306 {
5307 do
5308 {
5309 /* fprintf is not async-signal-safe, so call write
5310 directly. */
5311 if (write (2, "sigchld_handler\n",
5312 sizeof ("sigchld_handler\n") - 1) < 0)
5313 break; /* just ignore */
5314 } while (0);
5315 }
5316
5317 if (target_is_async_p ())
5318 async_file_mark (); /* trigger a linux_wait */
5319
5320 errno = old_errno;
5321 }
5322
5323 static int
5324 linux_supports_non_stop (void)
5325 {
5326 return 1;
5327 }
5328
5329 static int
5330 linux_async (int enable)
5331 {
5332 int previous = target_is_async_p ();
5333
5334 if (debug_threads)
5335 debug_printf ("linux_async (%d), previous=%d\n",
5336 enable, previous);
5337
5338 if (previous != enable)
5339 {
5340 sigset_t mask;
5341 sigemptyset (&mask);
5342 sigaddset (&mask, SIGCHLD);
5343
5344 sigprocmask (SIG_BLOCK, &mask, NULL);
5345
5346 if (enable)
5347 {
5348 if (pipe (linux_event_pipe) == -1)
5349 {
5350 linux_event_pipe[0] = -1;
5351 linux_event_pipe[1] = -1;
5352 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5353
5354 warning ("creating event pipe failed.");
5355 return previous;
5356 }
5357
5358 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5359 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5360
5361 /* Register the event loop handler. */
5362 add_file_handler (linux_event_pipe[0],
5363 handle_target_event, NULL);
5364
5365 /* Always trigger a linux_wait. */
5366 async_file_mark ();
5367 }
5368 else
5369 {
5370 delete_file_handler (linux_event_pipe[0]);
5371
5372 close (linux_event_pipe[0]);
5373 close (linux_event_pipe[1]);
5374 linux_event_pipe[0] = -1;
5375 linux_event_pipe[1] = -1;
5376 }
5377
5378 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5379 }
5380
5381 return previous;
5382 }
5383
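/* The event pipe managed above is an instance of the classic
   self-pipe trick: an async-signal-safe way for sigchld_handler to
   wake the event loop is to write a byte to a non-blocking pipe that
   the loop monitors. A minimal sketch of the pattern, independent
   of gdbserver's event-loop API:

     static int ev_pipe[2];   -- created with pipe (), both ends
                                 set O_NONBLOCK

     static void
     handler (int signo)
     {
       int old_errno = errno;

       if (write (ev_pipe[1], "+", 1) < 0)
         ;   -- nothing useful to do about failure in a handler
       errno = old_errno;
     }

   In this file, async_file_mark plays the writer role and
   handle_target_event is registered as the read-side handler for
   linux_event_pipe.  */
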
5384 static int
5385 linux_start_non_stop (int nonstop)
5386 {
5387 /* Register or unregister from event-loop accordingly. */
5388 linux_async (nonstop);
5389
5390 if (target_is_async_p () != (nonstop != 0))
5391 return -1;
5392
5393 return 0;
5394 }
5395
5396 static int
5397 linux_supports_multi_process (void)
5398 {
5399 return 1;
5400 }
5401
5402 static int
5403 linux_supports_disable_randomization (void)
5404 {
5405 #ifdef HAVE_PERSONALITY
5406 return 1;
5407 #else
5408 return 0;
5409 #endif
5410 }
5411
5412 static int
5413 linux_supports_agent (void)
5414 {
5415 return 1;
5416 }
5417
5418 static int
5419 linux_supports_range_stepping (void)
5420 {
5421 if (*the_low_target.supports_range_stepping == NULL)
5422 return 0;
5423
5424 return (*the_low_target.supports_range_stepping) ();
5425 }
5426
5427 /* Enumerate spufs IDs for process PID. */
5428 static int
5429 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5430 {
5431 int pos = 0;
5432 int written = 0;
5433 char path[128];
5434 DIR *dir;
5435 struct dirent *entry;
5436
5437 sprintf (path, "/proc/%ld/fd", pid);
5438 dir = opendir (path);
5439 if (!dir)
5440 return -1;
5441
5442 rewinddir (dir);
5443 while ((entry = readdir (dir)) != NULL)
5444 {
5445 struct stat st;
5446 struct statfs stfs;
5447 int fd;
5448
5449 fd = atoi (entry->d_name);
5450 if (!fd)
5451 continue;
5452
5453 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5454 if (stat (path, &st) != 0)
5455 continue;
5456 if (!S_ISDIR (st.st_mode))
5457 continue;
5458
5459 if (statfs (path, &stfs) != 0)
5460 continue;
5461 if (stfs.f_type != SPUFS_MAGIC)
5462 continue;
5463
5464 if (pos >= offset && pos + 4 <= offset + len)
5465 {
5466 *(unsigned int *)(buf + pos - offset) = fd;
5467 written += 4;
5468 }
5469 pos += 4;
5470 }
5471
5472 closedir (dir);
5473 return written;
5474 }
5475
5476 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5477 object type, using the /proc file system. */
5478 static int
5479 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5480 unsigned const char *writebuf,
5481 CORE_ADDR offset, int len)
5482 {
5483 long pid = lwpid_of (current_thread);
5484 char buf[128];
5485 int fd = 0;
5486 int ret = 0;
5487
5488 if (!writebuf && !readbuf)
5489 return -1;
5490
5491 if (!*annex)
5492 {
5493 if (!readbuf)
5494 return -1;
5495 else
5496 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5497 }
5498
5499 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5500 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5501 if (fd <= 0)
5502 return -1;
5503
5504 if (offset != 0
5505 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5506 {
5507 close (fd);
5508 return 0;
5509 }
5510
5511 if (writebuf)
5512 ret = write (fd, writebuf, (size_t) len);
5513 else
5514 ret = read (fd, readbuf, (size_t) len);
5515
5516 close (fd);
5517 return ret;
5518 }
5519
5520 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5521 struct target_loadseg
5522 {
5523 /* Core address to which the segment is mapped. */
5524 Elf32_Addr addr;
5525 /* VMA recorded in the program header. */
5526 Elf32_Addr p_vaddr;
5527 /* Size of this segment in memory. */
5528 Elf32_Word p_memsz;
5529 };
5530
5531 # if defined PT_GETDSBT
5532 struct target_loadmap
5533 {
5534 /* Protocol version number, must be zero. */
5535 Elf32_Word version;
5536 /* Pointer to the DSBT table, its size, and the DSBT index. */
5537 unsigned *dsbt_table;
5538 unsigned dsbt_size, dsbt_index;
5539 /* Number of segments in this map. */
5540 Elf32_Word nsegs;
5541 /* The actual memory map. */
5542 struct target_loadseg segs[/*nsegs*/];
5543 };
5544 # define LINUX_LOADMAP PT_GETDSBT
5545 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5546 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5547 # else
5548 struct target_loadmap
5549 {
5550 /* Protocol version number, must be zero. */
5551 Elf32_Half version;
5552 /* Number of segments in this map. */
5553 Elf32_Half nsegs;
5554 /* The actual memory map. */
5555 struct target_loadseg segs[/*nsegs*/];
5556 };
5557 # define LINUX_LOADMAP PTRACE_GETFDPIC
5558 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5559 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5560 # endif
5561
5562 static int
5563 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5564 unsigned char *myaddr, unsigned int len)
5565 {
5566 int pid = lwpid_of (current_thread);
5567 int addr = -1;
5568 struct target_loadmap *data = NULL;
5569 unsigned int actual_length, copy_length;
5570
5571 if (strcmp (annex, "exec") == 0)
5572 addr = (int) LINUX_LOADMAP_EXEC;
5573 else if (strcmp (annex, "interp") == 0)
5574 addr = (int) LINUX_LOADMAP_INTERP;
5575 else
5576 return -1;
5577
5578 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5579 return -1;
5580
5581 if (data == NULL)
5582 return -1;
5583
5584 actual_length = sizeof (struct target_loadmap)
5585 + sizeof (struct target_loadseg) * data->nsegs;
5586
5587 if (offset < 0 || offset > actual_length)
5588 return -1;
5589
5590 copy_length = actual_length - offset < len ? actual_length - offset : len;
5591 memcpy (myaddr, (char *) data + offset, copy_length);
5592 return copy_length;
5593 }
5594 #else
5595 # define linux_read_loadmap NULL
5596 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5597
5598 static void
5599 linux_process_qsupported (const char *query)
5600 {
5601 if (the_low_target.process_qsupported != NULL)
5602 the_low_target.process_qsupported (query);
5603 }
5604
5605 static int
5606 linux_supports_tracepoints (void)
5607 {
5608 if (*the_low_target.supports_tracepoints == NULL)
5609 return 0;
5610
5611 return (*the_low_target.supports_tracepoints) ();
5612 }
5613
5614 static CORE_ADDR
5615 linux_read_pc (struct regcache *regcache)
5616 {
5617 if (the_low_target.get_pc == NULL)
5618 return 0;
5619
5620 return (*the_low_target.get_pc) (regcache);
5621 }
5622
5623 static void
5624 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5625 {
5626 gdb_assert (the_low_target.set_pc != NULL);
5627
5628 (*the_low_target.set_pc) (regcache, pc);
5629 }
5630
5631 static int
5632 linux_thread_stopped (struct thread_info *thread)
5633 {
5634 return get_thread_lwp (thread)->stopped;
5635 }
5636
5637 /* This exposes stop-all-threads functionality to other modules. */
5638
5639 static void
5640 linux_pause_all (int freeze)
5641 {
5642 stop_all_lwps (freeze, NULL);
5643 }
5644
5645 /* This exposes unstop-all-threads functionality to other gdbserver
5646 modules. */
5647
5648 static void
5649 linux_unpause_all (int unfreeze)
5650 {
5651 unstop_all_lwps (unfreeze, NULL);
5652 }
5653
5654 static int
5655 linux_prepare_to_access_memory (void)
5656 {
5657 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5658 running LWP. */
5659 if (non_stop)
5660 linux_pause_all (1);
5661 return 0;
5662 }
5663
5664 static void
5665 linux_done_accessing_memory (void)
5666 {
5667 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5668 running LWP. */
5669 if (non_stop)
5670 linux_unpause_all (1);
5671 }
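
/* Illustrative pairing (hypothetical caller); gdbserver's generic
   code brackets memory accesses like this so that reads and writes
   always see a stopped inferior, even in non-stop mode:

     prepare_to_access_memory ();
     ... read or write inferior memory ...
     done_accessing_memory ();
*/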
5672
5673 static int
5674 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5675 CORE_ADDR collector,
5676 CORE_ADDR lockaddr,
5677 ULONGEST orig_size,
5678 CORE_ADDR *jump_entry,
5679 CORE_ADDR *trampoline,
5680 ULONGEST *trampoline_size,
5681 unsigned char *jjump_pad_insn,
5682 ULONGEST *jjump_pad_insn_size,
5683 CORE_ADDR *adjusted_insn_addr,
5684 CORE_ADDR *adjusted_insn_addr_end,
5685 char *err)
5686 {
5687 return (*the_low_target.install_fast_tracepoint_jump_pad)
5688 (tpoint, tpaddr, collector, lockaddr, orig_size,
5689 jump_entry, trampoline, trampoline_size,
5690 jjump_pad_insn, jjump_pad_insn_size,
5691 adjusted_insn_addr, adjusted_insn_addr_end,
5692 err);
5693 }
5694
5695 static struct emit_ops *
5696 linux_emit_ops (void)
5697 {
5698 if (the_low_target.emit_ops != NULL)
5699 return (*the_low_target.emit_ops) ();
5700 else
5701 return NULL;
5702 }
5703
5704 static int
5705 linux_get_min_fast_tracepoint_insn_len (void)
5706 {
5707 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5708 }
5709
5710 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5711
5712 static int
5713 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5714 CORE_ADDR *phdr_memaddr, int *num_phdr)
5715 {
5716 char filename[PATH_MAX];
5717 int fd;
5718 const int auxv_size = is_elf64
5719 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5720 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5721
5722 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5723
5724 fd = open (filename, O_RDONLY);
5725 if (fd < 0)
5726 return 1;
5727
5728 *phdr_memaddr = 0;
5729 *num_phdr = 0;
5730 while (read (fd, buf, auxv_size) == auxv_size
5731 && (*phdr_memaddr == 0 || *num_phdr == 0))
5732 {
5733 if (is_elf64)
5734 {
5735 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5736
5737 switch (aux->a_type)
5738 {
5739 case AT_PHDR:
5740 *phdr_memaddr = aux->a_un.a_val;
5741 break;
5742 case AT_PHNUM:
5743 *num_phdr = aux->a_un.a_val;
5744 break;
5745 }
5746 }
5747 else
5748 {
5749 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5750
5751 switch (aux->a_type)
5752 {
5753 case AT_PHDR:
5754 *phdr_memaddr = aux->a_un.a_val;
5755 break;
5756 case AT_PHNUM:
5757 *num_phdr = aux->a_un.a_val;
5758 break;
5759 }
5760 }
5761 }
5762
5763 close (fd);
5764
5765 if (*phdr_memaddr == 0 || *num_phdr == 0)
5766 {
5767 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5768 "phdr_memaddr = %ld, phdr_num = %d",
5769 (long) *phdr_memaddr, *num_phdr);
5770 return 2;
5771 }
5772
5773 return 0;
5774 }
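
/* For illustration, /proc/PID/auxv is a flat array of fixed-size
   auxv entries, which the loop above consumes one at a time.  A
   hypothetical 64-bit dump might contain:

     a_type = AT_PHDR  (3), a_un.a_val = 0x555555554040
     a_type = AT_PHNUM (5), a_un.a_val = 13
     ...
     a_type = AT_NULL  (0), a_un.a_val = 0

   The loop stops early once both AT_PHDR and AT_PHNUM are found.  */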
5775
5776 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5777
5778 static CORE_ADDR
5779 get_dynamic (const int pid, const int is_elf64)
5780 {
5781 CORE_ADDR phdr_memaddr, relocation;
5782 int num_phdr, i;
5783 unsigned char *phdr_buf;
5784 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5785
5786 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5787 return 0;
5788
5789 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5790 phdr_buf = alloca (num_phdr * phdr_size);
5791
5792 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5793 return 0;
5794
5795 /* Compute relocation: it is expected to be 0 for "regular" executables,
5796 non-zero for PIE ones. */
5797 relocation = -1;
5798 for (i = 0; relocation == -1 && i < num_phdr; i++)
5799 if (is_elf64)
5800 {
5801 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5802
5803 if (p->p_type == PT_PHDR)
5804 relocation = phdr_memaddr - p->p_vaddr;
5805 }
5806 else
5807 {
5808 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5809
5810 if (p->p_type == PT_PHDR)
5811 relocation = phdr_memaddr - p->p_vaddr;
5812 }
5813
5814 if (relocation == -1)
5815 {
5816 /* PT_PHDR is optional, but necessary for PIE in general.  Fortunately,
5817 all real-world executables, including PIE executables, have PT_PHDR
5818 present.  PT_PHDR is absent from some shared libraries and from fpc
5819 (Free Pascal 2.4) binaries, but neither of those needs or provides
5820 DT_DEBUG anyway (fpc binaries are statically linked).
5821
5822 Therefore, if DT_DEBUG exists, PT_PHDR exists as well.
5823
5824 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
5825
5826 return 0;
5827 }
5828
5829 for (i = 0; i < num_phdr; i++)
5830 {
5831 if (is_elf64)
5832 {
5833 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5834
5835 if (p->p_type == PT_DYNAMIC)
5836 return p->p_vaddr + relocation;
5837 }
5838 else
5839 {
5840 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5841
5842 if (p->p_type == PT_DYNAMIC)
5843 return p->p_vaddr + relocation;
5844 }
5845 }
5846
5847 return 0;
5848 }
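
/* Worked example with hypothetical addresses: a PIE whose PT_PHDR
   records p_vaddr 0x40, loaded so that AT_PHDR reports
   0x555555554040, gives

     relocation = 0x555555554040 - 0x40 = 0x555555554000

   so a PT_DYNAMIC with p_vaddr 0x1d08 is reported at
   0x555555555d08.  For a non-PIE executable the two agree and
   relocation is 0.  */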
5849
5850 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5851 can be 0 if the inferior does not yet have the library list initialized.
5852 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5853 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5854
5855 static CORE_ADDR
5856 get_r_debug (const int pid, const int is_elf64)
5857 {
5858 CORE_ADDR dynamic_memaddr;
5859 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5860 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5861 CORE_ADDR map = -1;
5862
5863 dynamic_memaddr = get_dynamic (pid, is_elf64);
5864 if (dynamic_memaddr == 0)
5865 return map;
5866
5867 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5868 {
5869 if (is_elf64)
5870 {
5871 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5872 #ifdef DT_MIPS_RLD_MAP
5873 union
5874 {
5875 Elf64_Xword map;
5876 unsigned char buf[sizeof (Elf64_Xword)];
5877 }
5878 rld_map;
5879
5880 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5881 {
5882 if (linux_read_memory (dyn->d_un.d_val,
5883 rld_map.buf, sizeof (rld_map.buf)) == 0)
5884 return rld_map.map;
5885 else
5886 break;
5887 }
5888 #endif /* DT_MIPS_RLD_MAP */
5889
5890 if (dyn->d_tag == DT_DEBUG && map == -1)
5891 map = dyn->d_un.d_val;
5892
5893 if (dyn->d_tag == DT_NULL)
5894 break;
5895 }
5896 else
5897 {
5898 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5899 #ifdef DT_MIPS_RLD_MAP
5900 union
5901 {
5902 Elf32_Word map;
5903 unsigned char buf[sizeof (Elf32_Word)];
5904 }
5905 rld_map;
5906
5907 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5908 {
5909 if (linux_read_memory (dyn->d_un.d_val,
5910 rld_map.buf, sizeof (rld_map.buf)) == 0)
5911 return rld_map.map;
5912 else
5913 break;
5914 }
5915 #endif /* DT_MIPS_RLD_MAP */
5916
5917 if (dyn->d_tag == DT_DEBUG && map == -1)
5918 map = dyn->d_un.d_val;
5919
5920 if (dyn->d_tag == DT_NULL)
5921 break;
5922 }
5923
5924 dynamic_memaddr += dyn_size;
5925 }
5926
5927 return map;
5928 }
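
/* Sketch of what the scan above sees in a typical non-MIPS dynamic
   section (values hypothetical):

     d_tag = DT_NEEDED, ...
     d_tag = DT_DEBUG,  d_un.d_val = 0x7ffff7ffe190
     d_tag = DT_NULL

   The dynamic linker stores &_r_debug in the DT_DEBUG entry at
   startup; a value of 0 therefore means the library list is not
   initialized yet, which is the "can be 0" case documented above.  */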
5929
5930 /* Read one pointer from MEMADDR in the inferior. */
5931
5932 static int
5933 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5934 {
5935 int ret;
5936
5937 /* Go through a union so this works on either big or little endian
5938 hosts, when the inferior's pointer size is smaller than the size
5939 of CORE_ADDR. It is assumed the inferior's endianness is the
5940 same as the superior's.  */
5941 union
5942 {
5943 CORE_ADDR core_addr;
5944 unsigned int ui;
5945 unsigned char uc;
5946 } addr;
5947
5948 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5949 if (ret == 0)
5950 {
5951 if (ptr_size == sizeof (CORE_ADDR))
5952 *ptr = addr.core_addr;
5953 else if (ptr_size == sizeof (unsigned int))
5954 *ptr = addr.ui;
5955 else
5956 gdb_assert_not_reached ("unhandled pointer size");
5957 }
5958 return ret;
5959 }
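
/* Minimal usage sketch (hypothetical values): for a 32-bit inferior
   under a 64-bit gdbserver, PTR_SIZE is 4, so the bytes land in
   ADDR.UI and are widened on return:

     CORE_ADDR next;
     if (read_one_ptr (lm_addr + lmo->l_next_offset, &next, 4) == 0)
       lm_addr = next;

   Reading straight into a CORE_ADDR would misplace the bytes on a
   big-endian host, which is what the union avoids.  */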
5960
5961 struct link_map_offsets
5962 {
5963 /* Offset and size of r_debug.r_version. */
5964 int r_version_offset;
5965
5966 /* Offset and size of r_debug.r_map. */
5967 int r_map_offset;
5968
5969 /* Offset to l_addr field in struct link_map. */
5970 int l_addr_offset;
5971
5972 /* Offset to l_name field in struct link_map. */
5973 int l_name_offset;
5974
5975 /* Offset to l_ld field in struct link_map. */
5976 int l_ld_offset;
5977
5978 /* Offset to l_next field in struct link_map. */
5979 int l_next_offset;
5980
5981 /* Offset to l_prev field in struct link_map. */
5982 int l_prev_offset;
5983 };
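
/* These offsets mirror the public head of glibc's struct link_map
   (see <link.h>).  Schematically, for the 32-bit layout used below:

     struct link_map
     {
       Elf32_Addr l_addr;          offset  0
       char *l_name;               offset  4
       Elf32_Dyn *l_ld;            offset  8
       struct link_map *l_next;    offset 12
       struct link_map *l_prev;    offset 16
     };

   The 64-bit variant doubles each pointer-sized field.  */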
5984
5985 /* Construct qXfer:libraries-svr4:read reply. */
5986
5987 static int
5988 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5989 unsigned const char *writebuf,
5990 CORE_ADDR offset, int len)
5991 {
5992 char *document;
5993 unsigned document_len;
5994 struct process_info_private *const priv = current_process ()->priv;
5995 char filename[PATH_MAX];
5996 int pid, is_elf64;
5997
5998 static const struct link_map_offsets lmo_32bit_offsets =
5999 {
6000 0, /* r_version offset. */
6001 4, /* r_debug.r_map offset. */
6002 0, /* l_addr offset in link_map. */
6003 4, /* l_name offset in link_map. */
6004 8, /* l_ld offset in link_map. */
6005 12, /* l_next offset in link_map. */
6006 16 /* l_prev offset in link_map. */
6007 };
6008
6009 static const struct link_map_offsets lmo_64bit_offsets =
6010 {
6011 0, /* r_version offset. */
6012 8, /* r_debug.r_map offset. */
6013 0, /* l_addr offset in link_map. */
6014 8, /* l_name offset in link_map. */
6015 16, /* l_ld offset in link_map. */
6016 24, /* l_next offset in link_map. */
6017 32 /* l_prev offset in link_map. */
6018 };
6019 const struct link_map_offsets *lmo;
6020 unsigned int machine;
6021 int ptr_size;
6022 CORE_ADDR lm_addr = 0, lm_prev = 0;
6023 int allocated = 1024;
6024 char *p;
6025 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6026 int header_done = 0;
6027
6028 if (writebuf != NULL)
6029 return -2;
6030 if (readbuf == NULL)
6031 return -1;
6032
6033 pid = lwpid_of (current_thread);
6034 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6035 is_elf64 = elf_64_file_p (filename, &machine);
6036 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6037 ptr_size = is_elf64 ? 8 : 4;
6038
6039 while (annex[0] != '\0')
6040 {
6041 const char *sep;
6042 CORE_ADDR *addrp;
6043 int len;
6044
6045 sep = strchr (annex, '=');
6046 if (sep == NULL)
6047 break;
6048
6049 len = sep - annex;
6050 if (len == 5 && startswith (annex, "start"))
6051 addrp = &lm_addr;
6052 else if (len == 4 && startswith (annex, "prev"))
6053 addrp = &lm_prev;
6054 else
6055 {
6056 annex = strchr (sep, ';');
6057 if (annex == NULL)
6058 break;
6059 annex++;
6060 continue;
6061 }
6062
6063 annex = decode_address_to_semicolon (addrp, sep + 1);
6064 }
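
/* For reference, the annex parsed above has the form
   "start=HEXADDR;prev=HEXADDR;" -- for example the hypothetical
   annex "start=7ffff7ffe190;prev=0;" requests the list from that
   link_map onward, with no expected predecessor.  Unrecognized keys
   are skipped up to the next semicolon.  */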
6065
6066 if (lm_addr == 0)
6067 {
6068 int r_version = 0;
6069
6070 if (priv->r_debug == 0)
6071 priv->r_debug = get_r_debug (pid, is_elf64);
6072
6073 /* We failed to find DT_DEBUG.  This situation will not change
6074 for this inferior, so do not retry it.  Report it to GDB as
6075 E01; see GDB's solib-svr4.c for the reasons.  */
6076 if (priv->r_debug == (CORE_ADDR) -1)
6077 return -1;
6078
6079 if (priv->r_debug != 0)
6080 {
6081 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6082 (unsigned char *) &r_version,
6083 sizeof (r_version)) != 0
6084 || r_version != 1)
6085 {
6086 warning ("unexpected r_debug version %d", r_version);
6087 }
6088 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6089 &lm_addr, ptr_size) != 0)
6090 {
6091 warning ("unable to read r_map from 0x%lx",
6092 (long) priv->r_debug + lmo->r_map_offset);
6093 }
6094 }
6095 }
6096
6097 document = xmalloc (allocated);
6098 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6099 p = document + strlen (document);
6100
6101 while (lm_addr
6102 && read_one_ptr (lm_addr + lmo->l_name_offset,
6103 &l_name, ptr_size) == 0
6104 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6105 &l_addr, ptr_size) == 0
6106 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6107 &l_ld, ptr_size) == 0
6108 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6109 &l_prev, ptr_size) == 0
6110 && read_one_ptr (lm_addr + lmo->l_next_offset,
6111 &l_next, ptr_size) == 0)
6112 {
6113 unsigned char libname[PATH_MAX];
6114
6115 if (lm_prev != l_prev)
6116 {
6117 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6118 (long) lm_prev, (long) l_prev);
6119 break;
6120 }
6121
6122 /* Ignore the first entry even if it has a valid name, as the first
6123 entry corresponds to the main executable.  The first entry should
6124 not be skipped if the dynamic loader was loaded late by a static
6125 executable (see the solib-svr4.c parameter ignore_first).  But in
6126 that case the main executable has no PT_DYNAMIC, and this function
6127 has already returned above due to a failed get_r_debug.  */
6128 if (lm_prev == 0)
6129 {
6130 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6131 p = p + strlen (p);
6132 }
6133 else
6134 {
6135 /* Not checking for error because reading may stop before
6136 we've got PATH_MAX worth of characters. */
6137 libname[0] = '\0';
6138 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6139 libname[sizeof (libname) - 1] = '\0';
6140 if (libname[0] != '\0')
6141 {
6142 /* 6x the size for xml_escape_text below. */
6143 size_t len = 6 * strlen ((char *) libname);
6144 char *name;
6145
6146 if (!header_done)
6147 {
6148 /* Terminate `<library-list-svr4'. */
6149 *p++ = '>';
6150 header_done = 1;
6151 }
6152
6153 while (allocated < p - document + len + 200)
6154 {
6155 /* Expand to guarantee sufficient storage. */
6156 uintptr_t document_len = p - document;
6157
6158 document = xrealloc (document, 2 * allocated);
6159 allocated *= 2;
6160 p = document + document_len;
6161 }
6162
6163 name = xml_escape_text ((char *) libname);
6164 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6165 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6166 name, (unsigned long) lm_addr,
6167 (unsigned long) l_addr, (unsigned long) l_ld);
6168 free (name);
6169 }
6170 }
6171
6172 lm_prev = lm_addr;
6173 lm_addr = l_next;
6174 }
6175
6176 if (!header_done)
6177 {
6178 /* Empty list; terminate `<library-list-svr4'. */
6179 strcpy (p, "/>");
6180 }
6181 else
6182 strcpy (p, "</library-list-svr4>");
6183
6184 document_len = strlen (document);
6185 if (offset < document_len)
6186 document_len -= offset;
6187 else
6188 document_len = 0;
6189 if (len > document_len)
6190 len = document_len;
6191
6192 memcpy (readbuf, document + offset, len);
6193 xfree (document);
6194
6195 return len;
6196 }
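
/* A reply document built above looks like the following (one long
   line in practice; addresses hypothetical):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe150">
     <library name="/lib64/libc.so.6" lm="0x7ffff7fbd000"
              l_addr="0x7ffff79e2000" l_ld="0x7ffff7da5ba0"/>
     </library-list-svr4>

   GDB pages it in with successive OFFSET/LEN reads until a short
   read marks the end.  */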
6197
6198 #ifdef HAVE_LINUX_BTRACE
6199
6200 /* See to_enable_btrace target method. */
6201
6202 static struct btrace_target_info *
6203 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6204 {
6205 struct btrace_target_info *tinfo;
6206
6207 tinfo = linux_enable_btrace (ptid, conf);
6208
6209 if (tinfo != NULL && tinfo->ptr_bits == 0)
6210 {
6211 struct thread_info *thread = find_thread_ptid (ptid);
6212 struct regcache *regcache = get_thread_regcache (thread, 0);
6213
6214 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6215 }
6216
6217 return tinfo;
6218 }
6219
6220 /* See to_disable_btrace target method. */
6221
6222 static int
6223 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6224 {
6225 enum btrace_error err;
6226
6227 err = linux_disable_btrace (tinfo);
6228 return (err == BTRACE_ERR_NONE ? 0 : -1);
6229 }
6230
6231 /* See to_read_btrace target method. */
6232
6233 static int
6234 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6235 int type)
6236 {
6237 struct btrace_data btrace;
6238 struct btrace_block *block;
6239 enum btrace_error err;
6240 int i;
6241
6242 btrace_data_init (&btrace);
6243
6244 err = linux_read_btrace (&btrace, tinfo, type);
6245 if (err != BTRACE_ERR_NONE)
6246 {
6247 if (err == BTRACE_ERR_OVERFLOW)
6248 buffer_grow_str0 (buffer, "E.Overflow.");
6249 else
6250 buffer_grow_str0 (buffer, "E.Generic Error.");
6251
6252 btrace_data_fini (&btrace);
6253 return -1;
6254 }
6255
6256 switch (btrace.format)
6257 {
6258 case BTRACE_FORMAT_NONE:
6259 buffer_grow_str0 (buffer, "E.No Trace.");
6260 break;
6261
6262 case BTRACE_FORMAT_BTS:
6263 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6264 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6265
6266 for (i = 0;
6267 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6268 i++)
6269 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6270 paddress (block->begin), paddress (block->end));
6271
6272 buffer_grow_str0 (buffer, "</btrace>\n");
6273 break;
6274
6275 default:
6276 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6277
6278 btrace_data_fini (&btrace);
6279 return -1;
6280 }
6281
6282 btrace_data_fini (&btrace);
6283 return 0;
6284 }
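
/* The BTS reply built above has this shape (addresses hypothetical):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400540" end="0x400562"/>
     </btrace>

   Each block is one gap-free run of executed instructions.  */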
6285
6286 /* See to_btrace_conf target method. */
6287
6288 static int
6289 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6290 struct buffer *buffer)
6291 {
6292 const struct btrace_config *conf;
6293
6294 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6295 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6296
6297 conf = linux_btrace_conf (tinfo);
6298 if (conf != NULL)
6299 {
6300 switch (conf->format)
6301 {
6302 case BTRACE_FORMAT_NONE:
6303 break;
6304
6305 case BTRACE_FORMAT_BTS:
6306 buffer_xml_printf (buffer, "<bts");
6307 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6308 buffer_xml_printf (buffer, " />\n");
6309 break;
6310 }
6311 }
6312
6313 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6314 return 0;
6315 }
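
/* And the matching configuration reply, here for a hypothetical
   8 KiB BTS ring buffer:

     <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
     <btrace-conf version="1.0">
     <bts size="0x2000" />
     </btrace-conf>  */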
6316 #endif /* HAVE_LINUX_BTRACE */
6317
6318 /* See nat/linux-nat.h. */
6319
6320 ptid_t
6321 current_lwp_ptid (void)
6322 {
6323 return ptid_of (current_thread);
6324 }
6325
6326 static struct target_ops linux_target_ops = {
6327 linux_create_inferior,
6328 linux_attach,
6329 linux_kill,
6330 linux_detach,
6331 linux_mourn,
6332 linux_join,
6333 linux_thread_alive,
6334 linux_resume,
6335 linux_wait,
6336 linux_fetch_registers,
6337 linux_store_registers,
6338 linux_prepare_to_access_memory,
6339 linux_done_accessing_memory,
6340 linux_read_memory,
6341 linux_write_memory,
6342 linux_look_up_symbols,
6343 linux_request_interrupt,
6344 linux_read_auxv,
6345 linux_supports_z_point_type,
6346 linux_insert_point,
6347 linux_remove_point,
6348 linux_stopped_by_sw_breakpoint,
6349 linux_supports_stopped_by_sw_breakpoint,
6350 linux_stopped_by_hw_breakpoint,
6351 linux_supports_stopped_by_hw_breakpoint,
6352 linux_stopped_by_watchpoint,
6353 linux_stopped_data_address,
6354 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6355 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6356 && defined(PT_TEXT_END_ADDR)
6357 linux_read_offsets,
6358 #else
6359 NULL,
6360 #endif
6361 #ifdef USE_THREAD_DB
6362 thread_db_get_tls_address,
6363 #else
6364 NULL,
6365 #endif
6366 linux_qxfer_spu,
6367 hostio_last_error_from_errno,
6368 linux_qxfer_osdata,
6369 linux_xfer_siginfo,
6370 linux_supports_non_stop,
6371 linux_async,
6372 linux_start_non_stop,
6373 linux_supports_multi_process,
6374 #ifdef USE_THREAD_DB
6375 thread_db_handle_monitor_command,
6376 #else
6377 NULL,
6378 #endif
6379 linux_common_core_of_thread,
6380 linux_read_loadmap,
6381 linux_process_qsupported,
6382 linux_supports_tracepoints,
6383 linux_read_pc,
6384 linux_write_pc,
6385 linux_thread_stopped,
6386 NULL,
6387 linux_pause_all,
6388 linux_unpause_all,
6389 linux_stabilize_threads,
6390 linux_install_fast_tracepoint_jump_pad,
6391 linux_emit_ops,
6392 linux_supports_disable_randomization,
6393 linux_get_min_fast_tracepoint_insn_len,
6394 linux_qxfer_libraries_svr4,
6395 linux_supports_agent,
6396 #ifdef HAVE_LINUX_BTRACE
6397 linux_supports_btrace,
6398 linux_low_enable_btrace,
6399 linux_low_disable_btrace,
6400 linux_low_read_btrace,
6401 linux_low_btrace_conf,
6402 #else
6403 NULL,
6404 NULL,
6405 NULL,
6406 NULL,
6407 NULL,
6408 #endif
6409 linux_supports_range_stepping,
6410 };
6411
6412 static void
6413 linux_init_signals ()
6414 {
6415 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6416 to find what the cancel signal actually is. */
6417 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6418 signal (__SIGRTMIN+1, SIG_IGN);
6419 #endif
6420 }
6421
6422 #ifdef HAVE_LINUX_REGSETS
6423 void
6424 initialize_regsets_info (struct regsets_info *info)
6425 {
6426 for (info->num_regsets = 0;
6427 info->regsets[info->num_regsets].size >= 0;
6428 info->num_regsets++)
6429 ;
6430 }
6431 #endif
6432
6433 void
6434 initialize_low (void)
6435 {
6436 struct sigaction sigchld_action;
6437 memset (&sigchld_action, 0, sizeof (sigchld_action));
6438 set_target_ops (&linux_target_ops);
6439 set_breakpoint_data (the_low_target.breakpoint,
6440 the_low_target.breakpoint_len);
6441 linux_init_signals ();
6442 linux_ptrace_init_warnings ();
6443
6444 sigchld_action.sa_handler = sigchld_handler;
6445 sigemptyset (&sigchld_action.sa_mask);
6446 sigchld_action.sa_flags = SA_RESTART;
6447 sigaction (SIGCHLD, &sigchld_action, NULL);
6448
6449 initialize_low_arch ();
6450 }