/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"
#include "tdesc.h"
#include "rsp-low.h"
#include "nat/linux-maps.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include <search.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#include "rsp-low.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif
#include "nat/linux-namespaces.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
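
/* A minimal sketch (compiled out) of the effect of the W_STOPCODE
   fallback above: it builds a raw wait status encoding "stopped by
   signal SIG", i.e. the inverse of WIFSTOPPED/WSTOPSIG, assuming the
   usual Linux wait-status layout.  dequeue_one_deferred_signal below
   relies on exactly this to re-synthesize a status from a remembered
   signal number.  */
#if 0
static void
w_stopcode_example (void)
{
  int wstat = W_STOPCODE (SIGTRAP);	/* (SIGTRAP << 8) | 0x7f.  */

  gdb_assert (WIFSTOPPED (wstat));		/* Low byte is 0x7f.  */
  gdb_assert (WSTOPSIG (wstat) == SIGTRAP);	/* Bits 8..15 hold the signal.  */
}
#endif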

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
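
/* A minimal sketch (compiled out) of how the auxv typedefs above are
   meant to be used: walking a raw buffer read from /proc/PID/auxv.
   The 64-bit walk and the helper name below are illustrative
   assumptions, not code from this file; AT_NULL terminates the
   vector.  */
#if 0
static unsigned long
auxv_find_example (const char *buf, size_t len, unsigned long type)
{
  const Elf64_auxv_t *auxv = (const Elf64_auxv_t *) buf;
  size_t i;

  for (i = 0; (i + 1) * sizeof (Elf64_auxv_t) <= len; i++)
    {
      if (auxv[i].a_type == AT_NULL)
	break;
      if (auxv[i].a_type == type)
	return auxv[i].a_un.a_val;
    }
  return 0;
}
#endif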

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return ptid_of (get_lwp_thread (lwp));
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

typedef union ElfXX_Ehdr
{
  Elf32_Ehdr _32;
  Elf64_Ehdr _64;
} ElfXX_Ehdr;

typedef union ElfXX_Phdr
{
  Elf32_Phdr _32;
  Elf64_Phdr _64;
} ElfXX_Phdr;

typedef union ElfXX_Nhdr
{
  Elf32_Nhdr _32;
  Elf64_Nhdr _64;
} ElfXX_Nhdr;

#define ELFXX_FLD(elf64, hdr, fld) ((elf64) ? (hdr)._64.fld : (hdr)._32.fld)
#define ELFXX_SIZEOF(elf64, hdr) ((elf64) ? sizeof ((hdr)._64) \
				  : sizeof ((hdr)._32))
/* Round up to next 4 byte boundary.  */
#define ELFXX_ROUNDUP_4(elf64, what) (((what) + 3) & ~(ULONGEST) 3)
#define BUILD_ID_INVALID "?"
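
/* A minimal sketch (compiled out) of the ElfXX accessor pattern: the
   unions plus ELFXX_FLD let one routine walk 32-bit and 64-bit ELF
   images, picking the field width at run time from an ELF64 flag.
   The helper below is an illustrative assumption, not code from this
   file.  */
#if 0
static ULONGEST
elfxx_phoff_example (const ElfXX_Ehdr *ehdr, int elf64)
{
  /* Reads _64.e_phoff or _32.e_phoff depending on ELF64; either
     width converts cleanly to ULONGEST.  */
  return ELFXX_FLD (elf64, *ehdr, e_phoff);
}
#endif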

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
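
/* A minimal sketch (compiled out) of how this list absorbs the race
   between a ptrace clone/fork notification and the new child's first
   stop arriving out of order: whichever side loses the race parks or
   fetches the status here.  The helper is an illustrative
   assumption, not code from this file.  */
#if 0
static void
stopped_pids_example (int pid, int status)
{
  int cached_status;

  /* A stop arrived for a pid nobody claims yet: remember it.  */
  add_to_pid_list (&stopped_pids, pid, status);

  /* Later, when the clone event names that pid, collect the stop
     from the list instead of calling waitpid again.  */
  if (pull_pid_from_list (&stopped_pids, pid, &cached_status))
    gdb_assert (WIFSTOPPED (cached_status));
}
#endif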

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)
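
/* A minimal sketch (compiled out): async mode is detected purely by
   whether the event pipe exists, so creating the pipe is what flips
   target_is_async_p.  The real async setup also registers the read
   end with the event loop and adjusts fd flags; the bare pipe call
   below is a simplifying assumption.  */
#if 0
static void
event_pipe_example (void)
{
  gdb_assert (!target_is_async_p ());

  if (pipe (linux_event_pipe) != 0)
    return;			/* Error handling elided in this sketch.  */

  gdb_assert (target_is_async_p ());
}
#endif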

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return 1 if HEADER describes a 64-bit ELF file, 0 if it describes
   a 32-bit ELF file, and -1 if it is not an ELF header at all.
   Store the ELF machine in *MACHINE (EM_NONE if not ELF).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
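
/* A minimal sketch (compiled out) of a typical use of the predicate
   above: choosing a register layout for an attached process.  The
   helper name is an illustrative assumption.  */
#if 0
static void
elf_64_example (int pid)
{
  unsigned int machine;
  int is_elf64 = linux_pid_exe_is_elf_64_file (pid, &machine);

  if (is_elf64 < 0)
    return;	/* /proc/PID/exe unreadable, or not ELF at all.  */

  /* Here is_elf64 is 1 for ELFCLASS64 images and 0 otherwise, and
     MACHINE holds e_machine (e.g. EM_X86_64).  */
}
#endif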

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and return 0 so as
   not to report the trap to higher layers).  */

static int
handle_extended_wait (struct lwp_info *event_lwp, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_lwp);
  struct lwp_info *new_lwp;

  if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
      || (event == PTRACE_EVENT_CLONE))
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      /* Get the pid of the new lwp.  */
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  struct process_info *parent_proc;
	  struct process_info *child_proc;
	  struct lwp_info *child_lwp;
	  struct thread_info *child_thr;
	  struct target_desc *tdesc;

	  ptid = ptid_build (new_pid, new_pid, 0);

	  if (debug_threads)
	    {
	      debug_printf ("HEW: Got fork event from LWP %ld, "
			    "new child is %d\n",
			    ptid_get_lwp (ptid_of (event_thr)),
			    ptid_get_pid (ptid));
	    }

	  /* Add the new process to the tables and clone the breakpoint
	     lists of the parent.  We need to do this even if the new process
	     will be detached, since we will need the process object and the
	     breakpoints to remove any breakpoints from memory when we
	     detach, and the client side will access registers.  */
	  child_proc = linux_add_process (new_pid, 0);
	  gdb_assert (child_proc != NULL);
	  child_lwp = add_lwp (ptid);
	  gdb_assert (child_lwp != NULL);
	  child_lwp->stopped = 1;
	  child_lwp->must_set_ptrace_flags = 1;
	  child_lwp->status_pending_p = 0;
	  child_thr = get_lwp_thread (child_lwp);
	  child_thr->last_resume_kind = resume_stop;
	  parent_proc = get_thread_process (event_thr);
	  child_proc->attached = parent_proc->attached;
	  clone_all_breakpoints (&child_proc->breakpoints,
				 &child_proc->raw_breakpoints,
				 parent_proc->breakpoints);

	  tdesc = xmalloc (sizeof (struct target_desc));
	  copy_target_description (tdesc, parent_proc->tdesc);
	  child_proc->tdesc = tdesc;

	  /* Clone arch-specific process data.  */
	  if (the_low_target.new_fork != NULL)
	    the_low_target.new_fork (parent_proc, child_proc);

	  /* Save fork info in the parent thread.  */
	  if (event == PTRACE_EVENT_FORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
	  else if (event == PTRACE_EVENT_VFORK)
	    event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;

	  event_lwp->waitstatus.value.related_pid = ptid;

	  /* The status_pending field contains bits denoting the
	     extended event, so when the pending event is handled,
	     the handler will look at lwp->waitstatus.  */
	  event_lwp->status_pending_p = 1;
	  event_lwp->status_pending = wstat;

	  /* Report the event.  */
	  return 0;
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}

      /* Don't report the event.  */
      return 1;
    }
  else if (event == PTRACE_EVENT_VFORK_DONE)
    {
      event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;

      /* Report the event.  */
      return 0;
    }

  internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
}
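
/* A minimal sketch (compiled out): the extended events decoded above
   only arrive if the matching ptrace options were set on the tracee.
   gdbserver requests them through its nat/linux-ptrace helpers; the
   direct PTRACE_SETOPTIONS call below is an illustrative assumption
   showing which option bits are involved.  */
#if 0
static void
extended_events_example (pid_t pid)
{
  ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	  (PTRACE_TYPE_ARG4) (PTRACE_O_TRACEFORK
			      | PTRACE_O_TRACEVFORK
			      | PTRACE_O_TRACECLONE
			      | PTRACE_O_TRACEVFORKDONE));
}
#endif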

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by software breakpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by hardware "
				"breakpoint/watchpoint\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: %s stopped by trace\n",
				target_pid_to_str (ptid_of (thr)));
		}
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells the thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
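
/* A minimal sketch (compiled out) of the i386 case described in the
   comment above check_stopped_by_breakpoint, in concrete numbers:
   the breakpoint instruction (int3) is one byte, so
   decr_pc_after_break is 1 and the reported PC is one past the
   breakpoint address.  The addresses are hypothetical.  */
#if 0
static void
decr_pc_example (void)
{
  CORE_ADDR bp_addr = 0x08048000;	/* Hypothetical breakpoint address.  */
  CORE_ADDR reported_pc = bp_addr + 1;	/* $eip after hitting int3.  */
  CORE_ADDR decr_pc = 1;		/* the_low_target.decr_pc_after_break.  */

  gdb_assert (reported_pc - decr_pc == bp_addr);
}
#endif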

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    the_low_target.new_thread (lwp);

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}
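
/* A minimal sketch (compiled out) of the bare attach protocol that
   linux_attach_lwp wraps: PTRACE_ATTACH queues a SIGSTOP, and a
   plain attacher would collect it synchronously.  As the comment
   above explains, gdbserver instead defers collecting the stop; this
   synchronous version is an illustrative assumption.  */
#if 0
static void
attach_example (int lwpid)
{
  int status;

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) 0) != 0)
    return;			/* errno describes the failure.  */

  if (my_waitpid (lwpid, &status, __WALL) == lwpid
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      /* The LWP is now in a ptrace stop and can be controlled.  */
    }
}
#endif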

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL:  PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}
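
/* A minimal sketch (compiled out): kill_lwp (declared above) has to
   target a single thread, not the whole thread group, so on kernels
   that provide it, it reduces to the tkill syscall rather than
   kill.  This fallback-free version is an illustrative assumption;
   the real helper also copes with kernels lacking tkill.  */
#if 0
static int
kill_lwp_example (unsigned long lwpid, int signo)
{
  return syscall (__NR_tkill, lwpid, signo);
}
#endif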

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait status on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    }
  while (res > 0 && WIFSTOPPED (wstat));

  /* Even if it was stopped, the child may have already disappeared.
     E.g., if it was killed by SIGKILL.  */
  if (res < 0 && errno != ECHILD)
    perror_with_name ("kill_wait_lwp");
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Freeing all private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* The arguments passed to iterate_over_lwps.  */

struct iterate_over_lwps_args
{
  /* The FILTER argument passed to iterate_over_lwps.  */
  ptid_t filter;

  /* The CALLBACK argument passed to iterate_over_lwps.  */
  iterate_over_lwps_ftype *callback;

  /* The DATA argument passed to iterate_over_lwps.  */
  void *data;
};

/* Callback for find_inferior used by iterate_over_lwps to filter
   calls to the callback supplied to that function.  Returning a
   nonzero value causes find_inferiors to stop iterating and return
   the current inferior_list_entry.  Returning zero indicates that
   find_inferiors should continue iterating.  */

static int
iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
{
  struct iterate_over_lwps_args *args
    = (struct iterate_over_lwps_args *) args_p;

  if (ptid_match (entry->id, args->filter))
    {
      struct thread_info *thr = (struct thread_info *) entry;
      struct lwp_info *lwp = get_thread_lwp (thr);

      return (*args->callback) (lwp, args->data);
    }

  return 0;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct iterate_over_lwps_args args = {filter, callback, data};
  struct inferior_list_entry *entry;

  entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
  if (entry == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) entry);
}
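
/* A minimal sketch (compiled out) of driving iterate_over_lwps: the
   callback returns nonzero to stop the walk, zero to continue.
   Counting the stopped LWPs of process PID, with illustrative helper
   names, might look like this.  */
#if 0
static int
count_stopped_cb (struct lwp_info *lwp, void *data)
{
  if (lwp->stopped)
    (*(int *) data)++;
  return 0;			/* Keep iterating.  */
}

static int
count_stopped_example (int pid)
{
  int count = 0;

  iterate_over_lwps (pid_to_ptid (pid), count_stopped_cb, &count);
  return count;
}
#endif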

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */
1780
1781 static int
1782 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1783 {
1784 struct thread_info *saved_thread;
1785
1786 saved_thread = current_thread;
1787 current_thread = get_lwp_thread (lwp);
1788
1789 if ((wstat == NULL
1790 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1791 && supports_fast_tracepoints ()
1792 && agent_loaded_p ())
1793 {
1794 struct fast_tpoint_collect_status status;
1795 int r;
1796
1797 if (debug_threads)
1798 debug_printf ("Checking whether LWP %ld needs to move out of the "
1799 "jump pad.\n",
1800 lwpid_of (current_thread));
1801
1802 r = linux_fast_tracepoint_collecting (lwp, &status);
1803
1804 if (wstat == NULL
1805 || (WSTOPSIG (*wstat) != SIGILL
1806 && WSTOPSIG (*wstat) != SIGFPE
1807 && WSTOPSIG (*wstat) != SIGSEGV
1808 && WSTOPSIG (*wstat) != SIGBUS))
1809 {
1810 lwp->collecting_fast_tracepoint = r;
1811
1812 if (r != 0)
1813 {
1814 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1815 {
1816 /* Haven't executed the original instruction yet.
1817 Set breakpoint there, and wait till it's hit,
1818 then single-step until exiting the jump pad. */
1819 lwp->exit_jump_pad_bkpt
1820 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1821 }
1822
1823 if (debug_threads)
1824 debug_printf ("Checking whether LWP %ld needs to move out of "
1825 "the jump pad...it does\n",
1826 lwpid_of (current_thread));
1827 current_thread = saved_thread;
1828
1829 return 1;
1830 }
1831 }
1832 else
1833 {
1834 /* If we get a synchronous signal while collecting, *and*
1835 while executing the (relocated) original instruction,
1836 reset the PC to point at the tpoint address, before
1837 reporting to GDB. Otherwise, it's an IPA lib bug: just
1838 report the signal to GDB, and pray for the best. */
1839
1840 lwp->collecting_fast_tracepoint = 0;
1841
1842 if (r != 0
1843 && (status.adjusted_insn_addr <= lwp->stop_pc
1844 && lwp->stop_pc < status.adjusted_insn_addr_end))
1845 {
1846 siginfo_t info;
1847 struct regcache *regcache;
1848
1849 /* The si_addr on a few signals references the address
1850 of the faulting instruction. Adjust that as
1851 well. */
1852 if ((WSTOPSIG (*wstat) == SIGILL
1853 || WSTOPSIG (*wstat) == SIGFPE
1854 || WSTOPSIG (*wstat) == SIGBUS
1855 || WSTOPSIG (*wstat) == SIGSEGV)
1856 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1857 (PTRACE_TYPE_ARG3) 0, &info) == 0
1858 /* Final check just to make sure we don't clobber
1859 the siginfo of non-kernel-sent signals. */
1860 && (uintptr_t) info.si_addr == lwp->stop_pc)
1861 {
1862 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1863 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1864 (PTRACE_TYPE_ARG3) 0, &info);
1865 }
1866
1867 regcache = get_thread_regcache (current_thread, 1);
1868 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1869 lwp->stop_pc = status.tpoint_addr;
1870
1871 /* Cancel any fast tracepoint lock this thread was
1872 holding. */
1873 force_unlock_trace_buffer ();
1874 }
1875
1876 if (lwp->exit_jump_pad_bkpt != NULL)
1877 {
1878 if (debug_threads)
1879 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1880 "stopping all threads momentarily.\n");
1881
1882 stop_all_lwps (1, lwp);
1883
1884 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1885 lwp->exit_jump_pad_bkpt = NULL;
1886
1887 unstop_all_lwps (1, lwp);
1888
1889 gdb_assert (lwp->suspended >= 0);
1890 }
1891 }
1892 }
1893
1894 if (debug_threads)
1895 debug_printf ("Checking whether LWP %ld needs to move out of the "
1896 "jump pad...no\n",
1897 lwpid_of (current_thread));
1898
1899 current_thread = saved_thread;
1900 return 0;
1901 }
1902
1903 /* Enqueue one signal in the "signals to report later when out of the
1904 jump pad" list. */
1905
1906 static void
1907 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1908 {
1909 struct pending_signals *p_sig;
1910 struct thread_info *thread = get_lwp_thread (lwp);
1911
1912 if (debug_threads)
1913 debug_printf ("Deferring signal %d for LWP %ld.\n",
1914 WSTOPSIG (*wstat), lwpid_of (thread));
1915
1916 if (debug_threads)
1917 {
1918 struct pending_signals *sig;
1919
1920 for (sig = lwp->pending_signals_to_report;
1921 sig != NULL;
1922 sig = sig->prev)
1923 debug_printf (" Already queued %d\n",
1924 sig->signal);
1925
1926 debug_printf (" (no more currently queued signals)\n");
1927 }
1928
1929 /* Don't enqueue non-RT signals if they are already in the deferred
1930      queue.  (SIGSTOP is the easiest signal to see ending up here
1931      twice.)  */
1932 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1933 {
1934 struct pending_signals *sig;
1935
1936 for (sig = lwp->pending_signals_to_report;
1937 sig != NULL;
1938 sig = sig->prev)
1939 {
1940 if (sig->signal == WSTOPSIG (*wstat))
1941 {
1942 if (debug_threads)
1943 debug_printf ("Not requeuing already queued non-RT signal %d"
1944 " for LWP %ld\n",
1945 sig->signal,
1946 lwpid_of (thread));
1947 return;
1948 }
1949 }
1950 }
1951
1952 p_sig = xmalloc (sizeof (*p_sig));
1953 p_sig->prev = lwp->pending_signals_to_report;
1954 p_sig->signal = WSTOPSIG (*wstat);
1955 memset (&p_sig->info, 0, sizeof (siginfo_t));
1956 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1957 &p_sig->info);
1958
1959 lwp->pending_signals_to_report = p_sig;
1960 }
1961
1962 /* Dequeue one signal from the "signals to report later when out of
1963 the jump pad" list. */
1964
1965 static int
1966 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1967 {
1968 struct thread_info *thread = get_lwp_thread (lwp);
1969
1970 if (lwp->pending_signals_to_report != NULL)
1971 {
1972 struct pending_signals **p_sig;
1973
1974 p_sig = &lwp->pending_signals_to_report;
1975 while ((*p_sig)->prev != NULL)
1976 p_sig = &(*p_sig)->prev;
1977
1978 *wstat = W_STOPCODE ((*p_sig)->signal);
1979 if ((*p_sig)->info.si_signo != 0)
1980 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1981 &(*p_sig)->info);
1982 free (*p_sig);
1983 *p_sig = NULL;
1984
1985 if (debug_threads)
1986 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1987 WSTOPSIG (*wstat), lwpid_of (thread));
1988
1989 if (debug_threads)
1990 {
1991 struct pending_signals *sig;
1992
1993 for (sig = lwp->pending_signals_to_report;
1994 sig != NULL;
1995 sig = sig->prev)
1996 debug_printf (" Still queued %d\n",
1997 sig->signal);
1998
1999 debug_printf (" (no more queued signals)\n");
2000 }
2001
2002 return 1;
2003 }
2004
2005 return 0;
2006 }
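
/* The deferred-signal list above is pushed LIFO (each new entry's PREV
   points at the previous head), but the dequeue side walks to the
   oldest entry, so signals are re-reported in FIFO order.  A
   compressed sketch of the pairing; the driver is hypothetical, the
   helpers are this file's, and the LWP is assumed ptrace-stopped so
   the siginfo fetch in the enqueue path can work.  */
#if 0
static void
deferred_signal_order_sketch (struct lwp_info *lwp)
{
  int wstat;

  wstat = W_STOPCODE (SIGUSR1);		/* Deferred first...  */
  enqueue_one_deferred_signal (lwp, &wstat);
  wstat = W_STOPCODE (SIGUSR2);
  enqueue_one_deferred_signal (lwp, &wstat);

  dequeue_one_deferred_signal (lwp, &wstat);
  gdb_assert (WSTOPSIG (wstat) == SIGUSR1);	/* ...re-reported first.  */
}
#endif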
2007
2008 /* Fetch the possibly triggered data watchpoint info and store it in
2009 CHILD.
2010
2011    On some archs, like x86, that use debug registers to set
2012    watchpoints, the way to know which watched address trapped is to
2013    check the registers that were used to program the watchpoints.
2014    The problem is that between setting the watchpoint and reading
2015    back which data address trapped, the user may change the set of
2016    watchpoints, and, as a consequence, GDB changes the debug
2017    registers in the inferior.  To avoid reading back a stale
2018    stopped-data-address when that happens, we cache in CHILD the
2019    fact that a watchpoint trapped, and the corresponding data
2020    address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
2021    changes the debug registers meanwhile, we can rely on the cache.  */
2022
2023 static int
2024 check_stopped_by_watchpoint (struct lwp_info *child)
2025 {
2026 if (the_low_target.stopped_by_watchpoint != NULL)
2027 {
2028 struct thread_info *saved_thread;
2029
2030 saved_thread = current_thread;
2031 current_thread = get_lwp_thread (child);
2032
2033 if (the_low_target.stopped_by_watchpoint ())
2034 {
2035 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2036
2037 if (the_low_target.stopped_data_address != NULL)
2038 child->stopped_data_address
2039 = the_low_target.stopped_data_address ();
2040 else
2041 child->stopped_data_address = 0;
2042 }
2043
2044 current_thread = saved_thread;
2045 }
2046
2047 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2048 }
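
/* For reference, a simplified sketch of what a low target's hook pair
   typically looks like on a debug-register machine such as x86 (the
   real x86 code lives in linux-x86-low.c; read_debug_register below
   is a hypothetical accessor).  On x86, DR6 bits 0-3 record which of
   DR0-DR3 fired, and the matching DRn holds the watched address.
   Note this simplified form doesn't separate hardware breakpoints
   from watchpoints; the real hooks consult DR7's R/W bits too.  */
#if 0
static CORE_ADDR sketch_stopped_data_address_cache;

static int
sketch_stopped_by_watchpoint (void)
{
  unsigned long dr6 = read_debug_register (6);	/* Hypothetical.  */
  int i;

  for (i = 0; i < 4; i++)
    if (dr6 & (1UL << i))
      {
	sketch_stopped_data_address_cache = read_debug_register (i);
	return 1;
      }
  return 0;
}

static CORE_ADDR
sketch_stopped_data_address (void)
{
  return sketch_stopped_data_address_cache;
}
#endif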
2049
2050 /* Return the ptrace options that we want to try to enable. */
2051
2052 static int
2053 linux_low_ptrace_options (int attached)
2054 {
2055 int options = 0;
2056
2057 if (!attached)
2058 options |= PTRACE_O_EXITKILL;
2059
2060 if (report_fork_events)
2061 options |= PTRACE_O_TRACEFORK;
2062
2063 if (report_vfork_events)
2064 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2065
2066 return options;
2067 }
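
/* These bits are ultimately installed in the tracee with
   PTRACE_SETOPTIONS (here via linux_enable_event_reporting, in
   nat/linux-ptrace.c).  Schematically, and assuming the tracee is
   already ptrace-stopped, that amounts to:  */
#if 0
static void
enable_ptrace_options_sketch (pid_t pid, int attached)
{
  int options = linux_low_ptrace_options (attached);

  if (ptrace (PTRACE_SETOPTIONS, pid, (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (uintptr_t) options) != 0)
    perror ("PTRACE_SETOPTIONS");
}
#endif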
2068
2069 /* Do low-level handling of the event, and check if we should go on
2070    and pass it on to the caller.  Return the affected lwp if we
2071    should, or NULL otherwise.  */
2072
2073 static struct lwp_info *
2074 linux_low_filter_event (int lwpid, int wstat)
2075 {
2076 struct lwp_info *child;
2077 struct thread_info *thread;
2078 int have_stop_pc = 0;
2079
2080 child = find_lwp_pid (pid_to_ptid (lwpid));
2081
2082 /* If we didn't find a process, one of two things presumably happened:
2083 - A process we started and then detached from has exited. Ignore it.
2084 - A process we are controlling has forked and the new child's stop
2085 was reported to us by the kernel. Save its PID. */
2086 if (child == NULL && WIFSTOPPED (wstat))
2087 {
2088 add_to_pid_list (&stopped_pids, lwpid, wstat);
2089 return NULL;
2090 }
2091 else if (child == NULL)
2092 return NULL;
2093
2094 thread = get_lwp_thread (child);
2095
2096 child->stopped = 1;
2097
2098 child->last_status = wstat;
2099
2100 /* Check if the thread has exited. */
2101 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2102 {
2103 if (debug_threads)
2104 debug_printf ("LLFE: %d exited.\n", lwpid);
2105 if (num_lwps (pid_of (thread)) > 1)
2106 {
2107
2108 /* If there is at least one more LWP, then the exit signal was
2109 not the end of the debugged application and should be
2110 ignored. */
2111 delete_lwp (child);
2112 return NULL;
2113 }
2114 else
2115 {
2116 	  /* This was the last lwp in the process.  Events are
2117 	     serialized to the GDB core, so we can't report this one
2118 	     right now; but the core and the other target layers will
2119 	     want to be notified about the exit code/signal.  Leave the
2120 	     status pending for the next time we're able to report
2121 	     it.  */
2122 mark_lwp_dead (child, wstat);
2123 return child;
2124 }
2125 }
2126
2127 gdb_assert (WIFSTOPPED (wstat));
2128
2129 if (WIFSTOPPED (wstat))
2130 {
2131 struct process_info *proc;
2132
2133       /* Architecture-specific setup after the inferior is running.  This
2134 needs to happen after we have attached to the inferior and it
2135 is stopped for the first time, but before we access any
2136 inferior registers. */
2137 proc = find_process_pid (pid_of (thread));
2138 if (proc->priv->new_inferior)
2139 {
2140 struct thread_info *saved_thread;
2141
2142 saved_thread = current_thread;
2143 current_thread = thread;
2144
2145 the_low_target.arch_setup ();
2146
2147 current_thread = saved_thread;
2148
2149 proc->priv->new_inferior = 0;
2150 }
2151 }
2152
2153 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2154 {
2155 struct process_info *proc = find_process_pid (pid_of (thread));
2156 int options = linux_low_ptrace_options (proc->attached);
2157
2158 linux_enable_event_reporting (lwpid, options);
2159 child->must_set_ptrace_flags = 0;
2160 }
2161
2162 /* Be careful to not overwrite stop_pc until
2163 check_stopped_by_breakpoint is called. */
2164 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2165 && linux_is_extended_waitstatus (wstat))
2166 {
2167 child->stop_pc = get_pc (child);
2168 if (handle_extended_wait (child, wstat))
2169 {
2170 /* The event has been handled, so just return without
2171 reporting it. */
2172 return NULL;
2173 }
2174 }
2175
2176 /* Check first whether this was a SW/HW breakpoint before checking
2177 watchpoints, because at least s390 can't tell the data address of
2178 hardware watchpoint hits, and returns stopped-by-watchpoint as
2179 long as there's a watchpoint set. */
2180 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2181 {
2182 if (check_stopped_by_breakpoint (child))
2183 have_stop_pc = 1;
2184 }
2185
2186 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2187 or hardware watchpoint. Check which is which if we got
2188 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2189 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2190 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
2191 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
2192 check_stopped_by_watchpoint (child);
2193
2194 if (!have_stop_pc)
2195 child->stop_pc = get_pc (child);
2196
2197 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2198 && child->stop_expected)
2199 {
2200 if (debug_threads)
2201 debug_printf ("Expected stop.\n");
2202 child->stop_expected = 0;
2203
2204 if (thread->last_resume_kind == resume_stop)
2205 {
2206 /* We want to report the stop to the core. Treat the
2207 SIGSTOP as a normal event. */
2208 if (debug_threads)
2209 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2210 target_pid_to_str (ptid_of (thread)));
2211 }
2212 else if (stopping_threads != NOT_STOPPING_THREADS)
2213 {
2214 /* Stopping threads. We don't want this SIGSTOP to end up
2215 pending. */
2216 if (debug_threads)
2217 debug_printf ("LLW: SIGSTOP caught for %s "
2218 "while stopping threads.\n",
2219 target_pid_to_str (ptid_of (thread)));
2220 return NULL;
2221 }
2222 else
2223 {
2224 /* This is a delayed SIGSTOP. Filter out the event. */
2225 if (debug_threads)
2226 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2227 child->stepping ? "step" : "continue",
2228 target_pid_to_str (ptid_of (thread)));
2229
2230 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2231 return NULL;
2232 }
2233 }
2234
2235 child->status_pending_p = 1;
2236 child->status_pending = wstat;
2237 return child;
2238 }
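
/* The add_to_pid_list branch above exists because the kernel is free
   to report a new clone's initial stop before the parent's ptrace
   clone/fork event.  A compressed sketch of that handshake (the
   wrapper function is hypothetical; the helpers are this file's):  */
#if 0
static void
early_clone_stop_sketch (int lwpid, int wstat)
{
  if (find_lwp_pid (pid_to_ptid (lwpid)) == NULL && WIFSTOPPED (wstat))
    /* The child's stop arrived first: park it...  */
    add_to_pid_list (&stopped_pids, lwpid, wstat);

  /* ...until the parent's PTRACE_EVENT_CLONE/FORK names LWPID, at
     which point the extended-event handler creates the lwp and
     consumes the parked status.  */
}
#endif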
2239
2240 /* Resume LWPs that are currently stopped without any pending status
2241 to report, but are resumed from the core's perspective. */
2242
2243 static void
2244 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2245 {
2246 struct thread_info *thread = (struct thread_info *) entry;
2247 struct lwp_info *lp = get_thread_lwp (thread);
2248
2249 if (lp->stopped
2250 && !lp->status_pending_p
2251 && thread->last_resume_kind != resume_stop
2252 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2253 {
2254 int step = thread->last_resume_kind == resume_step;
2255
2256 if (debug_threads)
2257 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2258 target_pid_to_str (ptid_of (thread)),
2259 paddress (lp->stop_pc),
2260 step);
2261
2262 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2263 }
2264 }
2265
2266 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2267 match FILTER_PTID (leaving others pending). The PTIDs can be:
2268 minus_one_ptid, to specify any child; a pid PTID, specifying all
2269 lwps of a thread group; or a PTID representing a single lwp. Store
2270 the stop status through the status pointer WSTAT. OPTIONS is
2271 passed to the waitpid call. Return 0 if no event was found and
2272 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2273    were found.  Return the PID of the stopped child otherwise.  */
2274
2275 static int
2276 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2277 int *wstatp, int options)
2278 {
2279 struct thread_info *event_thread;
2280 struct lwp_info *event_child, *requested_child;
2281 sigset_t block_mask, prev_mask;
2282
2283 retry:
2284 /* N.B. event_thread points to the thread_info struct that contains
2285 event_child. Keep them in sync. */
2286 event_thread = NULL;
2287 event_child = NULL;
2288 requested_child = NULL;
2289
2290 /* Check for a lwp with a pending status. */
2291
2292 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2293 {
2294 event_thread = (struct thread_info *)
2295 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2296 if (event_thread != NULL)
2297 event_child = get_thread_lwp (event_thread);
2298 if (debug_threads && event_thread)
2299 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2300 }
2301 else if (!ptid_equal (filter_ptid, null_ptid))
2302 {
2303 requested_child = find_lwp_pid (filter_ptid);
2304
2305 if (stopping_threads == NOT_STOPPING_THREADS
2306 && requested_child->status_pending_p
2307 && requested_child->collecting_fast_tracepoint)
2308 {
2309 enqueue_one_deferred_signal (requested_child,
2310 &requested_child->status_pending);
2311 requested_child->status_pending_p = 0;
2312 requested_child->status_pending = 0;
2313 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2314 }
2315
2316 if (requested_child->suspended
2317 && requested_child->status_pending_p)
2318 {
2319 internal_error (__FILE__, __LINE__,
2320 "requesting an event out of a"
2321 " suspended child?");
2322 }
2323
2324 if (requested_child->status_pending_p)
2325 {
2326 event_child = requested_child;
2327 event_thread = get_lwp_thread (event_child);
2328 }
2329 }
2330
2331 if (event_child != NULL)
2332 {
2333 if (debug_threads)
2334 debug_printf ("Got an event from pending child %ld (%04x)\n",
2335 lwpid_of (event_thread), event_child->status_pending);
2336 *wstatp = event_child->status_pending;
2337 event_child->status_pending_p = 0;
2338 event_child->status_pending = 0;
2339 current_thread = event_thread;
2340 return lwpid_of (event_thread);
2341 }
2342
2343 /* But if we don't find a pending event, we'll have to wait.
2344
2345 We only enter this loop if no process has a pending wait status.
2346 Thus any action taken in response to a wait status inside this
2347 loop is responding as soon as we detect the status, not after any
2348 pending events. */
2349
2350 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2351 all signals while here. */
2352 sigfillset (&block_mask);
2353 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2354
2355 /* Always pull all events out of the kernel. We'll randomly select
2356 an event LWP out of all that have events, to prevent
2357 starvation. */
2358 while (event_child == NULL)
2359 {
2360 pid_t ret = 0;
2361
2362 	 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2363 quirks:
2364
2365 - If the thread group leader exits while other threads in the
2366 thread group still exist, waitpid(TGID, ...) hangs. That
2367 waitpid won't return an exit status until the other threads
2368 in the group are reaped.
2369
2370 - When a non-leader thread execs, that thread just vanishes
2371 without reporting an exit (so we'd hang if we waited for it
2372 explicitly in that case). The exec event is reported to
2373 the TGID pid (although we don't currently enable exec
2374 events). */
2375 errno = 0;
2376 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2377
2378 if (debug_threads)
2379 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2380 ret, errno ? strerror (errno) : "ERRNO-OK");
2381
2382 if (ret > 0)
2383 {
2384 if (debug_threads)
2385 {
2386 debug_printf ("LLW: waitpid %ld received %s\n",
2387 (long) ret, status_to_str (*wstatp));
2388 }
2389
2390 /* Filter all events. IOW, leave all events pending. We'll
2391 randomly select an event LWP out of all that have events
2392 below. */
2393 linux_low_filter_event (ret, *wstatp);
2394 /* Retry until nothing comes out of waitpid. A single
2395 SIGCHLD can indicate more than one child stopped. */
2396 continue;
2397 }
2398
2399 /* Now that we've pulled all events out of the kernel, resume
2400 LWPs that don't have an interesting event to report. */
2401 if (stopping_threads == NOT_STOPPING_THREADS)
2402 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2403
2404 /* ... and find an LWP with a status to report to the core, if
2405 any. */
2406 event_thread = (struct thread_info *)
2407 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2408 if (event_thread != NULL)
2409 {
2410 event_child = get_thread_lwp (event_thread);
2411 *wstatp = event_child->status_pending;
2412 event_child->status_pending_p = 0;
2413 event_child->status_pending = 0;
2414 break;
2415 }
2416
2417 /* Check for zombie thread group leaders. Those can't be reaped
2418 until all other threads in the thread group are. */
2419 check_zombie_leaders ();
2420
2421 /* If there are no resumed children left in the set of LWPs we
2422 want to wait for, bail. We can't just block in
2423 waitpid/sigsuspend, because lwps might have been left stopped
2424 in trace-stop state, and we'd be stuck forever waiting for
2425 their status to change (which would only happen if we resumed
2426 them). Even if WNOHANG is set, this return code is preferred
2427 over 0 (below), as it is more detailed. */
2428 if ((find_inferior (&all_threads,
2429 not_stopped_callback,
2430 &wait_ptid) == NULL))
2431 {
2432 if (debug_threads)
2433 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2434 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2435 return -1;
2436 }
2437
2438 /* No interesting event to report to the caller. */
2439 if ((options & WNOHANG))
2440 {
2441 if (debug_threads)
2442 debug_printf ("WNOHANG set, no event found\n");
2443
2444 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2445 return 0;
2446 }
2447
2448 /* Block until we get an event reported with SIGCHLD. */
2449 if (debug_threads)
2450 debug_printf ("sigsuspend'ing\n");
2451
2452 sigsuspend (&prev_mask);
2453 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2454 goto retry;
2455 }
2456
2457 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2458
2459 current_thread = event_thread;
2460
2461 /* Check for thread exit. */
2462 if (! WIFSTOPPED (*wstatp))
2463 {
2464 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2465
2466 if (debug_threads)
2467 	debug_printf ("LWP %ld is the last lwp of process %d; "
2468 		      "process exiting.\n",
2469 		      lwpid_of (event_thread), pid_of (event_thread));
2470 return lwpid_of (event_thread);
2471 }
2472
2473 return lwpid_of (event_thread);
2474 }
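
/* The loop above is the classic race-free sigsuspend pattern: with
   all signals blocked, drain every pending event with
   waitpid(-1, WNOHANG), and only then atomically unblock and sleep.
   Stripped of the gdbserver bookkeeping (event filtering, resuming
   idle LWPs, zombie-leader checks), the skeleton is the sketch below;
   error handling such as ECHILD is omitted for brevity.  */
#if 0
static pid_t
race_free_wait_sketch (int *wstatp)
{
  sigset_t block_mask, prev_mask;
  pid_t ret;

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while ((ret = waitpid (-1, wstatp, __WALL | WNOHANG)) <= 0)
    /* Nothing pending.  If a SIGCHLD arrived after the waitpid
       above, it is still pending here, so sigsuspend returns
       immediately; there is no lost-wakeup window.  */
    sigsuspend (&prev_mask);

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return ret;
}
#endif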
2475
2476 /* Wait for an event from child(ren) PTID. PTIDs can be:
2477 minus_one_ptid, to specify any child; a pid PTID, specifying all
2478 lwps of a thread group; or a PTID representing a single lwp. Store
2479 the stop status through the status pointer WSTAT. OPTIONS is
2480 passed to the waitpid call. Return 0 if no event was found and
2481 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2482    were found.  Return the PID of the stopped child otherwise.  */
2483
2484 static int
2485 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2486 {
2487 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2488 }
2489
2490 /* Count the LWPs that have had events.  */
2491
2492 static int
2493 count_events_callback (struct inferior_list_entry *entry, void *data)
2494 {
2495 struct thread_info *thread = (struct thread_info *) entry;
2496 struct lwp_info *lp = get_thread_lwp (thread);
2497 int *count = data;
2498
2499 gdb_assert (count != NULL);
2500
2501 /* Count only resumed LWPs that have an event pending. */
2502 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2503 && lp->status_pending_p)
2504 (*count)++;
2505
2506 return 0;
2507 }
2508
2509 /* Select the LWP (if any) that is currently being single-stepped. */
2510
2511 static int
2512 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2513 {
2514 struct thread_info *thread = (struct thread_info *) entry;
2515 struct lwp_info *lp = get_thread_lwp (thread);
2516
2517 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2518 && thread->last_resume_kind == resume_step
2519 && lp->status_pending_p)
2520 return 1;
2521 else
2522 return 0;
2523 }
2524
2525 /* Select the Nth LWP that has had an event. */
2526
2527 static int
2528 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2529 {
2530 struct thread_info *thread = (struct thread_info *) entry;
2531 struct lwp_info *lp = get_thread_lwp (thread);
2532 int *selector = data;
2533
2534 gdb_assert (selector != NULL);
2535
2536 /* Select only resumed LWPs that have an event pending. */
2537 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2538 && lp->status_pending_p)
2539 if ((*selector)-- == 0)
2540 return 1;
2541
2542 return 0;
2543 }
2544
2545 /* Select one LWP out of those that have events pending. */
2546
2547 static void
2548 select_event_lwp (struct lwp_info **orig_lp)
2549 {
2550 int num_events = 0;
2551 int random_selector;
2552 struct thread_info *event_thread = NULL;
2553
2554 /* In all-stop, give preference to the LWP that is being
2555 single-stepped. There will be at most one, and it's the LWP that
2556 the core is most interested in. If we didn't do this, then we'd
2557 have to handle pending step SIGTRAPs somehow in case the core
2558 later continues the previously-stepped thread, otherwise we'd
2559 report the pending SIGTRAP, and the core, not having stepped the
2560 thread, wouldn't understand what the trap was for, and therefore
2561 would report it to the user as a random signal. */
2562 if (!non_stop)
2563 {
2564 event_thread
2565 = (struct thread_info *) find_inferior (&all_threads,
2566 select_singlestep_lwp_callback,
2567 NULL);
2568 if (event_thread != NULL)
2569 {
2570 if (debug_threads)
2571 debug_printf ("SEL: Select single-step %s\n",
2572 target_pid_to_str (ptid_of (event_thread)));
2573 }
2574 }
2575 if (event_thread == NULL)
2576 {
2577 /* No single-stepping LWP. Select one at random, out of those
2578 which have had events. */
2579
2580 /* First see how many events we have. */
2581 find_inferior (&all_threads, count_events_callback, &num_events);
2582 gdb_assert (num_events > 0);
2583
2584 /* Now randomly pick a LWP out of those that have had
2585 events. */
2586 random_selector = (int)
2587 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2588
2589 if (debug_threads && num_events > 1)
2590 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2591 num_events, random_selector);
2592
2593 event_thread
2594 = (struct thread_info *) find_inferior (&all_threads,
2595 select_event_lwp_callback,
2596 &random_selector);
2597 }
2598
2599 if (event_thread != NULL)
2600 {
2601 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2602
2603 /* Switch the event LWP. */
2604 *orig_lp = event_lp;
2605 }
2606 }
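
/* The random selection above is the usual unbiased two-pass pick:
   count the N candidates, draw K uniformly in [0, N), then walk the
   list again decrementing K until it hits zero.  Scaling rand () by
   N / (RAND_MAX + 1.0) maps its whole range onto [0, N) and avoids
   the low-value skew that 'rand () % N' has whenever RAND_MAX + 1 is
   not a multiple of N.  As a self-contained sketch:  */
#if 0
static int
uniform_pick_sketch (int num_candidates)
{
  /* Maps rand ()'s range [0, RAND_MAX] onto [0, num_candidates).  */
  return (int) ((num_candidates * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif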
2607
2608 /* Decrement the suspend count of an LWP. */
2609
2610 static int
2611 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2612 {
2613 struct thread_info *thread = (struct thread_info *) entry;
2614 struct lwp_info *lwp = get_thread_lwp (thread);
2615
2616 /* Ignore EXCEPT. */
2617 if (lwp == except)
2618 return 0;
2619
2620 lwp->suspended--;
2621
2622 gdb_assert (lwp->suspended >= 0);
2623 return 0;
2624 }
2625
2626 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2627    non-NULL.  */
2628
2629 static void
2630 unsuspend_all_lwps (struct lwp_info *except)
2631 {
2632 find_inferior (&all_threads, unsuspend_one_lwp, except);
2633 }
2634
2635 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2636 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2637 void *data);
2638 static int lwp_running (struct inferior_list_entry *entry, void *data);
2639 static ptid_t linux_wait_1 (ptid_t ptid,
2640 struct target_waitstatus *ourstatus,
2641 int target_options);
2642
2643 /* Stabilize threads (move out of jump pads).
2644
2645 If a thread is midway collecting a fast tracepoint, we need to
2646 finish the collection and move it out of the jump pad before
2647 reporting the signal.
2648
2649 This avoids recursion while collecting (when a signal arrives
2650 midway, and the signal handler itself collects), which would trash
2651 the trace buffer. In case the user set a breakpoint in a signal
2652    handler, this avoids the backtrace showing the jump pad, etc.
2653 Most importantly, there are certain things we can't do safely if
2654    threads are stopped in a jump pad (or in its callees).  For
2655 example:
2656
2657    - starting a new trace run.  A thread still collecting the
2658    previous run could trash the trace buffer when resumed.  The trace
2659    buffer control structures would have been reset but the thread had
2660    no way to tell.  The thread could even be midway through memcpy'ing
2661    to the buffer, which would mean that when resumed, it would clobber
2662    the trace buffer that had been set up for the new run.
2663
2664 - we can't rewrite/reuse the jump pads for new tracepoints
2665    safely.  Say you do tstart while a thread is stopped midway through
2666 collecting. When the thread is later resumed, it finishes the
2667 collection, and returns to the jump pad, to execute the original
2668 instruction that was under the tracepoint jump at the time the
2669    older run had been started.  If the jump pad had since been
2670    rewritten for something else in the new run, the thread would now
2671 execute the wrong / random instructions. */
2672
2673 static void
2674 linux_stabilize_threads (void)
2675 {
2676 struct thread_info *saved_thread;
2677 struct thread_info *thread_stuck;
2678
2679 thread_stuck
2680 = (struct thread_info *) find_inferior (&all_threads,
2681 stuck_in_jump_pad_callback,
2682 NULL);
2683 if (thread_stuck != NULL)
2684 {
2685 if (debug_threads)
2686 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2687 lwpid_of (thread_stuck));
2688 return;
2689 }
2690
2691 saved_thread = current_thread;
2692
2693 stabilizing_threads = 1;
2694
2695 /* Kick 'em all. */
2696 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2697
2698 /* Loop until all are stopped out of the jump pads. */
2699 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2700 {
2701 struct target_waitstatus ourstatus;
2702 struct lwp_info *lwp;
2703 int wstat;
2704
2705       /* Note that we go through the full wait event loop.  While
2706 	 moving threads out of the jump pad, we need to be able to step
2707 over internal breakpoints and such. */
2708 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2709
2710 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2711 {
2712 lwp = get_thread_lwp (current_thread);
2713
2714 /* Lock it. */
2715 lwp->suspended++;
2716
2717 if (ourstatus.value.sig != GDB_SIGNAL_0
2718 || current_thread->last_resume_kind == resume_stop)
2719 {
2720 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2721 enqueue_one_deferred_signal (lwp, &wstat);
2722 }
2723 }
2724 }
2725
2726 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2727
2728 stabilizing_threads = 0;
2729
2730 current_thread = saved_thread;
2731
2732 if (debug_threads)
2733 {
2734 thread_stuck
2735 = (struct thread_info *) find_inferior (&all_threads,
2736 stuck_in_jump_pad_callback,
2737 NULL);
2738 if (thread_stuck != NULL)
2739 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2740 lwpid_of (thread_stuck));
2741 }
2742 }
2743
2744 static void async_file_mark (void);
2745
2746 /* Convenience function that is called when the kernel reports an
2747 event that is not passed out to GDB. */
2748
2749 static ptid_t
2750 ignore_event (struct target_waitstatus *ourstatus)
2751 {
2752 /* If we got an event, there may still be others, as a single
2753 SIGCHLD can indicate more than one child stopped. This forces
2754 another target_wait call. */
2755 async_file_mark ();
2756
2757 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2758 return null_ptid;
2759 }
2760
2761 /* Return non-zero if WAITSTATUS reflects an extended linux
2762 event. Otherwise, return zero. */
2763
2764 static int
2765 extended_event_reported (const struct target_waitstatus *waitstatus)
2766 {
2767 if (waitstatus == NULL)
2768 return 0;
2769
2770 return (waitstatus->kind == TARGET_WAITKIND_FORKED
2771 || waitstatus->kind == TARGET_WAITKIND_VFORKED
2772 || waitstatus->kind == TARGET_WAITKIND_VFORK_DONE);
2773 }
2774
2775 /* Wait for process, returns status. */
2776
2777 static ptid_t
2778 linux_wait_1 (ptid_t ptid,
2779 struct target_waitstatus *ourstatus, int target_options)
2780 {
2781 int w;
2782 struct lwp_info *event_child;
2783 int options;
2784 int pid;
2785 int step_over_finished;
2786 int bp_explains_trap;
2787 int maybe_internal_trap;
2788 int report_to_gdb;
2789 int trace_event;
2790 int in_step_range;
2791
2792 if (debug_threads)
2793 {
2794 debug_enter ();
2795 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2796 }
2797
2798 /* Translate generic target options into linux options. */
2799 options = __WALL;
2800 if (target_options & TARGET_WNOHANG)
2801 options |= WNOHANG;
2802
2803 bp_explains_trap = 0;
2804 trace_event = 0;
2805 in_step_range = 0;
2806 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2807
2808 if (ptid_equal (step_over_bkpt, null_ptid))
2809 pid = linux_wait_for_event (ptid, &w, options);
2810 else
2811 {
2812 if (debug_threads)
2813 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2814 target_pid_to_str (step_over_bkpt));
2815 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2816 }
2817
2818 if (pid == 0)
2819 {
2820 gdb_assert (target_options & TARGET_WNOHANG);
2821
2822 if (debug_threads)
2823 {
2824 debug_printf ("linux_wait_1 ret = null_ptid, "
2825 "TARGET_WAITKIND_IGNORE\n");
2826 debug_exit ();
2827 }
2828
2829 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2830 return null_ptid;
2831 }
2832 else if (pid == -1)
2833 {
2834 if (debug_threads)
2835 {
2836 debug_printf ("linux_wait_1 ret = null_ptid, "
2837 "TARGET_WAITKIND_NO_RESUMED\n");
2838 debug_exit ();
2839 }
2840
2841 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2842 return null_ptid;
2843 }
2844
2845 event_child = get_thread_lwp (current_thread);
2846
2847 /* linux_wait_for_event only returns an exit status for the last
2848 child of a process. Report it. */
2849 if (WIFEXITED (w) || WIFSIGNALED (w))
2850 {
2851 if (WIFEXITED (w))
2852 {
2853 ourstatus->kind = TARGET_WAITKIND_EXITED;
2854 ourstatus->value.integer = WEXITSTATUS (w);
2855
2856 if (debug_threads)
2857 {
2858 debug_printf ("linux_wait_1 ret = %s, exited with "
2859 "retcode %d\n",
2860 target_pid_to_str (ptid_of (current_thread)),
2861 WEXITSTATUS (w));
2862 debug_exit ();
2863 }
2864 }
2865 else
2866 {
2867 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2868 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2869
2870 if (debug_threads)
2871 {
2872 debug_printf ("linux_wait_1 ret = %s, terminated with "
2873 "signal %d\n",
2874 target_pid_to_str (ptid_of (current_thread)),
2875 WTERMSIG (w));
2876 debug_exit ();
2877 }
2878 }
2879
2880 return ptid_of (current_thread);
2881 }
2882
2883 /* If step-over executes a breakpoint instruction, it means a
2884 gdb/gdbserver breakpoint had been planted on top of a permanent
2885 breakpoint. The PC has been adjusted by
2886 check_stopped_by_breakpoint to point at the breakpoint address.
2887 Advance the PC manually past the breakpoint, otherwise the
2888 program would keep trapping the permanent breakpoint forever. */
2889 if (!ptid_equal (step_over_bkpt, null_ptid)
2890 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2891 {
2892 unsigned int increment_pc = the_low_target.breakpoint_len;
2893
2894 if (debug_threads)
2895 {
2896 debug_printf ("step-over for %s executed software breakpoint\n",
2897 target_pid_to_str (ptid_of (current_thread)));
2898 }
2899
2900 if (increment_pc != 0)
2901 {
2902 struct regcache *regcache
2903 = get_thread_regcache (current_thread, 1);
2904
2905 event_child->stop_pc += increment_pc;
2906 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2907
2908 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2909 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2910 }
2911 }
2912
2913 /* If this event was not handled before, and is not a SIGTRAP, we
2914 report it. SIGILL and SIGSEGV are also treated as traps in case
2915 a breakpoint is inserted at the current PC. If this target does
2916 not support internal breakpoints at all, we also report the
2917 SIGTRAP without further processing; it's of no concern to us. */
2918 maybe_internal_trap
2919 = (supports_breakpoints ()
2920 && (WSTOPSIG (w) == SIGTRAP
2921 || ((WSTOPSIG (w) == SIGILL
2922 || WSTOPSIG (w) == SIGSEGV)
2923 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2924
2925 if (maybe_internal_trap)
2926 {
2927 /* Handle anything that requires bookkeeping before deciding to
2928 report the event or continue waiting. */
2929
2930 /* First check if we can explain the SIGTRAP with an internal
2931 breakpoint, or if we should possibly report the event to GDB.
2932 Do this before anything that may remove or insert a
2933 breakpoint. */
2934 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2935
2936       /* We have a SIGTRAP; possibly a step-over dance has just
2937 finished. If so, tweak the state machine accordingly,
2938 reinsert breakpoints and delete any reinsert (software
2939 single-step) breakpoints. */
2940 step_over_finished = finish_step_over (event_child);
2941
2942 /* Now invoke the callbacks of any internal breakpoints there. */
2943 check_breakpoints (event_child->stop_pc);
2944
2945 /* Handle tracepoint data collecting. This may overflow the
2946 trace buffer, and cause a tracing stop, removing
2947 breakpoints. */
2948 trace_event = handle_tracepoints (event_child);
2949
2950 if (bp_explains_trap)
2951 {
2952 /* If we stepped or ran into an internal breakpoint, we've
2953 already handled it. So next time we resume (from this
2954 PC), we should step over it. */
2955 if (debug_threads)
2956 debug_printf ("Hit a gdbserver breakpoint.\n");
2957
2958 if (breakpoint_here (event_child->stop_pc))
2959 event_child->need_step_over = 1;
2960 }
2961 }
2962 else
2963 {
2964       /* We have some other signal; possibly a step-over dance was in
2965 progress, and it should be cancelled too. */
2966 step_over_finished = finish_step_over (event_child);
2967 }
2968
2969 /* We have all the data we need. Either report the event to GDB, or
2970 resume threads and keep waiting for more. */
2971
2972 /* If we're collecting a fast tracepoint, finish the collection and
2973 move out of the jump pad before delivering a signal. See
2974 linux_stabilize_threads. */
2975
2976 if (WIFSTOPPED (w)
2977 && WSTOPSIG (w) != SIGTRAP
2978 && supports_fast_tracepoints ()
2979 && agent_loaded_p ())
2980 {
2981 if (debug_threads)
2982 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2983 "to defer or adjust it.\n",
2984 WSTOPSIG (w), lwpid_of (current_thread));
2985
2986 /* Allow debugging the jump pad itself. */
2987 if (current_thread->last_resume_kind != resume_step
2988 && maybe_move_out_of_jump_pad (event_child, &w))
2989 {
2990 enqueue_one_deferred_signal (event_child, &w);
2991
2992 if (debug_threads)
2993 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2994 WSTOPSIG (w), lwpid_of (current_thread));
2995
2996 linux_resume_one_lwp (event_child, 0, 0, NULL);
2997
2998 return ignore_event (ourstatus);
2999 }
3000 }
3001
3002 if (event_child->collecting_fast_tracepoint)
3003 {
3004 if (debug_threads)
3005 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3006 "Check if we're already there.\n",
3007 lwpid_of (current_thread),
3008 event_child->collecting_fast_tracepoint);
3009
3010 trace_event = 1;
3011
3012 event_child->collecting_fast_tracepoint
3013 = linux_fast_tracepoint_collecting (event_child, NULL);
3014
3015 if (event_child->collecting_fast_tracepoint != 1)
3016 {
3017 /* No longer need this breakpoint. */
3018 if (event_child->exit_jump_pad_bkpt != NULL)
3019 {
3020 if (debug_threads)
3021 	    debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
3022 			  "stopping all threads momentarily.\n");
3023
3024 /* Other running threads could hit this breakpoint.
3025 We don't handle moribund locations like GDB does,
3026 instead we always pause all threads when removing
3027 breakpoints, so that any step-over or
3028 decr_pc_after_break adjustment is always taken
3029 care of while the breakpoint is still
3030 inserted. */
3031 stop_all_lwps (1, event_child);
3032
3033 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3034 event_child->exit_jump_pad_bkpt = NULL;
3035
3036 unstop_all_lwps (1, event_child);
3037
3038 gdb_assert (event_child->suspended >= 0);
3039 }
3040 }
3041
3042 if (event_child->collecting_fast_tracepoint == 0)
3043 {
3044 if (debug_threads)
3045 debug_printf ("fast tracepoint finished "
3046 "collecting successfully.\n");
3047
3048 /* We may have a deferred signal to report. */
3049 if (dequeue_one_deferred_signal (event_child, &w))
3050 {
3051 if (debug_threads)
3052 debug_printf ("dequeued one signal.\n");
3053 }
3054 else
3055 {
3056 if (debug_threads)
3057 debug_printf ("no deferred signals.\n");
3058
3059 if (stabilizing_threads)
3060 {
3061 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3062 ourstatus->value.sig = GDB_SIGNAL_0;
3063
3064 if (debug_threads)
3065 {
3066 debug_printf ("linux_wait_1 ret = %s, stopped "
3067 "while stabilizing threads\n",
3068 target_pid_to_str (ptid_of (current_thread)));
3069 debug_exit ();
3070 }
3071
3072 return ptid_of (current_thread);
3073 }
3074 }
3075 }
3076 }
3077
3078 /* Check whether GDB would be interested in this event. */
3079
3080 /* If GDB is not interested in this signal, don't stop other
3081 threads, and don't report it to GDB. Just resume the inferior
3082 right away. We do this for threading-related signals as well as
3083 any that GDB specifically requested we ignore. But never ignore
3084 SIGSTOP if we sent it ourselves, and do not ignore signals when
3085 stepping - they may require special handling to skip the signal
3086 handler. Also never ignore signals that could be caused by a
3087 breakpoint. */
3088 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3089 thread library? */
3090 if (WIFSTOPPED (w)
3091 && current_thread->last_resume_kind != resume_step
3092 && (
3093 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3094 (current_process ()->priv->thread_db != NULL
3095 && (WSTOPSIG (w) == __SIGRTMIN
3096 || WSTOPSIG (w) == __SIGRTMIN + 1))
3097 ||
3098 #endif
3099 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3100 && !(WSTOPSIG (w) == SIGSTOP
3101 && current_thread->last_resume_kind == resume_stop)
3102 && !linux_wstatus_maybe_breakpoint (w))))
3103 {
3104 siginfo_t info, *info_p;
3105
3106 if (debug_threads)
3107 debug_printf ("Ignored signal %d for LWP %ld.\n",
3108 WSTOPSIG (w), lwpid_of (current_thread));
3109
3110 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3111 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3112 info_p = &info;
3113 else
3114 info_p = NULL;
3115 linux_resume_one_lwp (event_child, event_child->stepping,
3116 WSTOPSIG (w), info_p);
3117 return ignore_event (ourstatus);
3118 }
3119
3120 /* Note that all addresses are always "out of the step range" when
3121 there's no range to begin with. */
3122 in_step_range = lwp_in_step_range (event_child);
3123
3124 /* If GDB wanted this thread to single step, and the thread is out
3125 of the step range, we always want to report the SIGTRAP, and let
3126 GDB handle it. Watchpoints should always be reported. So should
3127 signals we can't explain. A SIGTRAP we can't explain could be a
3128      GDB breakpoint --- we may or may not support Z0 breakpoints.  If
3129      we do, we'll be able to handle GDB breakpoints on top of internal
3130      breakpoints, by handling the internal breakpoint and still
3131      reporting the event to GDB.  If we don't, we're out of luck; GDB
3132      won't see the breakpoint hit.  */
3133 report_to_gdb = (!maybe_internal_trap
3134 || (current_thread->last_resume_kind == resume_step
3135 && !in_step_range)
3136 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3137 || (!step_over_finished && !in_step_range
3138 && !bp_explains_trap && !trace_event)
3139 || (gdb_breakpoint_here (event_child->stop_pc)
3140 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3141 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3142 || extended_event_reported (&event_child->waitstatus));
3143
3144 run_breakpoint_commands (event_child->stop_pc);
3145
3146 /* We found no reason GDB would want us to stop. We either hit one
3147 of our own breakpoints, or finished an internal step GDB
3148 shouldn't know about. */
3149 if (!report_to_gdb)
3150 {
3151 if (debug_threads)
3152 {
3153 if (bp_explains_trap)
3154 debug_printf ("Hit a gdbserver breakpoint.\n");
3155 if (step_over_finished)
3156 debug_printf ("Step-over finished.\n");
3157 if (trace_event)
3158 debug_printf ("Tracepoint event.\n");
3159 if (lwp_in_step_range (event_child))
3160 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3161 paddress (event_child->stop_pc),
3162 paddress (event_child->step_range_start),
3163 paddress (event_child->step_range_end));
3164 if (extended_event_reported (&event_child->waitstatus))
3165 {
3166 char *str = target_waitstatus_to_string (ourstatus);
3167 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3168 lwpid_of (get_lwp_thread (event_child)), str);
3169 xfree (str);
3170 }
3171 }
3172
3173 /* We're not reporting this breakpoint to GDB, so apply the
3174 decr_pc_after_break adjustment to the inferior's regcache
3175 ourselves. */
3176
3177 if (the_low_target.set_pc != NULL)
3178 {
3179 struct regcache *regcache
3180 = get_thread_regcache (current_thread, 1);
3181 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3182 }
3183
3184 /* We may have finished stepping over a breakpoint. If so,
3185 we've stopped and suspended all LWPs momentarily except the
3186 stepping one. This is where we resume them all again. We're
3187 going to keep waiting, so use proceed, which handles stepping
3188 over the next breakpoint. */
3189 if (debug_threads)
3190 debug_printf ("proceeding all threads.\n");
3191
3192 if (step_over_finished)
3193 unsuspend_all_lwps (event_child);
3194
3195 proceed_all_lwps ();
3196 return ignore_event (ourstatus);
3197 }
3198
3199 if (debug_threads)
3200 {
3201 if (current_thread->last_resume_kind == resume_step)
3202 {
3203 if (event_child->step_range_start == event_child->step_range_end)
3204 debug_printf ("GDB wanted to single-step, reporting event.\n");
3205 else if (!lwp_in_step_range (event_child))
3206 debug_printf ("Out of step range, reporting event.\n");
3207 }
3208 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3209 debug_printf ("Stopped by watchpoint.\n");
3210 else if (gdb_breakpoint_here (event_child->stop_pc))
3211 debug_printf ("Stopped by GDB breakpoint.\n");
3212       /* debug_threads is already known true here.  */
3213       debug_printf ("Hit a non-gdbserver trap event.\n");
3214 }
3215
3216 /* Alright, we're going to report a stop. */
3217
3218 if (!stabilizing_threads)
3219 {
3220 /* In all-stop, stop all threads. */
3221 if (!non_stop)
3222 stop_all_lwps (0, NULL);
3223
3224 /* If we're not waiting for a specific LWP, choose an event LWP
3225 from among those that have had events. Giving equal priority
3226 to all LWPs that have had events helps prevent
3227 starvation. */
3228 if (ptid_equal (ptid, minus_one_ptid))
3229 {
3230 event_child->status_pending_p = 1;
3231 event_child->status_pending = w;
3232
3233 select_event_lwp (&event_child);
3234
3235 /* current_thread and event_child must stay in sync. */
3236 current_thread = get_lwp_thread (event_child);
3237
3238 event_child->status_pending_p = 0;
3239 w = event_child->status_pending;
3240 }
3241
3242 if (step_over_finished)
3243 {
3244 if (!non_stop)
3245 {
3246 /* If we were doing a step-over, all other threads but
3247 the stepping one had been paused in start_step_over,
3248 with their suspend counts incremented. We don't want
3249 to do a full unstop/unpause, because we're in
3250 all-stop mode (so we want threads stopped), but we
3251 still need to unsuspend the other threads, to
3252 decrement their `suspended' count back. */
3253 unsuspend_all_lwps (event_child);
3254 }
3255 else
3256 {
3257 /* If we just finished a step-over, then all threads had
3258 been momentarily paused. In all-stop, that's fine,
3259 we want threads stopped by now anyway. In non-stop,
3260 we need to re-resume threads that GDB wanted to be
3261 running. */
3262 unstop_all_lwps (1, event_child);
3263 }
3264 }
3265
3266 /* Stabilize threads (move out of jump pads). */
3267 if (!non_stop)
3268 stabilize_threads ();
3269 }
3270 else
3271 {
3272 /* If we just finished a step-over, then all threads had been
3273 momentarily paused. In all-stop, that's fine, we want
3274 threads stopped by now anyway. In non-stop, we need to
3275 re-resume threads that GDB wanted to be running. */
3276 if (step_over_finished)
3277 unstop_all_lwps (1, event_child);
3278 }
3279
3280 if (extended_event_reported (&event_child->waitstatus))
3281 {
3282 /* If the reported event is a fork, vfork or exec, let GDB know. */
3283 ourstatus->kind = event_child->waitstatus.kind;
3284 ourstatus->value = event_child->waitstatus.value;
3285
3286 /* Clear the event lwp's waitstatus since we handled it already. */
3287 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3288 }
3289 else
3290 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3291
3292 /* Now that we've selected our final event LWP, un-adjust its PC if
3293 it was a software breakpoint, and the client doesn't know we can
3294 adjust the breakpoint ourselves. */
3295 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3296 && !swbreak_feature)
3297 {
3298 int decr_pc = the_low_target.decr_pc_after_break;
3299
3300 if (decr_pc != 0)
3301 {
3302 struct regcache *regcache
3303 = get_thread_regcache (current_thread, 1);
3304 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3305 }
3306 }
3307
3308 if (current_thread->last_resume_kind == resume_stop
3309 && WSTOPSIG (w) == SIGSTOP)
3310 {
3311       /* This thread was requested to stop by GDB with vCont;t, and
3312 	 it stopped cleanly, so report it as SIG0.  The use of
3313 	 SIGSTOP is an implementation detail.  */
3314 ourstatus->value.sig = GDB_SIGNAL_0;
3315 }
3316 else if (current_thread->last_resume_kind == resume_stop
3317 && WSTOPSIG (w) != SIGSTOP)
3318 {
3319       /* This thread was requested to stop by GDB with vCont;t, but
3320 	 it stopped for some other reason.  */
3321 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3322 }
3323 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3324 {
3325 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3326 }
3327
3328 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3329
3330 if (debug_threads)
3331 {
3332 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3333 target_pid_to_str (ptid_of (current_thread)),
3334 ourstatus->kind, ourstatus->value.sig);
3335 debug_exit ();
3336 }
3337
3338 return ptid_of (current_thread);
3339 }
3340
3341 /* Get rid of any pending event in the pipe. */
3342 static void
3343 async_file_flush (void)
3344 {
3345 int ret;
3346 char buf;
3347
3348 do
3349 ret = read (linux_event_pipe[0], &buf, 1);
3350 while (ret >= 0 || (ret == -1 && errno == EINTR));
3351 }
3352
3353 /* Put something in the pipe, so the event loop wakes up. */
3354 static void
3355 async_file_mark (void)
3356 {
3357 int ret;
3358
3359 async_file_flush ();
3360
3361 do
3362 ret = write (linux_event_pipe[1], "+", 1);
3363 while (ret == 0 || (ret == -1 && errno == EINTR));
3364
3365 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3366 be awakened anyway. */
3367 }
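
/* async_file_flush and async_file_mark are the two halves of the
   self-pipe trick: the event loop select()s/poll()s on the read end,
   and marking writes a byte to wake it.  Both ends must be
   non-blocking for the pipe-full/EAGAIN reasoning above to hold.  A
   standalone sketch, with a hypothetical fd pair and init function
   standing in for linux_event_pipe's setup elsewhere in this file:  */
#if 0
static int sketch_event_pipe[2];

static void
sketch_event_pipe_init (void)
{
  if (pipe (sketch_event_pipe) != 0)
    return;
  fcntl (sketch_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (sketch_event_pipe[1], F_SETFL, O_NONBLOCK);
}

static void
sketch_wake_event_loop (void)
{
  /* A full pipe is fine; the loop is already due to wake up.  */
  while (write (sketch_event_pipe[1], "+", 1) == -1 && errno == EINTR)
    ;
}
#endif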
3368
3369 static ptid_t
3370 linux_wait (ptid_t ptid,
3371 struct target_waitstatus *ourstatus, int target_options)
3372 {
3373 ptid_t event_ptid;
3374
3375 /* Flush the async file first. */
3376 if (target_is_async_p ())
3377 async_file_flush ();
3378
3379 do
3380 {
3381 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3382 }
3383 while ((target_options & TARGET_WNOHANG) == 0
3384 && ptid_equal (event_ptid, null_ptid)
3385 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3386
3387 /* If at least one stop was reported, there may be more. A single
3388 SIGCHLD can signal more than one child stop. */
3389 if (target_is_async_p ()
3390 && (target_options & TARGET_WNOHANG) != 0
3391 && !ptid_equal (event_ptid, null_ptid))
3392 async_file_mark ();
3393
3394 return event_ptid;
3395 }
3396
3397 /* Send a signal to an LWP. */
3398
3399 static int
3400 kill_lwp (unsigned long lwpid, int signo)
3401 {
3402   /* Use tkill, if possible, in case we are using NPTL threads.  If tkill
3403      fails, then we are not using NPTL threads and we should be using kill.  */
3404
3405 #ifdef __NR_tkill
3406 {
3407 static int tkill_failed;
3408
3409 if (!tkill_failed)
3410 {
3411 int ret;
3412
3413 errno = 0;
3414 ret = syscall (__NR_tkill, lwpid, signo);
3415 if (errno != ENOSYS)
3416 return ret;
3417 tkill_failed = 1;
3418 }
3419 }
3420 #endif
3421
3422 return kill (lwpid, signo);
3423 }
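
/* With NPTL, kill() raises a process-directed signal that any thread
   in the group may take, while tkill() directs the signal at one
   specific LWP, which is what per-thread SIGSTOPs require.  The
   kill() fallback covers old LinuxThreads systems, where each thread
   was its own process.  The contrast, sketched (assuming __NR_tkill
   is available):  */
#if 0
static void
signal_direction_sketch (unsigned long lwpid)
{
#ifdef __NR_tkill
  syscall (__NR_tkill, lwpid, SIGSTOP);	/* Thread-directed.  */
#endif
  kill ((pid_t) lwpid, SIGSTOP);	/* Process-directed: any thread
					   in the group is eligible.  */
}
#endif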
3424
3425 void
3426 linux_stop_lwp (struct lwp_info *lwp)
3427 {
3428 send_sigstop (lwp);
3429 }
3430
3431 static void
3432 send_sigstop (struct lwp_info *lwp)
3433 {
3434 int pid;
3435
3436 pid = lwpid_of (get_lwp_thread (lwp));
3437
3438 /* If we already have a pending stop signal for this process, don't
3439 send another. */
3440 if (lwp->stop_expected)
3441 {
3442 if (debug_threads)
3443 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3444
3445 return;
3446 }
3447
3448 if (debug_threads)
3449 debug_printf ("Sending sigstop to lwp %d\n", pid);
3450
3451 lwp->stop_expected = 1;
3452 kill_lwp (pid, SIGSTOP);
3453 }
3454
3455 static int
3456 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3457 {
3458 struct thread_info *thread = (struct thread_info *) entry;
3459 struct lwp_info *lwp = get_thread_lwp (thread);
3460
3461 /* Ignore EXCEPT. */
3462 if (lwp == except)
3463 return 0;
3464
3465 if (lwp->stopped)
3466 return 0;
3467
3468 send_sigstop (lwp);
3469 return 0;
3470 }
3471
3472 /* Increment the suspend count of an LWP, and stop it, if not stopped
3473 yet. */
3474 static int
3475 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3476 void *except)
3477 {
3478 struct thread_info *thread = (struct thread_info *) entry;
3479 struct lwp_info *lwp = get_thread_lwp (thread);
3480
3481 /* Ignore EXCEPT. */
3482 if (lwp == except)
3483 return 0;
3484
3485 lwp->suspended++;
3486
3487 return send_sigstop_callback (entry, except);
3488 }
3489
3490 static void
3491 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3492 {
3493 /* It's dead, really. */
3494 lwp->dead = 1;
3495
3496 /* Store the exit status for later. */
3497 lwp->status_pending_p = 1;
3498 lwp->status_pending = wstat;
3499
3500 /* Prevent trying to stop it. */
3501 lwp->stopped = 1;
3502
3503 /* No further stops are expected from a dead lwp. */
3504 lwp->stop_expected = 0;
3505 }
3506
3507 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3508
3509 static void
3510 wait_for_sigstop (void)
3511 {
3512 struct thread_info *saved_thread;
3513 ptid_t saved_tid;
3514 int wstat;
3515 int ret;
3516
3517 saved_thread = current_thread;
3518 if (saved_thread != NULL)
3519 saved_tid = saved_thread->entry.id;
3520 else
3521 saved_tid = null_ptid; /* avoid bogus unused warning */
3522
3523 if (debug_threads)
3524 debug_printf ("wait_for_sigstop: pulling events\n");
3525
3526 /* Passing NULL_PTID as filter indicates we want all events to be
3527 left pending. Eventually this returns when there are no
3528 unwaited-for children left. */
3529 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3530 &wstat, __WALL);
3531 gdb_assert (ret == -1);
3532
3533 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3534 current_thread = saved_thread;
3535 else
3536 {
3537 if (debug_threads)
3538 debug_printf ("Previously current thread died.\n");
3539
3540 if (non_stop)
3541 {
3542 	  /* We can't change the current inferior behind GDB's back;
3543 	     otherwise a subsequent command may apply to the wrong
3544 process. */
3545 current_thread = NULL;
3546 }
3547 else
3548 {
3549 /* Set a valid thread as current. */
3550 set_desired_thread (0);
3551 }
3552 }
3553 }
3554
3555 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3556 move it out, because we need to report the stop event to GDB. For
3557 example, if the user puts a breakpoint in the jump pad, it's
3558 because she wants to debug it. */
3559
3560 static int
3561 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3562 {
3563 struct thread_info *thread = (struct thread_info *) entry;
3564 struct lwp_info *lwp = get_thread_lwp (thread);
3565
3566 gdb_assert (lwp->suspended == 0);
3567 gdb_assert (lwp->stopped);
3568
3569   /* Allow debugging the jump pad, gdb_collect, etc.  */
3570 return (supports_fast_tracepoints ()
3571 && agent_loaded_p ()
3572 && (gdb_breakpoint_here (lwp->stop_pc)
3573 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3574 || thread->last_resume_kind == resume_step)
3575 && linux_fast_tracepoint_collecting (lwp, NULL));
3576 }
3577
3578 static void
3579 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3580 {
3581 struct thread_info *thread = (struct thread_info *) entry;
3582 struct lwp_info *lwp = get_thread_lwp (thread);
3583 int *wstat;
3584
3585 gdb_assert (lwp->suspended == 0);
3586 gdb_assert (lwp->stopped);
3587
3588 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3589
3590 /* Allow debugging the jump pad, gdb_collect, etc. */
3591 if (!gdb_breakpoint_here (lwp->stop_pc)
3592 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3593 && thread->last_resume_kind != resume_step
3594 && maybe_move_out_of_jump_pad (lwp, wstat))
3595 {
3596 if (debug_threads)
3597 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3598 lwpid_of (thread));
3599
3600 if (wstat)
3601 {
3602 lwp->status_pending_p = 0;
3603 enqueue_one_deferred_signal (lwp, wstat);
3604
3605 if (debug_threads)
3606 debug_printf ("Signal %d for LWP %ld deferred "
3607 "(in jump pad)\n",
3608 WSTOPSIG (*wstat), lwpid_of (thread));
3609 }
3610
3611 linux_resume_one_lwp (lwp, 0, 0, NULL);
3612 }
3613 else
3614 lwp->suspended++;
3615 }
3616
3617 static int
3618 lwp_running (struct inferior_list_entry *entry, void *data)
3619 {
3620 struct thread_info *thread = (struct thread_info *) entry;
3621 struct lwp_info *lwp = get_thread_lwp (thread);
3622
3623 if (lwp->dead)
3624 return 0;
3625 if (lwp->stopped)
3626 return 0;
3627 return 1;
3628 }
3629
3630 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3631 If SUSPEND, then also increase the suspend count of every LWP,
3632 except EXCEPT. */
3633
3634 static void
3635 stop_all_lwps (int suspend, struct lwp_info *except)
3636 {
3637 /* Should not be called recursively. */
3638 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3639
3640 if (debug_threads)
3641 {
3642 debug_enter ();
3643 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3644 suspend ? "stop-and-suspend" : "stop",
3645 except != NULL
3646 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3647 : "none");
3648 }
3649
3650 stopping_threads = (suspend
3651 ? STOPPING_AND_SUSPENDING_THREADS
3652 : STOPPING_THREADS);
3653
3654 if (suspend)
3655 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3656 else
3657 find_inferior (&all_threads, send_sigstop_callback, except);
3658 wait_for_sigstop ();
3659 stopping_threads = NOT_STOPPING_THREADS;
3660
3661 if (debug_threads)
3662 {
3663 debug_printf ("stop_all_lwps done, setting stopping_threads "
3664 "back to !stopping\n");
3665 debug_exit ();
3666 }
3667 }
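
/* Callers always bracket the stop-and-suspend flavor with a matching
   unstop, pinning all other LWPs across work that must not race with
   running threads; the breakpoint-deletion sites earlier in this file
   are the main users.  The idiom, condensed into a hypothetical
   helper built from this file's functions:  */
#if 0
static void
stop_mutate_unstop_sketch (struct lwp_info *except, struct breakpoint *bp)
{
  stop_all_lwps (1, except);	/* Stop and suspend all but EXCEPT.  */
  delete_breakpoint (bp);	/* Safe: nothing else is executing.  */
  unstop_all_lwps (1, except);	/* Unsuspend and re-resume them.  */
}
#endif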
3668
3669 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3670 SIGNAL is nonzero, give it that signal. */
3671
3672 static void
3673 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3674 int step, int signal, siginfo_t *info)
3675 {
3676 struct thread_info *thread = get_lwp_thread (lwp);
3677 struct thread_info *saved_thread;
3678 int fast_tp_collecting;
3679
3680 if (lwp->stopped == 0)
3681 return;
3682
3683 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3684
3685 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3686
3687 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3688 user used the "jump" command, or "set $pc = foo"). */
3689 if (lwp->stop_pc != get_pc (lwp))
3690 {
3691 /* Collecting 'while-stepping' actions doesn't make sense
3692 anymore. */
3693 release_while_stepping_state_list (thread);
3694 }
3695
3696 /* If we have pending signals or status, and a new signal, enqueue the
3697 signal. Also enqueue the signal if we are waiting to reinsert a
3698 breakpoint; it will be picked up again below. */
3699 if (signal != 0
3700 && (lwp->status_pending_p
3701 || lwp->pending_signals != NULL
3702 || lwp->bp_reinsert != 0
3703 || fast_tp_collecting))
3704 {
3705 struct pending_signals *p_sig;
3706 p_sig = xmalloc (sizeof (*p_sig));
3707 p_sig->prev = lwp->pending_signals;
3708 p_sig->signal = signal;
3709 if (info == NULL)
3710 memset (&p_sig->info, 0, sizeof (siginfo_t));
3711 else
3712 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3713 lwp->pending_signals = p_sig;
3714 }
3715
3716 if (lwp->status_pending_p)
3717 {
3718 if (debug_threads)
3719 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3720 " has pending status\n",
3721 lwpid_of (thread), step ? "step" : "continue", signal,
3722 lwp->stop_expected ? "expected" : "not expected");
3723 return;
3724 }
3725
3726 saved_thread = current_thread;
3727 current_thread = thread;
3728
3729 if (debug_threads)
3730 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3731 lwpid_of (thread), step ? "step" : "continue", signal,
3732 lwp->stop_expected ? "expected" : "not expected");
3733
3734 /* This bit needs some thinking about. If we get a signal that
3735 we must report while a single-step reinsert is still pending,
3736 we often end up resuming the thread. It might be better to
3737 (ew) allow a stack of pending events; then we could be sure that
3738 the reinsert happened right away and not lose any signals.
3739
3740 Making this stack would also shrink the window in which breakpoints are
3741 uninserted (see comment in linux_wait_for_lwp) but not enough for
3742 complete correctness, so it won't solve that problem. It may be
3743 worthwhile just to solve this one, however. */
3744 if (lwp->bp_reinsert != 0)
3745 {
3746 if (debug_threads)
3747 debug_printf (" pending reinsert at 0x%s\n",
3748 paddress (lwp->bp_reinsert));
3749
3750 if (can_hardware_single_step ())
3751 {
3752 if (fast_tp_collecting == 0)
3753 {
3754 if (step == 0)
3755 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3756 if (lwp->suspended)
3757 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3758 lwp->suspended);
3759 }
3760
3761 step = 1;
3762 }
3763
3764 /* Postpone any pending signal. It was enqueued above. */
3765 signal = 0;
3766 }
3767
3768 if (fast_tp_collecting == 1)
3769 {
3770 if (debug_threads)
3771 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3772 " (exit-jump-pad-bkpt)\n",
3773 lwpid_of (thread));
3774
3775 /* Postpone any pending signal. It was enqueued above. */
3776 signal = 0;
3777 }
3778 else if (fast_tp_collecting == 2)
3779 {
3780 if (debug_threads)
3781 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3782 " single-stepping\n",
3783 lwpid_of (thread));
3784
3785 if (can_hardware_single_step ())
3786 step = 1;
3787 else
3788 {
3789 internal_error (__FILE__, __LINE__,
3790 "moving out of jump pad single-stepping"
3791 " not implemented on this target");
3792 }
3793
3794 /* Postpone any pending signal. It was enqueued above. */
3795 signal = 0;
3796 }
3797
3798 /* If we have while-stepping actions in this thread, set it stepping.
3799 If we have a signal to deliver, it may or may not be set to
3800 SIG_IGN; we don't know. Assume so, and allow collecting
3801 while-stepping into a signal handler. A possible smart thing to
3802 do would be to set an internal breakpoint at the signal return
3803 address, continue, and carry on catching this while-stepping
3804 action only when that breakpoint is hit. A future
3805 enhancement. */
3806 if (thread->while_stepping != NULL
3807 && can_hardware_single_step ())
3808 {
3809 if (debug_threads)
3810 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3811 lwpid_of (thread));
3812 step = 1;
3813 }
3814
3815 if (the_low_target.get_pc != NULL)
3816 {
3817 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3818
3819 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3820
3821 if (debug_threads)
3822 {
3823 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3824 (long) lwp->stop_pc);
3825 }
3826 }
3827
3828 /* If we have pending signals, consume one unless we are trying to
3829 reinsert a breakpoint or we're trying to finish a fast tracepoint
3830 collect. */
3831 if (lwp->pending_signals != NULL
3832 && lwp->bp_reinsert == 0
3833 && fast_tp_collecting == 0)
3834 {
3835 struct pending_signals **p_sig;
3836
3837 p_sig = &lwp->pending_signals;
3838 while ((*p_sig)->prev != NULL)
3839 p_sig = &(*p_sig)->prev;
3840
3841 signal = (*p_sig)->signal;
3842 if ((*p_sig)->info.si_signo != 0)
3843 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3844 &(*p_sig)->info);
3845
3846 free (*p_sig);
3847 *p_sig = NULL;
3848 }
3849
3850 if (the_low_target.prepare_to_resume != NULL)
3851 the_low_target.prepare_to_resume (lwp);
3852
3853 regcache_invalidate_thread (thread);
3854 errno = 0;
3855 lwp->stepping = step;
3856 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3857 (PTRACE_TYPE_ARG3) 0,
3858 /* Coerce to a uintptr_t first to avoid potential gcc warning
3859 of coercing an 8 byte integer to a 4 byte pointer. */
3860 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3861
3862 current_thread = saved_thread;
3863 if (errno)
3864 perror_with_name ("resuming thread");
3865
3866 /* Successfully resumed. Clear state that no longer makes sense,
3867 and mark the LWP as running. Must not do this before resuming
3868 otherwise if that fails other code will be confused. E.g., we'd
3869 later try to stop the LWP and hang forever waiting for a stop
3870 status. Note that we must not throw after this is cleared,
3871 otherwise handle_zombie_lwp_error would get confused. */
3872 lwp->stopped = 0;
3873 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3874 }
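/* Illustrative sketch (not part of gdbserver): the pending_signals
   list above is pushed at the head (newest first, via PREV links) but
   consumed from the tail, yielding FIFO signal delivery.  A minimal
   standalone model of that discipline, with a hypothetical name:  */
#if 0
static int
example_pop_oldest_signal (struct pending_signals **head)
{
  struct pending_signals **slot = head;
  int sig;

  /* Walk to the oldest node: the one whose PREV link is NULL.  */
  while ((*slot)->prev != NULL)
    slot = &(*slot)->prev;

  sig = (*slot)->signal;
  free (*slot);
  *slot = NULL;
  return sig;
}
#endif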
3875
3876 /* Called when we try to resume a stopped LWP and that errors out. If
3877 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
3878 or about to become), discard the error, clear any pending status
3879 the LWP may have, and return true (we'll collect the exit status
3880 soon enough). Otherwise, return false. */
3881
3882 static int
3883 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
3884 {
3885 struct thread_info *thread = get_lwp_thread (lp);
3886
3887 /* If we get an error after resuming the LWP successfully, we'd
3888 confuse !T state for the LWP being gone. */
3889 gdb_assert (lp->stopped);
3890
3891 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
3892 because even if ptrace failed with ESRCH, the tracee may be "not
3893 yet fully dead", but already refusing ptrace requests. In that
3894 case the tracee has 'R (Running)' state for a little bit
3895 (observed in Linux 3.18). See also the note on ESRCH in the
3896 ptrace(2) man page. Instead, check whether the LWP has any state
3897 other than ptrace-stopped. */
3898
3899 /* Don't assume anything if /proc/PID/status can't be read. */
3900 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
3901 {
3902 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3903 lp->status_pending_p = 0;
3904 return 1;
3905 }
3906 return 0;
3907 }
3908
3909 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
3910 disappears while we try to resume it. */
3911
3912 static void
3913 linux_resume_one_lwp (struct lwp_info *lwp,
3914 int step, int signal, siginfo_t *info)
3915 {
3916 TRY
3917 {
3918 linux_resume_one_lwp_throw (lwp, step, signal, info);
3919 }
3920 CATCH (ex, RETURN_MASK_ERROR)
3921 {
3922 if (!check_ptrace_stopped_lwp_gone (lwp))
3923 throw_exception (ex);
3924 }
3925 END_CATCH
3926 }
3927
3928 struct thread_resume_array
3929 {
3930 struct thread_resume *resume;
3931 size_t n;
3932 };
3933
3934 /* This function is called once per thread via find_inferior.
3935 ARG is a pointer to a thread_resume_array struct.
3936 We look up the thread specified by ENTRY in ARG, and mark the thread
3937 with a pointer to the appropriate resume request.
3938
3939 This algorithm is O(threads * resume elements), but resume elements
3940 is small (and will remain small at least until GDB supports thread
3941 suspension). */
3942
3943 static int
3944 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3945 {
3946 struct thread_info *thread = (struct thread_info *) entry;
3947 struct lwp_info *lwp = get_thread_lwp (thread);
3948 int ndx;
3949 struct thread_resume_array *r;
3950
3951 r = arg;
3952
3953 for (ndx = 0; ndx < r->n; ndx++)
3954 {
3955 ptid_t ptid = r->resume[ndx].thread;
3956 if (ptid_equal (ptid, minus_one_ptid)
3957 || ptid_equal (ptid, entry->id)
3958 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3959 of PID'. */
3960 || (ptid_get_pid (ptid) == pid_of (thread)
3961 && (ptid_is_pid (ptid)
3962 || ptid_get_lwp (ptid) == -1)))
3963 {
3964 if (r->resume[ndx].kind == resume_stop
3965 && thread->last_resume_kind == resume_stop)
3966 {
3967 if (debug_threads)
3968 debug_printf ("already %s LWP %ld at GDB's request\n",
3969 (thread->last_status.kind
3970 == TARGET_WAITKIND_STOPPED)
3971 ? "stopped"
3972 : "stopping",
3973 lwpid_of (thread));
3974
3975 continue;
3976 }
3977
3978 lwp->resume = &r->resume[ndx];
3979 thread->last_resume_kind = lwp->resume->kind;
3980
3981 lwp->step_range_start = lwp->resume->step_range_start;
3982 lwp->step_range_end = lwp->resume->step_range_end;
3983
3984 /* If we had a deferred signal to report, dequeue one now.
3985 This can happen if LWP gets more than one signal while
3986 trying to get out of a jump pad. */
3987 if (lwp->stopped
3988 && !lwp->status_pending_p
3989 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3990 {
3991 lwp->status_pending_p = 1;
3992
3993 if (debug_threads)
3994 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3995 "leaving status pending.\n",
3996 WSTOPSIG (lwp->status_pending),
3997 lwpid_of (thread));
3998 }
3999
4000 return 0;
4001 }
4002 }
4003
4004 /* No resume action for this thread. */
4005 lwp->resume = NULL;
4006
4007 return 0;
4008 }
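/* Illustrative sketch (not part of gdbserver): the ptid matching rules
   used above, written out as a standalone predicate.  A resume request
   matches a thread if it is the -1 wildcard, an exact thread match, or
   a process-wide request ('pPID' or 'pPID.-1').  The example_* name is
   hypothetical.  */
#if 0
static int
example_resume_request_matches (ptid_t request, struct thread_info *thread)
{
  return (ptid_equal (request, minus_one_ptid)
	  || ptid_equal (request, thread->entry.id)
	  || (ptid_get_pid (request) == pid_of (thread)
	      && (ptid_is_pid (request)
		  || ptid_get_lwp (request) == -1)));
}
#endif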
4009
4010 /* find_inferior callback for linux_resume.
4011 Set *FLAG_P if this lwp has an interesting status pending. */
4012
4013 static int
4014 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4015 {
4016 struct thread_info *thread = (struct thread_info *) entry;
4017 struct lwp_info *lwp = get_thread_lwp (thread);
4018
4019 /* LWPs which will not be resumed are not interesting, because
4020 we might not wait for them next time through linux_wait. */
4021 if (lwp->resume == NULL)
4022 return 0;
4023
4024 if (thread_still_has_status_pending_p (thread))
4025 * (int *) flag_p = 1;
4026
4027 return 0;
4028 }
4029
4030 /* Return 1 if this lwp that GDB wants running is stopped at an
4031 internal breakpoint that we need to step over. It assumes that any
4032 required STOP_PC adjustment has already been propagated to the
4033 inferior's regcache. */
4034
4035 static int
4036 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4037 {
4038 struct thread_info *thread = (struct thread_info *) entry;
4039 struct lwp_info *lwp = get_thread_lwp (thread);
4040 struct thread_info *saved_thread;
4041 CORE_ADDR pc;
4042
4043 /* LWPs which will not be resumed are not interesting, because we
4044 might not wait for them next time through linux_wait. */
4045
4046 if (!lwp->stopped)
4047 {
4048 if (debug_threads)
4049 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4050 lwpid_of (thread));
4051 return 0;
4052 }
4053
4054 if (thread->last_resume_kind == resume_stop)
4055 {
4056 if (debug_threads)
4057 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4058 " stopped\n",
4059 lwpid_of (thread));
4060 return 0;
4061 }
4062
4063 gdb_assert (lwp->suspended >= 0);
4064
4065 if (lwp->suspended)
4066 {
4067 if (debug_threads)
4068 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4069 lwpid_of (thread));
4070 return 0;
4071 }
4072
4073 if (!lwp->need_step_over)
4074 {
4075 if (debug_threads)
4076 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
return 0;
4077 }
4078
4079 if (lwp->status_pending_p)
4080 {
4081 if (debug_threads)
4082 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4083 " status.\n",
4084 lwpid_of (thread));
4085 return 0;
4086 }
4087
4088 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4089 or we have. */
4090 pc = get_pc (lwp);
4091
4092 /* If the PC has changed since we stopped, then don't do anything,
4093 and let the breakpoint/tracepoint be hit. This happens if, for
4094 instance, GDB handled the decr_pc_after_break subtraction itself,
4095 GDB is OOL stepping this thread, or the user has issued a "jump"
4096 command, or poked thread's registers herself. */
4097 if (pc != lwp->stop_pc)
4098 {
4099 if (debug_threads)
4100 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4101 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4102 lwpid_of (thread),
4103 paddress (lwp->stop_pc), paddress (pc));
4104
4105 lwp->need_step_over = 0;
4106 return 0;
4107 }
4108
4109 saved_thread = current_thread;
4110 current_thread = thread;
4111
4112 /* We can only step over breakpoints we know about. */
4113 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4114 {
4115 /* Don't step over a breakpoint that GDB expects to hit
4116 though. If the condition is being evaluated on the target's side
4117 and it evaluates to false, step over this breakpoint as well. */
4118 if (gdb_breakpoint_here (pc)
4119 && gdb_condition_true_at_breakpoint (pc)
4120 && gdb_no_commands_at_breakpoint (pc))
4121 {
4122 if (debug_threads)
4123 debug_printf ("Need step over [LWP %ld]? yes, but found"
4124 " GDB breakpoint at 0x%s; skipping step over\n",
4125 lwpid_of (thread), paddress (pc));
4126
4127 current_thread = saved_thread;
4128 return 0;
4129 }
4130 else
4131 {
4132 if (debug_threads)
4133 debug_printf ("Need step over [LWP %ld]? yes, "
4134 "found breakpoint at 0x%s\n",
4135 lwpid_of (thread), paddress (pc));
4136
4137 /* We've found an lwp that needs stepping over --- return 1 so
4138 that find_inferior stops looking. */
4139 current_thread = saved_thread;
4140
4141 /* If the step over is cancelled, this is set again. */
4142 lwp->need_step_over = 0;
4143 return 1;
4144 }
4145 }
4146
4147 current_thread = saved_thread;
4148
4149 if (debug_threads)
4150 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4151 " at 0x%s\n",
4152 lwpid_of (thread), paddress (pc));
4153
4154 return 0;
4155 }
4156
4157 /* Start a step-over operation on LWP. When LWP is stopped at a
4158 breakpoint, to make progress, we need to move the breakpoint out
4159 of the way. If we let other threads run while we do that, they may
4160 pass by the breakpoint location and miss hitting it. To avoid
4161 that, a step-over momentarily stops all threads while LWP is
4162 single-stepped with the breakpoint temporarily uninserted from
4163 the inferior. When the single-step finishes, we reinsert the
4164 breakpoint, and let all threads that are supposed to be running,
4165 run again.
4166
4167 On targets that don't support hardware single-step, we don't
4168 currently support full software single-stepping. Instead, we only
4169 support stepping over the thread event breakpoint, by asking the
4170 low target where to place a reinsert breakpoint. Since this
4171 routine assumes the breakpoint being stepped over is a thread event
4172 breakpoint, it usually assumes the return address of the current
4173 function is a good enough place to set the reinsert breakpoint. */
4174
4175 static int
4176 start_step_over (struct lwp_info *lwp)
4177 {
4178 struct thread_info *thread = get_lwp_thread (lwp);
4179 struct thread_info *saved_thread;
4180 CORE_ADDR pc;
4181 int step;
4182
4183 if (debug_threads)
4184 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4185 lwpid_of (thread));
4186
4187 stop_all_lwps (1, lwp);
4188 gdb_assert (lwp->suspended == 0);
4189
4190 if (debug_threads)
4191 debug_printf ("Done stopping all threads for step-over.\n");
4192
4193 /* Note, we should always reach here with an already adjusted PC,
4194 either by GDB (if we're resuming due to GDB's request), or by our
4195 caller, if we just finished handling an internal breakpoint GDB
4196 shouldn't care about. */
4197 pc = get_pc (lwp);
4198
4199 saved_thread = current_thread;
4200 current_thread = thread;
4201
4202 lwp->bp_reinsert = pc;
4203 uninsert_breakpoints_at (pc);
4204 uninsert_fast_tracepoint_jumps_at (pc);
4205
4206 if (can_hardware_single_step ())
4207 {
4208 step = 1;
4209 }
4210 else
4211 {
4212 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4213 set_reinsert_breakpoint (raddr);
4214 step = 0;
4215 }
4216
4217 current_thread = saved_thread;
4218
4219 linux_resume_one_lwp (lwp, step, 0, NULL);
4220
4221 /* Require next event from this LWP. */
4222 step_over_bkpt = thread->entry.id;
4223 return 1;
4224 }
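/* Illustrative sketch (not part of gdbserver): the overall step-over
   sequence that start_step_over begins and finish_step_over (below)
   completes.  The wait in the middle is driven by the normal event
   loop; the example_* name is hypothetical.  */
#if 0
static void
example_step_over_sequence (struct lwp_info *lwp)
{
  /* Stop and suspend all other threads, uninsert the breakpoint at
     LWP's PC, and single-step LWP past it.  */
  start_step_over (lwp);

  /* ... wait for the single-step SIGTRAP from LWP ...  */

  /* Reinsert the breakpoint; if a step-over was indeed in progress,
     release all the threads that were held.  */
  if (finish_step_over (lwp))
    unstop_all_lwps (1, lwp);
}
#endif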
4225
4226 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4227 start_step_over, if still there, and delete any reinsert
4228 breakpoints we've set, on non hardware single-step targets. */
4229
4230 static int
4231 finish_step_over (struct lwp_info *lwp)
4232 {
4233 if (lwp->bp_reinsert != 0)
4234 {
4235 if (debug_threads)
4236 debug_printf ("Finished step over.\n");
4237
4238 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4239 may be no breakpoint to reinsert there by now. */
4240 reinsert_breakpoints_at (lwp->bp_reinsert);
4241 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4242
4243 lwp->bp_reinsert = 0;
4244
4245 /* Delete any software-single-step reinsert breakpoints. No
4246 longer needed. We don't have to worry about other threads
4247 hitting this trap, and later not being able to explain it,
4248 because we were stepping over a breakpoint, and we hold all
4249 threads but LWP stopped while doing that. */
4250 if (!can_hardware_single_step ())
4251 delete_reinsert_breakpoints ();
4252
4253 step_over_bkpt = null_ptid;
4254 return 1;
4255 }
4256 else
4257 return 0;
4258 }
4259
4260 /* This function is called once per thread. We check the thread's resume
4261 request, which will tell us whether to resume, step, or leave the thread
4262 stopped; and what signal, if any, it should be sent.
4263
4264 For threads which we aren't explicitly told otherwise, we preserve
4265 the stepping flag; this is used for stepping over gdbserver-placed
4266 breakpoints.
4267
4268 If any thread has a pending status, we queue any needed
4269 signals, since we won't actually resume. We already have a pending
4270 event to report, so we don't need to preserve any step requests;
4271 they should be re-issued if necessary. */
4272
4273 static int
4274 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4275 {
4276 struct thread_info *thread = (struct thread_info *) entry;
4277 struct lwp_info *lwp = get_thread_lwp (thread);
4278 int step;
4279 int leave_all_stopped = * (int *) arg;
4280 int leave_pending;
4281
4282 if (lwp->resume == NULL)
4283 return 0;
4284
4285 if (lwp->resume->kind == resume_stop)
4286 {
4287 if (debug_threads)
4288 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4289
4290 if (!lwp->stopped)
4291 {
4292 if (debug_threads)
4293 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4294
4295 /* Stop the thread, and wait for the event asynchronously,
4296 through the event loop. */
4297 send_sigstop (lwp);
4298 }
4299 else
4300 {
4301 if (debug_threads)
4302 debug_printf ("already stopped LWP %ld\n",
4303 lwpid_of (thread));
4304
4305 /* The LWP may have been stopped in an internal event that
4306 was not meant to be notified back to GDB (e.g., gdbserver
4307 breakpoint), so we should be reporting a stop event in
4308 this case too. */
4309
4310 /* If the thread already has a pending SIGSTOP, this is a
4311 no-op. Otherwise, something later will presumably resume
4312 the thread and this will cause it to cancel any pending
4313 operation, due to last_resume_kind == resume_stop. If
4314 the thread already has a pending status to report, we
4315 will still report it the next time we wait - see
4316 status_pending_p_callback. */
4317
4318 /* If we already have a pending signal to report, then
4319 there's no need to queue a SIGSTOP, as this means we're
4320 midway through moving the LWP out of the jumppad, and we
4321 will report the pending signal as soon as that is
4322 finished. */
4323 if (lwp->pending_signals_to_report == NULL)
4324 send_sigstop (lwp);
4325 }
4326
4327 /* For stop requests, we're done. */
4328 lwp->resume = NULL;
4329 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4330 return 0;
4331 }
4332
4333 /* If this thread which is about to be resumed has a pending status,
4334 then don't resume any threads - we can just report the pending
4335 status. Make sure to queue any signals that would otherwise be
4336 sent. In all-stop mode, we make this decision based on whether *any*
4337 thread has a pending status. If there's a thread that needs the
4338 step-over-breakpoint dance, then don't resume any other thread
4339 but that particular one. */
4340 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4341
4342 if (!leave_pending)
4343 {
4344 if (debug_threads)
4345 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4346
4347 step = (lwp->resume->kind == resume_step);
4348 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4349 }
4350 else
4351 {
4352 if (debug_threads)
4353 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4354
4355 /* If we have a new signal, enqueue the signal. */
4356 if (lwp->resume->sig != 0)
4357 {
4358 struct pending_signals *p_sig;
4359 p_sig = xmalloc (sizeof (*p_sig));
4360 p_sig->prev = lwp->pending_signals;
4361 p_sig->signal = lwp->resume->sig;
4362 memset (&p_sig->info, 0, sizeof (siginfo_t));
4363
4364 /* If this is the same signal we were previously stopped by,
4365 make sure to queue its siginfo. We can ignore the return
4366 value of ptrace; if it fails, we'll skip
4367 PTRACE_SETSIGINFO. */
4368 if (WIFSTOPPED (lwp->last_status)
4369 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4370 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4371 &p_sig->info);
4372
4373 lwp->pending_signals = p_sig;
4374 }
4375 }
4376
4377 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4378 lwp->resume = NULL;
4379 return 0;
4380 }
4381
4382 static void
4383 linux_resume (struct thread_resume *resume_info, size_t n)
4384 {
4385 struct thread_resume_array array = { resume_info, n };
4386 struct thread_info *need_step_over = NULL;
4387 int any_pending;
4388 int leave_all_stopped;
4389
4390 if (debug_threads)
4391 {
4392 debug_enter ();
4393 debug_printf ("linux_resume:\n");
4394 }
4395
4396 find_inferior (&all_threads, linux_set_resume_request, &array);
4397
4398 /* If there is a thread which would otherwise be resumed, which has
4399 a pending status, then don't resume any threads - we can just
4400 report the pending status. Make sure to queue any signals that
4401 would otherwise be sent. In non-stop mode, we'll apply this
4402 logic to each thread individually. We consume all pending events
4403 before considering whether to start a step-over (in all-stop). */
4404 any_pending = 0;
4405 if (!non_stop)
4406 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4407
4408 /* If there is a thread which would otherwise be resumed, which is
4409 stopped at a breakpoint that needs stepping over, then don't
4410 resume any threads - have it step over the breakpoint with all
4411 other threads stopped, then resume all threads again. Make sure
4412 to queue any signals that would otherwise be delivered or
4413 queued. */
4414 if (!any_pending && supports_breakpoints ())
4415 need_step_over
4416 = (struct thread_info *) find_inferior (&all_threads,
4417 need_step_over_p, NULL);
4418
4419 leave_all_stopped = (need_step_over != NULL || any_pending);
4420
4421 if (debug_threads)
4422 {
4423 if (need_step_over != NULL)
4424 debug_printf ("Not resuming all, need step over\n");
4425 else if (any_pending)
4426 debug_printf ("Not resuming, all-stop and found "
4427 "an LWP with pending status\n");
4428 else
4429 debug_printf ("Resuming, no pending status or step over needed\n");
4430 }
4431
4432 /* Even if we're leaving threads stopped, queue all signals we'd
4433 otherwise deliver. */
4434 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4435
4436 if (need_step_over)
4437 start_step_over (get_thread_lwp (need_step_over));
4438
4439 if (debug_threads)
4440 {
4441 debug_printf ("linux_resume done\n");
4442 debug_exit ();
4443 }
4444 }
4445
4446 /* This function is called once per thread. We check the thread's
4447 last resume request, which will tell us whether to resume, step, or
4448 leave the thread stopped. Any signal the client requested to be
4449 delivered has already been enqueued at this point.
4450
4451 If any thread that GDB wants running is stopped at an internal
4452 breakpoint that needs stepping over, we start a step-over operation
4453 on that particular thread, and leave all others stopped. */
4454
4455 static int
4456 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4457 {
4458 struct thread_info *thread = (struct thread_info *) entry;
4459 struct lwp_info *lwp = get_thread_lwp (thread);
4460 int step;
4461
4462 if (lwp == except)
4463 return 0;
4464
4465 if (debug_threads)
4466 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4467
4468 if (!lwp->stopped)
4469 {
4470 if (debug_threads)
4471 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4472 return 0;
4473 }
4474
4475 if (thread->last_resume_kind == resume_stop
4476 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4477 {
4478 if (debug_threads)
4479 debug_printf (" client wants LWP %ld to remain stopped\n",
4480 lwpid_of (thread));
4481 return 0;
4482 }
4483
4484 if (lwp->status_pending_p)
4485 {
4486 if (debug_threads)
4487 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4488 lwpid_of (thread));
4489 return 0;
4490 }
4491
4492 gdb_assert (lwp->suspended >= 0);
4493
4494 if (lwp->suspended)
4495 {
4496 if (debug_threads)
4497 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4498 return 0;
4499 }
4500
4501 if (thread->last_resume_kind == resume_stop
4502 && lwp->pending_signals_to_report == NULL
4503 && lwp->collecting_fast_tracepoint == 0)
4504 {
4505 /* We haven't reported this LWP as stopped yet (otherwise, the
4506 last_status.kind check above would catch it, and we wouldn't
4507 reach here). This LWP may have been momentarily paused by a
4508 stop_all_lwps call while handling, for example, another LWP's
4509 step-over. In that case, the pending expected SIGSTOP signal
4510 that was queued at vCont;t handling time will have already
4511 been consumed by wait_for_sigstop, and so we need to requeue
4512 another one here. Note that if the LWP already has a SIGSTOP
4513 pending, this is a no-op. */
4514
4515 if (debug_threads)
4516 debug_printf ("Client wants LWP %ld to stop. "
4517 "Making sure it has a SIGSTOP pending\n",
4518 lwpid_of (thread));
4519
4520 send_sigstop (lwp);
4521 }
4522
4523 step = thread->last_resume_kind == resume_step;
4524 linux_resume_one_lwp (lwp, step, 0, NULL);
4525 return 0;
4526 }
4527
4528 static int
4529 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4530 {
4531 struct thread_info *thread = (struct thread_info *) entry;
4532 struct lwp_info *lwp = get_thread_lwp (thread);
4533
4534 if (lwp == except)
4535 return 0;
4536
4537 lwp->suspended--;
4538 gdb_assert (lwp->suspended >= 0);
4539
4540 return proceed_one_lwp (entry, except);
4541 }
4542
4543 /* When we finish a step-over, set threads running again. If there's
4544 another thread that may need a step-over, now's the time to start
4545 it. Eventually, we'll move all threads past their breakpoints. */
4546
4547 static void
4548 proceed_all_lwps (void)
4549 {
4550 struct thread_info *need_step_over;
4551
4552 /* If there is a thread which would otherwise be resumed, which is
4553 stopped at a breakpoint that needs stepping over, then don't
4554 resume any threads - have it step over the breakpoint with all
4555 other threads stopped, then resume all threads again. */
4556
4557 if (supports_breakpoints ())
4558 {
4559 need_step_over
4560 = (struct thread_info *) find_inferior (&all_threads,
4561 need_step_over_p, NULL);
4562
4563 if (need_step_over != NULL)
4564 {
4565 if (debug_threads)
4566 debug_printf ("proceed_all_lwps: found "
4567 "thread %ld needing a step-over\n",
4568 lwpid_of (need_step_over));
4569
4570 start_step_over (get_thread_lwp (need_step_over));
4571 return;
4572 }
4573 }
4574
4575 if (debug_threads)
4576 debug_printf ("Proceeding, no step-over needed\n");
4577
4578 find_inferior (&all_threads, proceed_one_lwp, NULL);
4579 }
4580
4581 /* Stopped LWPs that the client wanted to be running, that don't have
4582 pending statuses, are set to run again, except for EXCEPT, if not
4583 NULL. This undoes a stop_all_lwps call. */
4584
4585 static void
4586 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4587 {
4588 if (debug_threads)
4589 {
4590 debug_enter ();
4591 if (except)
4592 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4593 lwpid_of (get_lwp_thread (except)));
4594 else
4595 debug_printf ("unstopping all lwps\n");
4596 }
4597
4598 if (unsuspend)
4599 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4600 else
4601 find_inferior (&all_threads, proceed_one_lwp, except);
4602
4603 if (debug_threads)
4604 {
4605 debug_printf ("unstop_all_lwps done\n");
4606 debug_exit ();
4607 }
4608 }
4609
4610
4611 #ifdef HAVE_LINUX_REGSETS
4612
4613 #define use_linux_regsets 1
4614
4615 /* Returns true if REGSET has been disabled. */
4616
4617 static int
4618 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4619 {
4620 return (info->disabled_regsets != NULL
4621 && info->disabled_regsets[regset - info->regsets]);
4622 }
4623
4624 /* Disable REGSET. */
4625
4626 static void
4627 disable_regset (struct regsets_info *info, struct regset_info *regset)
4628 {
4629 int dr_offset;
4630
4631 dr_offset = regset - info->regsets;
4632 if (info->disabled_regsets == NULL)
4633 info->disabled_regsets = xcalloc (1, info->num_regsets);
4634 info->disabled_regsets[dr_offset] = 1;
4635 }
4636
4637 static int
4638 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4639 struct regcache *regcache)
4640 {
4641 struct regset_info *regset;
4642 int saw_general_regs = 0;
4643 int pid;
4644 struct iovec iov;
4645
4646 pid = lwpid_of (current_thread);
4647 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4648 {
4649 void *buf, *data;
4650 int nt_type, res;
4651
4652 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4653 continue;
4654
4655 buf = xmalloc (regset->size);
4656
4657 nt_type = regset->nt_type;
4658 if (nt_type)
4659 {
4660 iov.iov_base = buf;
4661 iov.iov_len = regset->size;
4662 data = (void *) &iov;
4663 }
4664 else
4665 data = buf;
4666
4667 #ifndef __sparc__
4668 res = ptrace (regset->get_request, pid,
4669 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4670 #else
4671 res = ptrace (regset->get_request, pid, data, nt_type);
4672 #endif
4673 if (res < 0)
4674 {
4675 if (errno == EIO)
4676 {
4677 /* If we get EIO on a regset, do not try it again for
4678 this process mode. */
4679 disable_regset (regsets_info, regset);
4680 }
4681 else if (errno == ENODATA)
4682 {
4683 /* ENODATA may be returned if the regset is currently
4684 not "active". This can happen in normal operation,
4685 so suppress the warning in this case. */
4686 }
4687 else
4688 {
4689 char s[256];
4690 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4691 pid);
4692 perror (s);
4693 }
4694 }
4695 else
4696 {
4697 if (regset->type == GENERAL_REGS)
4698 saw_general_regs = 1;
4699 regset->store_function (regcache, buf);
4700 }
4701 free (buf);
4702 }
4703 if (saw_general_regs)
4704 return 0;
4705 else
4706 return 1;
4707 }
4708
4709 static int
4710 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4711 struct regcache *regcache)
4712 {
4713 struct regset_info *regset;
4714 int saw_general_regs = 0;
4715 int pid;
4716 struct iovec iov;
4717
4718 pid = lwpid_of (current_thread);
4719 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4720 {
4721 void *buf, *data;
4722 int nt_type, res;
4723
4724 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4725 || regset->fill_function == NULL)
4726 continue;
4727
4728 buf = xmalloc (regset->size);
4729
4730 /* First fill the buffer with the current register set contents,
4731 in case there are any items in the kernel's regset that are
4732 not in gdbserver's regcache. */
4733
4734 nt_type = regset->nt_type;
4735 if (nt_type)
4736 {
4737 iov.iov_base = buf;
4738 iov.iov_len = regset->size;
4739 data = (void *) &iov;
4740 }
4741 else
4742 data = buf;
4743
4744 #ifndef __sparc__
4745 res = ptrace (regset->get_request, pid,
4746 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4747 #else
4748 res = ptrace (regset->get_request, pid, data, nt_type);
4749 #endif
4750
4751 if (res == 0)
4752 {
4753 /* Then overlay our cached registers on that. */
4754 regset->fill_function (regcache, buf);
4755
4756 /* Only now do we write the register set. */
4757 #ifndef __sparc__
4758 res = ptrace (regset->set_request, pid,
4759 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4760 #else
4761 res = ptrace (regset->set_request, pid, data, nt_type);
4762 #endif
4763 }
4764
4765 if (res < 0)
4766 {
4767 if (errno == EIO)
4768 {
4769 /* If we get EIO on a regset, do not try it again for
4770 this process mode. */
4771 disable_regset (regsets_info, regset);
4772 }
4773 else if (errno == ESRCH)
4774 {
4775 /* At this point, ESRCH should mean the process is
4776 already gone, in which case we simply ignore attempts
4777 to change its registers. See also the related
4778 comment in linux_resume_one_lwp. */
4779 free (buf);
4780 return 0;
4781 }
4782 else
4783 {
4784 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4785 }
4786 }
4787 else if (regset->type == GENERAL_REGS)
4788 saw_general_regs = 1;
4789 free (buf);
4790 }
4791 if (saw_general_regs)
4792 return 0;
4793 else
4794 return 1;
4795 }
4796
4797 #else /* !HAVE_LINUX_REGSETS */
4798
4799 #define use_linux_regsets 0
4800 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4801 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4802
4803 #endif
4804
4805 /* Return 1 if register REGNO is supported by one of the regset ptrace
4806 calls or 0 if it has to be transferred individually. */
4807
4808 static int
4809 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4810 {
4811 unsigned char mask = 1 << (regno % 8);
4812 size_t index = regno / 8;
4813
4814 return (use_linux_regsets
4815 && (regs_info->regset_bitmap == NULL
4816 || (regs_info->regset_bitmap[index] & mask) != 0));
4817 }
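/* Worked example (not part of gdbserver): for regno == 13 the test
   above inspects bit 13 % 8 == 5 of byte 13 / 8 == 1, i.e. it checks
   (regset_bitmap[1] & 0x20) != 0.  A register is "in regsets" when the
   target supplies no bitmap at all, or when its bit is set.  */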
4818
4819 #ifdef HAVE_LINUX_USRREGS
4820
4821 int
4822 register_addr (const struct usrregs_info *usrregs, int regnum)
4823 {
4824 int addr;
4825
4826 if (regnum < 0 || regnum >= usrregs->num_regs)
4827 error ("Invalid register number %d.", regnum);
4828
4829 addr = usrregs->regmap[regnum];
4830
4831 return addr;
4832 }
4833
4834 /* Fetch one register. */
4835 static void
4836 fetch_register (const struct usrregs_info *usrregs,
4837 struct regcache *regcache, int regno)
4838 {
4839 CORE_ADDR regaddr;
4840 int i, size;
4841 char *buf;
4842 int pid;
4843
4844 if (regno >= usrregs->num_regs)
4845 return;
4846 if ((*the_low_target.cannot_fetch_register) (regno))
4847 return;
4848
4849 regaddr = register_addr (usrregs, regno);
4850 if (regaddr == -1)
4851 return;
4852
4853 size = ((register_size (regcache->tdesc, regno)
4854 + sizeof (PTRACE_XFER_TYPE) - 1)
4855 & -sizeof (PTRACE_XFER_TYPE));
4856 buf = alloca (size);
4857
4858 pid = lwpid_of (current_thread);
4859 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4860 {
4861 errno = 0;
4862 *(PTRACE_XFER_TYPE *) (buf + i) =
4863 ptrace (PTRACE_PEEKUSER, pid,
4864 /* Coerce to a uintptr_t first to avoid potential gcc warning
4865 of coercing an 8 byte integer to a 4 byte pointer. */
4866 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4867 regaddr += sizeof (PTRACE_XFER_TYPE);
4868 if (errno != 0)
4869 error ("reading register %d: %s", regno, strerror (errno));
4870 }
4871
4872 if (the_low_target.supply_ptrace_register)
4873 the_low_target.supply_ptrace_register (regcache, regno, buf);
4874 else
4875 supply_register (regcache, regno, buf);
4876 }
4877
4878 /* Store one register. */
4879 static void
4880 store_register (const struct usrregs_info *usrregs,
4881 struct regcache *regcache, int regno)
4882 {
4883 CORE_ADDR regaddr;
4884 int i, size;
4885 char *buf;
4886 int pid;
4887
4888 if (regno >= usrregs->num_regs)
4889 return;
4890 if ((*the_low_target.cannot_store_register) (regno))
4891 return;
4892
4893 regaddr = register_addr (usrregs, regno);
4894 if (regaddr == -1)
4895 return;
4896
4897 size = ((register_size (regcache->tdesc, regno)
4898 + sizeof (PTRACE_XFER_TYPE) - 1)
4899 & -sizeof (PTRACE_XFER_TYPE));
4900 buf = alloca (size);
4901 memset (buf, 0, size);
4902
4903 if (the_low_target.collect_ptrace_register)
4904 the_low_target.collect_ptrace_register (regcache, regno, buf);
4905 else
4906 collect_register (regcache, regno, buf);
4907
4908 pid = lwpid_of (current_thread);
4909 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4910 {
4911 errno = 0;
4912 ptrace (PTRACE_POKEUSER, pid,
4913 /* Coerce to a uintptr_t first to avoid potential gcc warning
4914 about coercing an 8 byte integer to a 4 byte pointer. */
4915 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4916 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4917 if (errno != 0)
4918 {
4919 /* At this point, ESRCH should mean the process is
4920 already gone, in which case we simply ignore attempts
4921 to change its registers. See also the related
4922 comment in linux_resume_one_lwp. */
4923 if (errno == ESRCH)
4924 return;
4925
4926 if ((*the_low_target.cannot_store_register) (regno) == 0)
4927 error ("writing register %d: %s", regno, strerror (errno));
4928 }
4929 regaddr += sizeof (PTRACE_XFER_TYPE);
4930 }
4931 }
4932
4933 /* Fetch all registers, or just one, from the child process.
4934 If REGNO is -1, do this for all registers, skipping any that are
4935 assumed to have been retrieved by regsets_fetch_inferior_registers,
4936 unless ALL is non-zero.
4937 Otherwise, REGNO specifies which register (so we can save time). */
4938 static void
4939 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4940 struct regcache *regcache, int regno, int all)
4941 {
4942 struct usrregs_info *usr = regs_info->usrregs;
4943
4944 if (regno == -1)
4945 {
4946 for (regno = 0; regno < usr->num_regs; regno++)
4947 if (all || !linux_register_in_regsets (regs_info, regno))
4948 fetch_register (usr, regcache, regno);
4949 }
4950 else
4951 fetch_register (usr, regcache, regno);
4952 }
4953
4954 /* Store our register values back into the inferior.
4955 If REGNO is -1, do this for all registers, skipping any that are
4956 assumed to have been saved by regsets_store_inferior_registers,
4957 unless ALL is non-zero.
4958 Otherwise, REGNO specifies which register (so we can save time). */
4959 static void
4960 usr_store_inferior_registers (const struct regs_info *regs_info,
4961 struct regcache *regcache, int regno, int all)
4962 {
4963 struct usrregs_info *usr = regs_info->usrregs;
4964
4965 if (regno == -1)
4966 {
4967 for (regno = 0; regno < usr->num_regs; regno++)
4968 if (all || !linux_register_in_regsets (regs_info, regno))
4969 store_register (usr, regcache, regno);
4970 }
4971 else
4972 store_register (usr, regcache, regno);
4973 }
4974
4975 #else /* !HAVE_LINUX_USRREGS */
4976
4977 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4978 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4979
4980 #endif
4981
4982
4983 void
4984 linux_fetch_registers (struct regcache *regcache, int regno)
4985 {
4986 int use_regsets;
4987 int all = 0;
4988 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4989
4990 if (regno == -1)
4991 {
4992 if (the_low_target.fetch_register != NULL
4993 && regs_info->usrregs != NULL)
4994 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4995 (*the_low_target.fetch_register) (regcache, regno);
4996
4997 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4998 if (regs_info->usrregs != NULL)
4999 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5000 }
5001 else
5002 {
5003 if (the_low_target.fetch_register != NULL
5004 && (*the_low_target.fetch_register) (regcache, regno))
5005 return;
5006
5007 use_regsets = linux_register_in_regsets (regs_info, regno);
5008 if (use_regsets)
5009 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5010 regcache);
5011 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5012 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5013 }
5014 }
5015
5016 void
5017 linux_store_registers (struct regcache *regcache, int regno)
5018 {
5019 int use_regsets;
5020 int all = 0;
5021 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5022
5023 if (regno == -1)
5024 {
5025 all = regsets_store_inferior_registers (regs_info->regsets_info,
5026 regcache);
5027 if (regs_info->usrregs != NULL)
5028 usr_store_inferior_registers (regs_info, regcache, regno, all);
5029 }
5030 else
5031 {
5032 use_regsets = linux_register_in_regsets (regs_info, regno);
5033 if (use_regsets)
5034 all = regsets_store_inferior_registers (regs_info->regsets_info,
5035 regcache);
5036 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5037 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5038 }
5039 }
5040
5041
5042 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5043 to debugger memory starting at MYADDR. */
5044
5045 static int
5046 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5047 {
5048 int pid = lwpid_of (current_thread);
5049 register PTRACE_XFER_TYPE *buffer;
5050 register CORE_ADDR addr;
5051 register int count;
5052 char filename[64];
5053 register int i;
5054 int ret;
5055 int fd;
5056
5057 /* Try using /proc. Don't bother for one word. */
5058 if (len >= 3 * sizeof (long))
5059 {
5060 int bytes;
5061
5062 /* We could keep this file open and cache it - possibly one per
5063 thread. That requires some juggling, but is even faster. */
5064 sprintf (filename, "/proc/%d/mem", pid);
5065 fd = open (filename, O_RDONLY | O_LARGEFILE);
5066 if (fd == -1)
5067 goto no_proc;
5068
5069 /* If pread64 is available, use it. It's faster if the kernel
5070 supports it (only one syscall), and it's 64-bit safe even on
5071 32-bit platforms (for instance, SPARC debugging a SPARC64
5072 application). */
5073 #ifdef HAVE_PREAD64
5074 bytes = pread64 (fd, myaddr, len, memaddr);
5075 #else
5076 bytes = -1;
5077 if (lseek (fd, memaddr, SEEK_SET) != -1)
5078 bytes = read (fd, myaddr, len);
5079 #endif
5080
5081 close (fd);
5082 if (bytes == len)
5083 return 0;
5084
5085 /* Some data was read, we'll try to get the rest with ptrace. */
5086 if (bytes > 0)
5087 {
5088 memaddr += bytes;
5089 myaddr += bytes;
5090 len -= bytes;
5091 }
5092 }
5093
5094 no_proc:
5095 /* Round starting address down to longword boundary. */
5096 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5097 /* Round ending address up; get number of longwords that makes. */
5098 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5099 / sizeof (PTRACE_XFER_TYPE));
5100 /* Allocate buffer of that many longwords. */
5101 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
5102
5103 /* Read all the longwords */
5104 errno = 0;
5105 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5106 {
5107 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5108 about coercing an 8 byte integer to a 4 byte pointer. */
5109 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5110 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5111 (PTRACE_TYPE_ARG4) 0);
5112 if (errno)
5113 break;
5114 }
5115 ret = errno;
5116
5117 /* Copy appropriate bytes out of the buffer. */
5118 if (i > 0)
5119 {
5120 i *= sizeof (PTRACE_XFER_TYPE);
5121 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5122 memcpy (myaddr,
5123 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5124 i < len ? i : len);
5125 }
5126
5127 return ret;
5128 }
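/* Worked example (not part of gdbserver): the ptrace fallback above
   transfers whole PTRACE_XFER_TYPE words.  On a 64-bit target
   (sizeof (PTRACE_XFER_TYPE) == 8), a request for LEN == 5 bytes at
   MEMADDR == 0x1003 computes:

     addr  = 0x1003 & ~7                      = 0x1000
     count = ((0x1008 - 0x1000) + 8 - 1) / 8  = 1

   so a single 8-byte word is peeked, and the copy at the end skips
   the low 0x1003 & 7 == 3 bytes of the buffer.  */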
5129
5130 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5131 memory at MEMADDR. On failure (cannot write to the inferior)
5132 returns the value of errno. Always succeeds if LEN is zero. */
5133
5134 static int
5135 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5136 {
5137 register int i;
5138 /* Round starting address down to longword boundary. */
5139 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5140 /* Round ending address up; get number of longwords that makes. */
5141 register int count
5142 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5143 / sizeof (PTRACE_XFER_TYPE);
5144
5145 /* Allocate buffer of that many longwords. */
5146 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
5147 alloca (count * sizeof (PTRACE_XFER_TYPE));
5148
5149 int pid = lwpid_of (current_thread);
5150
5151 if (len == 0)
5152 {
5153 /* Zero length write always succeeds. */
5154 return 0;
5155 }
5156
5157 if (debug_threads)
5158 {
5159 /* Dump up to four bytes. */
5160 unsigned int val = * (unsigned int *) myaddr;
5161 if (len == 1)
5162 val = val & 0xff;
5163 else if (len == 2)
5164 val = val & 0xffff;
5165 else if (len == 3)
5166 val = val & 0xffffff;
5167 debug_printf ("Writing %0*x to 0x%08lx in process %d\n",
5168 2 * ((len < 4) ? len : 4), val, (long)memaddr, pid);
5169 }
5170
5171 /* Fill start and end extra bytes of buffer with existing memory data. */
5172
5173 errno = 0;
5174 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5175 about coercing an 8 byte integer to a 4 byte pointer. */
5176 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5177 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5178 (PTRACE_TYPE_ARG4) 0);
5179 if (errno)
5180 return errno;
5181
5182 if (count > 1)
5183 {
5184 errno = 0;
5185 buffer[count - 1]
5186 = ptrace (PTRACE_PEEKTEXT, pid,
5187 /* Coerce to a uintptr_t first to avoid potential gcc warning
5188 about coercing an 8 byte integer to a 4 byte pointer. */
5189 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5190 * sizeof (PTRACE_XFER_TYPE)),
5191 (PTRACE_TYPE_ARG4) 0);
5192 if (errno)
5193 return errno;
5194 }
5195
5196 /* Copy data to be written over corresponding part of buffer. */
5197
5198 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5199 myaddr, len);
5200
5201 /* Write the entire buffer. */
5202
5203 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5204 {
5205 errno = 0;
5206 ptrace (PTRACE_POKETEXT, pid,
5207 /* Coerce to a uintptr_t first to avoid potential gcc warning
5208 about coercing an 8 byte integer to a 4 byte pointer. */
5209 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5210 (PTRACE_TYPE_ARG4) buffer[i]);
5211 if (errno)
5212 return errno;
5213 }
5214
5215 return 0;
5216 }
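/* Worked example (not part of gdbserver): why the first and last
   words are peeked before poking.  Poking writes whole words, so a
   2-byte write at an address with word offset 3 must preserve the
   surrounding bytes:

     word before:  AA BB CC DD EE FF 00 11
     write XX YY:  AA BB CC XX YY FF 00 11

   Without the initial PTRACE_PEEKTEXT calls, the bytes AA BB CC and
   FF 00 11 would be overwritten with whatever the buffer held.  */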
5217
5218 static void
5219 linux_look_up_symbols (void)
5220 {
5221 #ifdef USE_THREAD_DB
5222 struct process_info *proc = current_process ();
5223
5224 if (proc->priv->thread_db != NULL)
5225 return;
5226
5227 /* If the kernel supports tracing clones, then we don't need to
5228 use the magic thread event breakpoint to learn about
5229 threads. */
5230 thread_db_init (!linux_supports_traceclone ());
5231 #endif
5232 }
5233
5234 static void
5235 linux_request_interrupt (void)
5236 {
5237 extern unsigned long signal_pid;
5238
5239 /* Send a SIGINT to the process group. This acts just like the user
5240 typed a ^C on the controlling terminal. */
5241 kill (-signal_pid, SIGINT);
5242 }
5243
5244 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5245 to debugger memory starting at MYADDR. */
5246
5247 static int
5248 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5249 {
5250 char filename[PATH_MAX];
5251 int fd, n;
5252 int pid = lwpid_of (current_thread);
5253
5254 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5255
5256 fd = open (filename, O_RDONLY);
5257 if (fd < 0)
5258 return -1;
5259
5260 if (offset != (CORE_ADDR) 0
5261 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5262 n = -1;
5263 else
5264 n = read (fd, myaddr, len);
5265
5266 close (fd);
5267
5268 return n;
5269 }
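/* Illustrative sketch (not part of gdbserver): consuming the raw
   auxv bytes returned above.  Assumes a 64-bit inferior so each
   record is an Elf64_auxv_t pair; AT_NULL terminates the vector, and
   <elf.h> provides the type and the AT_* constants.  The example_*
   name is hypothetical.  */
#if 0
static unsigned long
example_auxv_lookup (unsigned char *buf, int nbytes, unsigned long type)
{
  Elf64_auxv_t *av;

  for (av = (Elf64_auxv_t *) buf;
       (unsigned char *) (av + 1) <= buf + nbytes && av->a_type != AT_NULL;
       av++)
    if (av->a_type == type)
      return av->a_un.a_val;
  return 0;
}
#endif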
5270
5271 /* These breakpoint and watchpoint related wrapper functions simply
5272 pass on the function call if the target has registered a
5273 corresponding function. */
5274
5275 static int
5276 linux_supports_z_point_type (char z_type)
5277 {
5278 return (the_low_target.supports_z_point_type != NULL
5279 && the_low_target.supports_z_point_type (z_type));
5280 }
5281
5282 static int
5283 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5284 int size, struct raw_breakpoint *bp)
5285 {
5286 if (type == raw_bkpt_type_sw)
5287 return insert_memory_breakpoint (bp);
5288 else if (the_low_target.insert_point != NULL)
5289 return the_low_target.insert_point (type, addr, size, bp);
5290 else
5291 /* Unsupported (see target.h). */
5292 return 1;
5293 }
5294
5295 static int
5296 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5297 int size, struct raw_breakpoint *bp)
5298 {
5299 if (type == raw_bkpt_type_sw)
5300 return remove_memory_breakpoint (bp);
5301 else if (the_low_target.remove_point != NULL)
5302 return the_low_target.remove_point (type, addr, size, bp);
5303 else
5304 /* Unsupported (see target.h). */
5305 return 1;
5306 }
5307
5308 /* Implement the to_stopped_by_sw_breakpoint target_ops
5309 method. */
5310
5311 static int
5312 linux_stopped_by_sw_breakpoint (void)
5313 {
5314 struct lwp_info *lwp = get_thread_lwp (current_thread);
5315
5316 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5317 }
5318
5319 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5320 method. */
5321
5322 static int
5323 linux_supports_stopped_by_sw_breakpoint (void)
5324 {
5325 return USE_SIGTRAP_SIGINFO;
5326 }
5327
5328 /* Implement the to_stopped_by_hw_breakpoint target_ops
5329 method. */
5330
5331 static int
5332 linux_stopped_by_hw_breakpoint (void)
5333 {
5334 struct lwp_info *lwp = get_thread_lwp (current_thread);
5335
5336 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5337 }
5338
5339 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5340 method. */
5341
5342 static int
5343 linux_supports_stopped_by_hw_breakpoint (void)
5344 {
5345 return USE_SIGTRAP_SIGINFO;
5346 }
5347
5348 /* Implement the supports_conditional_breakpoints target_ops
5349 method. */
5350
5351 static int
5352 linux_supports_conditional_breakpoints (void)
5353 {
5354 /* GDBserver needs to step over the breakpoint if the condition is
5355 false. GDBserver software single step is too simple, so disable
5356 conditional breakpoints if the target doesn't have hardware single
5357 step. */
5358 return can_hardware_single_step ();
5359 }
5360
5361 static int
5362 linux_stopped_by_watchpoint (void)
5363 {
5364 struct lwp_info *lwp = get_thread_lwp (current_thread);
5365
5366 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5367 }
5368
5369 static CORE_ADDR
5370 linux_stopped_data_address (void)
5371 {
5372 struct lwp_info *lwp = get_thread_lwp (current_thread);
5373
5374 return lwp->stopped_data_address;
5375 }
5376
5377 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5378 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5379 && defined(PT_TEXT_END_ADDR)
5380
5381 /* This is only used for targets that define PT_TEXT_ADDR,
5382 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5383 the target has different ways of acquiring this information, like
5384 loadmaps. */
5385
5386 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5387 to tell gdb about. */
5388
5389 static int
5390 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5391 {
5392 unsigned long text, text_end, data;
5393 int pid = lwpid_of (current_thread);
5394
5395 errno = 0;
5396
5397 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5398 (PTRACE_TYPE_ARG4) 0);
5399 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5400 (PTRACE_TYPE_ARG4) 0);
5401 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5402 (PTRACE_TYPE_ARG4) 0);
5403
5404 if (errno == 0)
5405 {
5406 /* Both text and data offsets produced at compile-time (and so
5407 used by gdb) are relative to the beginning of the program,
5408 with the data segment immediately following the text segment.
5409 However, the actual runtime layout in memory may put the data
5410 somewhere else, so when we send gdb a data base-address, we
5411 use the real data base address and subtract the compile-time
5412 data base-address from it (which is just the length of the
5413 text segment). BSS immediately follows data in both
5414 cases. */
5415 *text_p = text;
5416 *data_p = data - (text_end - text);
5417
5418 return 1;
5419 }
5420 return 0;
5421 }
5422 #endif
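/* Worked example (not part of gdbserver): suppose the program was
   linked with text at 0 and a 0x2000-byte text segment, but the
   loader placed text at 0x40000000 and data at 0x50000000.  Then:

     *text_p = 0x40000000
     *data_p = 0x50000000 - (0x40002000 - 0x40000000) = 0x4fffe000

   GDB later adds a data symbol's compile-time address (which starts
   at 0x2000, the text length) to *data_p, recovering the symbol's
   true runtime address.  */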
5423
5424 static int
5425 linux_qxfer_osdata (const char *annex,
5426 unsigned char *readbuf, unsigned const char *writebuf,
5427 CORE_ADDR offset, int len)
5428 {
5429 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5430 }
5431
5432 /* Convert a native/host siginfo object, into/from the siginfo in the
5433 layout of the inferiors' architecture. */
5434
5435 static void
5436 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5437 {
5438 int done = 0;
5439
5440 if (the_low_target.siginfo_fixup != NULL)
5441 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5442
5443 /* If there was no callback, or the callback didn't do anything,
5444 then just do a straight memcpy. */
5445 if (!done)
5446 {
5447 if (direction == 1)
5448 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5449 else
5450 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5451 }
5452 }
5453
5454 static int
5455 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5456 unsigned const char *writebuf, CORE_ADDR offset, int len)
5457 {
5458 int pid;
5459 siginfo_t siginfo;
5460 char inf_siginfo[sizeof (siginfo_t)];
5461
5462 if (current_thread == NULL)
5463 return -1;
5464
5465 pid = lwpid_of (current_thread);
5466
5467 if (debug_threads)
5468 debug_printf ("%s siginfo for lwp %d.\n",
5469 readbuf != NULL ? "Reading" : "Writing",
5470 pid);
5471
5472 if (offset >= sizeof (siginfo))
5473 return -1;
5474
5475 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5476 return -1;
5477
5478 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5479 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5480 inferior with a 64-bit GDBSERVER should look the same as debugging it
5481 with a 32-bit GDBSERVER, we need to convert it. */
5482 siginfo_fixup (&siginfo, inf_siginfo, 0);
5483
5484 if (offset + len > sizeof (siginfo))
5485 len = sizeof (siginfo) - offset;
5486
5487 if (readbuf != NULL)
5488 memcpy (readbuf, inf_siginfo + offset, len);
5489 else
5490 {
5491 memcpy (inf_siginfo + offset, writebuf, len);
5492
5493 /* Convert back to ptrace layout before flushing it out. */
5494 siginfo_fixup (&siginfo, inf_siginfo, 1);
5495
5496 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5497 return -1;
5498 }
5499
5500 return len;
5501 }
5502
5503 /* SIGCHLD handler that serves two purposes: in non-stop/async mode
5504 it lets us notice when children change state, and it serves as the
5505 handler for the sigsuspend in my_waitpid. */
5506
5507 static void
5508 sigchld_handler (int signo)
5509 {
5510 int old_errno = errno;
5511
5512 if (debug_threads)
5513 {
5514 do
5515 {
5516 /* fprintf is not async-signal-safe, so call write
5517 directly. */
5518 if (write (2, "sigchld_handler\n",
5519 sizeof ("sigchld_handler\n") - 1) < 0)
5520 break; /* just ignore */
5521 } while (0);
5522 }
5523
5524 if (target_is_async_p ())
5525 async_file_mark (); /* trigger a linux_wait */
5526
5527 errno = old_errno;
5528 }
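/* Illustrative sketch (not part of gdbserver): only async-signal-safe
   functions may run in a signal handler, which is why the handler
   above uses a bare write to fd 2 instead of fprintf.  A reusable
   form of the same trick, using sizeof on a string literal so no
   strlen call is needed (the macro name is hypothetical):  */
#if 0
#define EXAMPLE_SAFE_TRACE(msg)				\
  do							\
    {							\
      if (write (2, msg, sizeof (msg) - 1) < 0)		\
	break; /* just ignore */			\
    }							\
  while (0)
#endif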
5529
5530 static int
5531 linux_supports_non_stop (void)
5532 {
5533 return 1;
5534 }
5535
5536 static int
5537 linux_async (int enable)
5538 {
5539 int previous = target_is_async_p ();
5540
5541 if (debug_threads)
5542 debug_printf ("linux_async (%d), previous=%d\n",
5543 enable, previous);
5544
5545 if (previous != enable)
5546 {
5547 sigset_t mask;
5548 sigemptyset (&mask);
5549 sigaddset (&mask, SIGCHLD);
5550
5551 sigprocmask (SIG_BLOCK, &mask, NULL);
5552
5553 if (enable)
5554 {
5555 if (pipe (linux_event_pipe) == -1)
5556 {
5557 linux_event_pipe[0] = -1;
5558 linux_event_pipe[1] = -1;
5559 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5560
5561 warning ("creating event pipe failed.");
5562 return previous;
5563 }
5564
5565 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5566 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5567
5568 /* Register the event loop handler. */
5569 add_file_handler (linux_event_pipe[0],
5570 handle_target_event, NULL);
5571
5572 /* Always trigger a linux_wait. */
5573 async_file_mark ();
5574 }
5575 else
5576 {
5577 delete_file_handler (linux_event_pipe[0]);
5578
5579 close (linux_event_pipe[0]);
5580 close (linux_event_pipe[1]);
5581 linux_event_pipe[0] = -1;
5582 linux_event_pipe[1] = -1;
5583 }
5584
5585 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5586 }
5587
5588 return previous;
5589 }
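
/* The enable path above is the classic self-pipe trick: the SIGCHLD
   handler performs only an async-signal-safe write to the pipe, and
   the event loop polls the read end.  A standalone sketch of the
   pattern, not gdbserver code (error handling omitted):  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2];

static void
mark_event (int sig)
{
  int saved_errno = errno;

  /* write is async-signal-safe; one byte is enough to wake the
     event loop.  */
  (void) write (event_pipe[1], "+", 1);
  errno = saved_errno;
}

/* Setup: pipe (event_pipe); make both ends O_NONBLOCK via fcntl;
   install mark_event as the SIGCHLD handler; then select/poll on
   event_pipe[0] and drain it with read when it becomes readable.  */
#endif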
5590
5591 static int
5592 linux_start_non_stop (int nonstop)
5593 {
5594 /* Register or unregister from event-loop accordingly. */
5595 linux_async (nonstop);
5596
5597 if (target_is_async_p () != (nonstop != 0))
5598 return -1;
5599
5600 return 0;
5601 }
5602
5603 static int
5604 linux_supports_multi_process (void)
5605 {
5606 return 1;
5607 }
5608
5609 /* Check if fork events are supported. */
5610
5611 static int
5612 linux_supports_fork_events (void)
5613 {
5614 return linux_supports_tracefork ();
5615 }
5616
5617 /* Check if vfork events are supported. */
5618
5619 static int
5620 linux_supports_vfork_events (void)
5621 {
5622 return linux_supports_tracefork ();
5623 }
5624
5625 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5626 options for the specified lwp. */
5627
5628 static int
5629 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5630 void *args)
5631 {
5632 struct thread_info *thread = (struct thread_info *) entry;
5633 struct lwp_info *lwp = get_thread_lwp (thread);
5634
5635 if (!lwp->stopped)
5636 {
5637 /* Stop the lwp so we can modify its ptrace options. */
5638 lwp->must_set_ptrace_flags = 1;
5639 linux_stop_lwp (lwp);
5640 }
5641 else
5642 {
5643 /* Already stopped; go ahead and set the ptrace options. */
5644 struct process_info *proc = find_process_pid (pid_of (thread));
5645 int options = linux_low_ptrace_options (proc->attached);
5646
5647 linux_enable_event_reporting (lwpid_of (thread), options);
5648 lwp->must_set_ptrace_flags = 0;
5649 }
5650
5651 return 0;
5652 }
5653
5654 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5655 ptrace flags for all inferiors. This is in case the new GDB connection
5656 doesn't support the same set of events that the previous one did. */
5657
5658 static void
5659 linux_handle_new_gdb_connection (void)
5660 {
5661 pid_t pid;
5662
5663 /* Request that all the lwps reset their ptrace options. */
5664 find_inferior (&all_threads, reset_lwp_ptrace_options_callback, &pid);
5665 }
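
/* The options computed by linux_low_ptrace_options are PTRACE_O_*
   flags handed to PTRACE_SETOPTIONS.  A hedged sketch of what such a
   computation can look like; the actual policy lives elsewhere in
   this file and depends on what the connected GDB advertised, and the
   report_* predicates below are hypothetical:  */
#if 0
static int
example_ptrace_options (int attached)
{
  int options = PTRACE_O_TRACECLONE | PTRACE_O_TRACESYSGOOD;

  if (report_fork_events)	/* hypothetical flag */
    options |= PTRACE_O_TRACEFORK;
  if (report_vfork_events)	/* hypothetical flag */
    options |= PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE;

  return options;
}
#endif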
5666
5667 static int
5668 linux_supports_disable_randomization (void)
5669 {
5670 #ifdef HAVE_PERSONALITY
5671 return 1;
5672 #else
5673 return 0;
5674 #endif
5675 }
5676
5677 static int
5678 linux_supports_agent (void)
5679 {
5680 return 1;
5681 }
5682
5683 static int
5684 linux_supports_range_stepping (void)
5685 {
5686 if (*the_low_target.supports_range_stepping == NULL)
5687 return 0;
5688
5689 return (*the_low_target.supports_range_stepping) ();
5690 }
5691
5692 /* Enumerate spufs IDs for process PID. */
5693 static int
5694 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5695 {
5696 int pos = 0;
5697 int written = 0;
5698 char path[128];
5699 DIR *dir;
5700 struct dirent *entry;
5701
5702 sprintf (path, "/proc/%ld/fd", pid);
5703 dir = opendir (path);
5704 if (!dir)
5705 return -1;
5706
5707 rewinddir (dir);
5708 while ((entry = readdir (dir)) != NULL)
5709 {
5710 struct stat st;
5711 struct statfs stfs;
5712 int fd;
5713
5714 fd = atoi (entry->d_name);
5715 if (!fd)
5716 continue;
5717
5718 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5719 if (stat (path, &st) != 0)
5720 continue;
5721 if (!S_ISDIR (st.st_mode))
5722 continue;
5723
5724 if (statfs (path, &stfs) != 0)
5725 continue;
5726 if (stfs.f_type != SPUFS_MAGIC)
5727 continue;
5728
5729 if (pos >= offset && pos + 4 <= offset + len)
5730 {
5731 *(unsigned int *)(buf + pos - offset) = fd;
5732 written += 4;
5733 }
5734 pos += 4;
5735 }
5736
5737 closedir (dir);
5738 return written;
5739 }
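
/* Worked example of the OFFSET/LEN window above: with OFFSET 4 and
   LEN 8, the first context ID (at POS 0) is skipped, the second and
   third IDs (POS 4 and 8) are stored into BUF, and 8 is returned in
   WRITTEN.  */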
5740
5741 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5742 object type, using the /proc file system. */
5743 static int
5744 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5745 unsigned const char *writebuf,
5746 CORE_ADDR offset, int len)
5747 {
5748 long pid = lwpid_of (current_thread);
5749 char buf[128];
5750 int fd = 0;
5751 int ret = 0;
5752
5753 if (!writebuf && !readbuf)
5754 return -1;
5755
5756 if (!*annex)
5757 {
5758 if (!readbuf)
5759 return -1;
5760 else
5761 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5762 }
5763
5764 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5765 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5766 if (fd <= 0)
5767 return -1;
5768
5769 if (offset != 0
5770 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5771 {
5772 close (fd);
5773 return 0;
5774 }
5775
5776 if (writebuf)
5777 ret = write (fd, writebuf, (size_t) len);
5778 else
5779 ret = read (fd, readbuf, (size_t) len);
5780
5781 close (fd);
5782 return ret;
5783 }
5784
5785 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5786 struct target_loadseg
5787 {
5788 /* Core address to which the segment is mapped. */
5789 Elf32_Addr addr;
5790 /* VMA recorded in the program header. */
5791 Elf32_Addr p_vaddr;
5792 /* Size of this segment in memory. */
5793 Elf32_Word p_memsz;
5794 };
5795
5796 # if defined PT_GETDSBT
5797 struct target_loadmap
5798 {
5799 /* Protocol version number, must be zero. */
5800 Elf32_Word version;
5801 /* Pointer to the DSBT table, its size, and the DSBT index. */
5802 unsigned *dsbt_table;
5803 unsigned dsbt_size, dsbt_index;
5804 /* Number of segments in this map. */
5805 Elf32_Word nsegs;
5806 /* The actual memory map. */
5807 struct target_loadseg segs[/*nsegs*/];
5808 };
5809 # define LINUX_LOADMAP PT_GETDSBT
5810 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5811 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5812 # else
5813 struct target_loadmap
5814 {
5815 /* Protocol version number, must be zero. */
5816 Elf32_Half version;
5817 /* Number of segments in this map. */
5818 Elf32_Half nsegs;
5819 /* The actual memory map. */
5820 struct target_loadseg segs[/*nsegs*/];
5821 };
5822 # define LINUX_LOADMAP PTRACE_GETFDPIC
5823 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5824 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5825 # endif
5826
5827 static int
5828 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5829 unsigned char *myaddr, unsigned int len)
5830 {
5831 int pid = lwpid_of (current_thread);
5832 int addr = -1;
5833 struct target_loadmap *data = NULL;
5834 unsigned int actual_length, copy_length;
5835
5836 if (strcmp (annex, "exec") == 0)
5837 addr = (int) LINUX_LOADMAP_EXEC;
5838 else if (strcmp (annex, "interp") == 0)
5839 addr = (int) LINUX_LOADMAP_INTERP;
5840 else
5841 return -1;
5842
5843 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5844 return -1;
5845
5846 if (data == NULL)
5847 return -1;
5848
5849 actual_length = sizeof (struct target_loadmap)
5850 + sizeof (struct target_loadseg) * data->nsegs;
5851
5852 if (offset < 0 || offset > actual_length)
5853 return -1;
5854
5855 copy_length = actual_length - offset < len ? actual_length - offset : len;
5856 memcpy (myaddr, (char *) data + offset, copy_length);
5857 return copy_length;
5858 }
5859 #else
5860 # define linux_read_loadmap NULL
5861 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5862
5863 static void
5864 linux_process_qsupported (const char *query)
5865 {
5866 if (the_low_target.process_qsupported != NULL)
5867 the_low_target.process_qsupported (query);
5868 }
5869
5870 static int
5871 linux_supports_tracepoints (void)
5872 {
5873 if (*the_low_target.supports_tracepoints == NULL)
5874 return 0;
5875
5876 return (*the_low_target.supports_tracepoints) ();
5877 }
5878
5879 static CORE_ADDR
5880 linux_read_pc (struct regcache *regcache)
5881 {
5882 if (the_low_target.get_pc == NULL)
5883 return 0;
5884
5885 return (*the_low_target.get_pc) (regcache);
5886 }
5887
5888 static void
5889 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5890 {
5891 gdb_assert (the_low_target.set_pc != NULL);
5892
5893 (*the_low_target.set_pc) (regcache, pc);
5894 }
5895
5896 static int
5897 linux_thread_stopped (struct thread_info *thread)
5898 {
5899 return get_thread_lwp (thread)->stopped;
5900 }
5901
5902 /* This exposes stop-all-threads functionality to other gdbserver
modules. */
5903
5904 static void
5905 linux_pause_all (int freeze)
5906 {
5907 stop_all_lwps (freeze, NULL);
5908 }
5909
5910 /* This exposes unstop-all-threads functionality to other gdbserver
5911 modules. */
5912
5913 static void
5914 linux_unpause_all (int unfreeze)
5915 {
5916 unstop_all_lwps (unfreeze, NULL);
5917 }
5918
5919 static int
5920 linux_prepare_to_access_memory (void)
5921 {
5922 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5923 running LWP. */
5924 if (non_stop)
5925 linux_pause_all (1);
5926 return 0;
5927 }
5928
5929 static void
5930 linux_done_accessing_memory (void)
5931 {
5932 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5933 running LWP. */
5934 if (non_stop)
5935 linux_unpause_all (1);
5936 }
5937
5938 static int
5939 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5940 CORE_ADDR collector,
5941 CORE_ADDR lockaddr,
5942 ULONGEST orig_size,
5943 CORE_ADDR *jump_entry,
5944 CORE_ADDR *trampoline,
5945 ULONGEST *trampoline_size,
5946 unsigned char *jjump_pad_insn,
5947 ULONGEST *jjump_pad_insn_size,
5948 CORE_ADDR *adjusted_insn_addr,
5949 CORE_ADDR *adjusted_insn_addr_end,
5950 char *err)
5951 {
5952 return (*the_low_target.install_fast_tracepoint_jump_pad)
5953 (tpoint, tpaddr, collector, lockaddr, orig_size,
5954 jump_entry, trampoline, trampoline_size,
5955 jjump_pad_insn, jjump_pad_insn_size,
5956 adjusted_insn_addr, adjusted_insn_addr_end,
5957 err);
5958 }
5959
5960 static struct emit_ops *
5961 linux_emit_ops (void)
5962 {
5963 if (the_low_target.emit_ops != NULL)
5964 return (*the_low_target.emit_ops) ();
5965 else
5966 return NULL;
5967 }
5968
5969 static int
5970 linux_get_min_fast_tracepoint_insn_len (void)
5971 {
5972 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5973 }
5974
5975 /* Extract &phdr and num_phdr from the inferior's auxv. Return 0 on success. */
5976
5977 static int
5978 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5979 CORE_ADDR *phdr_memaddr, int *num_phdr)
5980 {
5981 char filename[PATH_MAX];
5982 int fd;
5983 const int auxv_size = is_elf64
5984 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5985 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5986
5987 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5988
5989 fd = open (filename, O_RDONLY);
5990 if (fd < 0)
5991 return 1;
5992
5993 *phdr_memaddr = 0;
5994 *num_phdr = 0;
5995 while (read (fd, buf, auxv_size) == auxv_size
5996 && (*phdr_memaddr == 0 || *num_phdr == 0))
5997 {
5998 if (is_elf64)
5999 {
6000 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6001
6002 switch (aux->a_type)
6003 {
6004 case AT_PHDR:
6005 *phdr_memaddr = aux->a_un.a_val;
6006 break;
6007 case AT_PHNUM:
6008 *num_phdr = aux->a_un.a_val;
6009 break;
6010 }
6011 }
6012 else
6013 {
6014 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6015
6016 switch (aux->a_type)
6017 {
6018 case AT_PHDR:
6019 *phdr_memaddr = aux->a_un.a_val;
6020 break;
6021 case AT_PHNUM:
6022 *num_phdr = aux->a_un.a_val;
6023 break;
6024 }
6025 }
6026 }
6027
6028 close (fd);
6029
6030 if (*phdr_memaddr == 0 || *num_phdr == 0)
6031 {
6032 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6033 "phdr_memaddr = %ld, phdr_num = %d",
6034 (long) *phdr_memaddr, *num_phdr);
6035 return 2;
6036 }
6037
6038 return 0;
6039 }
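
/* Standalone illustration of the same lookup for the current process:
   glibc (>= 2.16) exposes the auxv through getauxval, so a process can
   read its own AT_PHDR/AT_PHNUM without parsing /proc.  A sketch, not
   gdbserver code:  */
#if 0
#include <stdio.h>
#include <sys/auxv.h>

int
main (void)
{
  /* getauxval returns 0 if the entry is absent.  */
  printf ("AT_PHDR = %#lx, AT_PHNUM = %lu\n",
	  getauxval (AT_PHDR), getauxval (AT_PHNUM));
  return 0;
}
#endif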
6040
6041 /* Linearly traverse the program headers looking for one of type P_TYPE. */
6042
6043 static const void *
6044 find_phdr (const int is_elf64, const void *const phdr_begin,
6045 const void *const phdr_end, const ULONGEST p_type)
6046 {
6047 #define PHDR_NEXT(hdrp) ((const void *) ((const gdb_byte *) (hdrp) + \
6048 ELFXX_SIZEOF (is_elf64, *hdrp)))
6049
6050 const ElfXX_Phdr *phdr = phdr_begin;
6051
6052 while (PHDR_NEXT (phdr) <= phdr_end)
6053 {
6054 if (ELFXX_FLD (is_elf64, *phdr, p_type) == p_type)
6055 return phdr;
6056 phdr = PHDR_NEXT (phdr);
6057 }
6058
6059 return NULL;
6060 #undef PHDR_NEXT
6061 }
6062
6063 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6064
6065 static CORE_ADDR
6066 get_dynamic (const int pid, const int is_elf64)
6067 {
6068 CORE_ADDR phdr_memaddr, relocation;
6069 int num_phdr;
6070 unsigned char *phdr_buf;
6071 const ElfXX_Phdr *phdr;
6072 const int phdr_size = ELFXX_SIZEOF (is_elf64, *phdr);
6073
6074 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6075 return 0;
6076
6077 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6078 phdr_buf = alloca (num_phdr * phdr_size);
6079
6080 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6081 return 0;
6082
6083 /* Compute relocation: it is expected to be 0 for "regular" executables,
6084 non-zero for PIE ones. */
6085 relocation = -1;
6086 phdr = find_phdr (is_elf64, phdr_buf, phdr_buf + num_phdr * phdr_size,
6087 PT_PHDR);
6088 if (phdr != NULL)
6089 relocation = phdr_memaddr - ELFXX_FLD (is_elf64, *phdr, p_vaddr);
6090 if (relocation == -1)
6091 {
6092 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6093 all real-world executables, including PIE ones, always have
6094 PT_PHDR present. PT_PHDR is missing from some shared libraries and
6095 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
6096 provides DT_DEBUG anyway (fpc binaries are statically linked).
6097
6098 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6099
6100 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6101
6102 return 0;
6103 }
6104
6105 phdr = find_phdr (is_elf64, phdr_buf, phdr_buf + num_phdr * phdr_size,
6106 PT_DYNAMIC);
6107
6108 if (phdr != NULL)
6109 return ELFXX_FLD (is_elf64, *phdr, p_vaddr) + relocation;
6110
6111 return 0;
6112 }
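
/* Worked example (made-up addresses): for a PIE whose PT_PHDR records
   p_vaddr 0x40 while the auxv reports AT_PHDR = 0x555555554040, the
   relocation is 0x555555554000; a PT_DYNAMIC with p_vaddr 0x1d88 then
   yields &_DYNAMIC = 0x555555555d88.  For a non-PIE executable the
   relocation works out to 0 and p_vaddr is returned unchanged.  */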
6113
6114 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6115 can be 0 if the inferior does not yet have the library list initialized.
6116 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6117 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6118
6119 static CORE_ADDR
6120 get_r_debug (const int pid, const int is_elf64)
6121 {
6122 CORE_ADDR dynamic_memaddr;
6123 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6124 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6125 CORE_ADDR map = -1;
6126
6127 dynamic_memaddr = get_dynamic (pid, is_elf64);
6128 if (dynamic_memaddr == 0)
6129 return map;
6130
6131 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6132 {
6133 if (is_elf64)
6134 {
6135 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6136 #ifdef DT_MIPS_RLD_MAP
6137 union
6138 {
6139 Elf64_Xword map;
6140 unsigned char buf[sizeof (Elf64_Xword)];
6141 }
6142 rld_map;
6143
6144 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6145 {
6146 if (linux_read_memory (dyn->d_un.d_val,
6147 rld_map.buf, sizeof (rld_map.buf)) == 0)
6148 return rld_map.map;
6149 else
6150 break;
6151 }
6152 #endif /* DT_MIPS_RLD_MAP */
6153
6154 if (dyn->d_tag == DT_DEBUG && map == -1)
6155 map = dyn->d_un.d_val;
6156
6157 if (dyn->d_tag == DT_NULL)
6158 break;
6159 }
6160 else
6161 {
6162 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6163 #ifdef DT_MIPS_RLD_MAP
6164 union
6165 {
6166 Elf32_Word map;
6167 unsigned char buf[sizeof (Elf32_Word)];
6168 }
6169 rld_map;
6170
6171 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6172 {
6173 if (linux_read_memory (dyn->d_un.d_val,
6174 rld_map.buf, sizeof (rld_map.buf)) == 0)
6175 return rld_map.map;
6176 else
6177 break;
6178 }
6179 #endif /* DT_MIPS_RLD_MAP */
6180
6181 if (dyn->d_tag == DT_DEBUG && map == -1)
6182 map = dyn->d_un.d_val;
6183
6184 if (dyn->d_tag == DT_NULL)
6185 break;
6186 }
6187
6188 dynamic_memaddr += dyn_size;
6189 }
6190
6191 return map;
6192 }
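
/* Note that in the on-disk image DT_DEBUG's d_val is 0 (as shown by
   "readelf -d"); for the common glibc case it is the dynamic linker
   that stores &_r_debug there at run time, which is the value the
   scan above picks up.  */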
6193
6194 /* Read one pointer from MEMADDR in the inferior. */
6195
6196 static int
6197 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6198 {
6199 int ret;
6200
6201 /* Go through a union so this works on either big or little endian
6202 hosts, even when the inferior's pointer size is smaller than the
6203 size of CORE_ADDR. It is assumed the inferior's endianness is the
6204 same as the superior's. */
6205 union
6206 {
6207 CORE_ADDR core_addr;
6208 unsigned int ui;
6209 unsigned char uc;
6210 } addr;
6211
6212 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6213 if (ret == 0)
6214 {
6215 if (ptr_size == sizeof (CORE_ADDR))
6216 *ptr = addr.core_addr;
6217 else if (ptr_size == sizeof (unsigned int))
6218 *ptr = addr.ui;
6219 else
6220 gdb_assert_not_reached ("unhandled pointer size");
6221 }
6222 return ret;
6223 }
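
/* For example, a 64-bit gdbserver reading a 4-byte pointer from a
   32-bit inferior reads the bytes into the start of the union and
   picks them up through addr.ui; this is only correct because the
   inferior and gdbserver share the same endianness, as noted above.  */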
6224
6225 struct link_map_offsets
6226 {
6227 /* Offset and size of r_debug.r_version. */
6228 int r_version_offset;
6229
6230 /* Offset and size of r_debug.r_map. */
6231 int r_map_offset;
6232
6233 /* Offset to l_addr field in struct link_map. */
6234 int l_addr_offset;
6235
6236 /* Offset to l_name field in struct link_map. */
6237 int l_name_offset;
6238
6239 /* Offset to l_ld field in struct link_map. */
6240 int l_ld_offset;
6241
6242 /* Offset to l_next field in struct link_map. */
6243 int l_next_offset;
6244
6245 /* Offset to l_prev field in struct link_map. */
6246 int l_prev_offset;
6247 };
6248
6249
6250 /* Structure for holding a mapping. Only the mapping
6251 containing l_ld can have hex_build_id set. */
6252
6253 struct mapping_entry
6254 {
6255 /* Fields are populated from linux_find_memory_region parameters. */
6256
6257 ULONGEST vaddr;
6258 ULONGEST size;
6259 ULONGEST offset;
6260 ULONGEST inode;
6261
6262 /* Hex-encoded string allocated using xmalloc; it
6263 needs to be freed. It can be NULL. */
6264
6265 char *hex_build_id;
6266 };
6267
6268 typedef struct mapping_entry mapping_entry_s;
6269
6270 DEF_VEC_O(mapping_entry_s);
6271
6272 /* Free vector of mapping_entry_s objects. */
6273
6274 static void
6275 free_mapping_entry_vec (VEC (mapping_entry_s) *lst)
6276 {
6277 int ix;
6278 mapping_entry_s *p;
6279
6280 for (ix = 0; VEC_iterate (mapping_entry_s, lst, ix, p); ++ix)
6281 xfree (p->hex_build_id);
6282
6283 VEC_free (mapping_entry_s, lst);
6284 }
6285
6286 /* bsearch comparison function used for finding the mapping
6287 that contains the given l_ld address passed in K. */
6288
6289 static int
6290 compare_mapping_entry_range (const void *const k, const void *const b)
6291 {
6292 const ULONGEST key = *(const CORE_ADDR *) k;
6293 const mapping_entry_s *const p = b;
6294
6295 if (key < p->vaddr)
6296 return -1;
6297
6298 if (key < p->vaddr + p->size)
6299 return 0;
6300
6301 return 1;
6302 }
6303
6304 struct find_memory_region_callback_data
6305 {
6306 unsigned is_elf64;
6307
6308 /* Out: the collected mappings. Must be freed with free_mapping_entry_vec. */
6309 VEC (mapping_entry_s) *list;
6310 };
6311
6312 /* Read the build-id from PT_NOTE.
6313 Argument LOAD_ADDR is the run-time virtual address corresponding to
6314 the beginning of the first loadable segment. L_ADDR is the
6315 displacement as supplied by the dynamic linker. */
6316
6317 static void
6318 read_build_id (struct find_memory_region_callback_data *const p,
6319 mapping_entry_s *const bil, const CORE_ADDR load_addr,
6320 const CORE_ADDR l_addr)
6321 {
6322 const int is_elf64 = p->is_elf64;
6323 ElfXX_Ehdr ehdr;
6324
6325 if (linux_read_memory (load_addr, (unsigned char *) &ehdr,
6326 ELFXX_SIZEOF (is_elf64, ehdr)) == 0
6327 && ELFXX_FLD (is_elf64, ehdr, e_ident[EI_MAG0]) == ELFMAG0
6328 && ELFXX_FLD (is_elf64, ehdr, e_ident[EI_MAG1]) == ELFMAG1
6329 && ELFXX_FLD (is_elf64, ehdr, e_ident[EI_MAG2]) == ELFMAG2
6330 && ELFXX_FLD (is_elf64, ehdr, e_ident[EI_MAG3]) == ELFMAG3)
6331 {
6332 const ElfXX_Phdr *phdr;
6333 void *phdr_buf;
6334 const unsigned e_phentsize = ELFXX_FLD (is_elf64, ehdr, e_phentsize);
6335
6336 if (ELFXX_FLD (is_elf64, ehdr, e_phnum) >= 100
6337 || e_phentsize != ELFXX_SIZEOF (is_elf64, *phdr))
6338 {
6339 /* Basic sanity check failed. */
6340 warning (_("Could not identify program header at %s."),
6341 paddress (load_addr));
6342 return;
6343 }
6344
6345 phdr_buf = alloca (ELFXX_FLD (is_elf64, ehdr, e_phnum) * e_phentsize);
6346
6347 if (linux_read_memory (load_addr + ELFXX_FLD (is_elf64, ehdr, e_phoff),
6348 phdr_buf,
6349 ELFXX_FLD (is_elf64, ehdr, e_phnum) * e_phentsize)
6350 != 0)
6351 {
6352 warning (_("Could not read program header at %s."),
6353 paddress (load_addr));
6354 return;
6355 }
6356
6357 phdr = phdr_buf;
6358
6359 for (;;)
6360 {
6361 gdb_byte *pt_note;
6362 const gdb_byte *pt_end;
6363 const ElfXX_Nhdr *nhdr;
6364 CORE_ADDR note_addr;
6365
6366 phdr = find_phdr (p->is_elf64, phdr, (gdb_byte *) phdr_buf
6367 + ELFXX_FLD (is_elf64, ehdr, e_phnum) * e_phentsize,
6368 PT_NOTE);
6369 if (phdr == NULL)
6370 break;
6371 pt_note = xmalloc (ELFXX_FLD (is_elf64, *phdr, p_memsz));
6372 note_addr = ELFXX_FLD (is_elf64, *phdr, p_vaddr) + l_addr;
6373 if (linux_read_memory (note_addr, pt_note,
6374 ELFXX_FLD (is_elf64, *phdr, p_memsz)) != 0)
6375 {
6376 xfree (pt_note);
6377 warning (_("Could not read note at address 0x%s"),
6378 paddress (note_addr));
6379 break;
6380 }
6381
6382 pt_end = pt_note + ELFXX_FLD (is_elf64, *phdr, p_memsz);
6383 nhdr = (void *) pt_note;
6384 while ((const gdb_byte *) nhdr < pt_end)
6385 {
6386 const size_t namesz
6387 = ELFXX_ROUNDUP_4 (is_elf64, ELFXX_FLD (is_elf64, *nhdr,
6388 n_namesz));
6389 const size_t descsz
6390 = ELFXX_ROUNDUP_4 (is_elf64, ELFXX_FLD (is_elf64, *nhdr,
6391 n_descsz));
6392 const size_t note_sz = (ELFXX_SIZEOF (is_elf64, *nhdr) + namesz
6393 + descsz);
6394
6395 if (((const gdb_byte *) nhdr + note_sz) > pt_end || note_sz == 0
6396 || descsz == 0)
6397 {
6398 warning (_("Malformed PT_NOTE at address 0x%s\n"),
6399 paddress (note_addr + (gdb_byte *) nhdr - pt_note));
6400 break;
6401 }
6402 if (ELFXX_FLD (is_elf64, *nhdr, n_type) == NT_GNU_BUILD_ID
6403 && ELFXX_FLD (is_elf64, *nhdr, n_namesz) == 4)
6404 {
6405 const char gnu[4] = "GNU\0";
6406 const char *const pname
6407 = (char *) nhdr + ELFXX_SIZEOF (is_elf64, *nhdr);
6408
6409 if (memcmp (pname, gnu, 4) == 0)
6410 {
6411 const size_t n_descsz = ELFXX_FLD (is_elf64, *nhdr,
6412 n_descsz);
6413
6414 bil->hex_build_id = xmalloc (n_descsz * 2 + 1);
6415 bin2hex ((const gdb_byte *) pname + namesz,
6416 bil->hex_build_id, n_descsz);
6417 xfree (pt_note);
6418 return;
6419 }
6420 }
6421 nhdr = (void *) ((gdb_byte *) nhdr + note_sz);
6422 }
6423 xfree (pt_note);
6424 }
6425 }
6426 }
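
/* For reference, the note parsed above has this layout in memory for
   the common 20-byte (SHA1) build-id case; name and desc sizes are
   rounded up to 4-byte alignment:

     n_namesz = 4		name is "GNU\0"
     n_descsz = 20		length of the build-id
     n_type   = 3		NT_GNU_BUILD_ID
     name     = 'G' 'N' 'U' '\0'
     desc     = the 20 build-id bytes

   bin2hex then turns DESC into the 40-character string cached in
   hex_build_id.  */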
6427
6428 static linux_find_memory_region_ftype find_memory_region_callback;
6429
6430 /* Add a mapping_entry. See linux_find_memory_region_ftype for the
6431 description of the parameters. */
6432
6433 static int
6434 find_memory_region_callback (ULONGEST vaddr, ULONGEST size, ULONGEST offset,
6435 ULONGEST inode, int read, int write, int exec,
6436 int modified, const char *filename, void *data)
6437 {
6438 if (inode != 0)
6439 {
6440 struct find_memory_region_callback_data *const p = data;
6441 mapping_entry_s bil;
6442
6443 bil.vaddr = vaddr;
6444 bil.size = size;
6445 bil.offset = offset;
6446 bil.inode = inode;
6447 bil.hex_build_id = NULL;
6448
6449 VEC_safe_push (mapping_entry_s, p->list, &bil);
6450 }
6451
6452 /* Continue the traversal. */
6453 return 0;
6454 }
6455
6456 /* Linearly search backwards from RBEGIN towards REND for the
6457 lowest-vaddr mapping with the same inode as RBEGIN and a zero offset. */
6458
6459 static mapping_entry_s *
6460 lrfind_mapping_entry (mapping_entry_s *const rbegin,
6461 const mapping_entry_s *const rend)
6462 {
6463 mapping_entry_s *p;
6464
6465 for (p = rbegin - 1; p >= rend; --p)
6466 if (p->offset == 0 && p->inode == rbegin->inode)
6467 return p;
6468
6469 return NULL;
6470 }
6471
6472 /* Get the build-id for the given L_LD, where L_LD corresponds to
6473 link_map.l_ld as specified by the dynamic linker.
6474 DATA must point to an already filled list of mapping_entry elements.
6475
6476 If the build-id has not been read yet, read it and cache it in the
6477 corresponding list element.
6478
6479 Return the build-id as stored in the list element corresponding
6480 to L_LD.
6481
6482 NULL may be returned if the build-id could not be fetched.
6483
6484 The returned string must not be freed explicitly. */
6485
6486 static const char *
6487 get_hex_build_id (const CORE_ADDR l_addr, const CORE_ADDR l_ld,
6488 struct find_memory_region_callback_data *const data)
6489 {
6490 mapping_entry_s *bil;
6491
6492 bil = bsearch (&l_ld, VEC_address (mapping_entry_s, data->list),
6493 VEC_length (mapping_entry_s, data->list),
6494 sizeof (mapping_entry_s), compare_mapping_entry_range);
6495
6496 if (bil == NULL)
6497 return NULL;
6498
6499 if (bil->hex_build_id == NULL)
6500 {
6501 mapping_entry_s *bil_min;
6502
6503 bil_min = lrfind_mapping_entry (bil, VEC_address (mapping_entry_s,
6504 data->list));
6505 if (bil_min != NULL)
6506 read_build_id (data, bil, bil_min->vaddr, l_addr);
6507 else
6508 {
6509 /* Do not try to find hex_build_id again. */
6510 bil->hex_build_id = xstrdup (BUILD_ID_INVALID);
6511 warning (_("Could not determine load address; mapping entry with "
6512 "offset 0 corresponding to l_ld = 0x%s could not be "
6513 "found; build-id can not be used."),
6514 paddress (l_ld));
6515 }
6516 }
6517
6518 return bil->hex_build_id;
6519 }
6520
6521 /* Construct qXfer:libraries-svr4:read reply. */
6522
6523 static int
6524 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6525 unsigned const char *writebuf,
6526 CORE_ADDR offset, int len)
6527 {
6528 char *document;
6529 unsigned document_len;
6530 struct process_info_private *const priv = current_process ()->priv;
6531 char filename[PATH_MAX];
6532 int pid, is_elf64;
6533 struct find_memory_region_callback_data data;
6534
6535 /* COREFILTER_ANON_PRIVATE and COREFILTER_ANON_SHARED mappings have no
6536 associated file, so they are not expected to have an ELF header. */
6537 const enum filterflags filterflags = (COREFILTER_MAPPED_PRIVATE
6538 | COREFILTER_MAPPED_SHARED
6539 | COREFILTER_ELF_HEADERS
6540 | COREFILTER_HUGETLB_PRIVATE
6541 | COREFILTER_HUGETLB_SHARED);
6542
6543 static const struct link_map_offsets lmo_32bit_offsets =
6544 {
6545 0, /* r_version offset. */
6546 4, /* r_debug.r_map offset. */
6547 0, /* l_addr offset in link_map. */
6548 4, /* l_name offset in link_map. */
6549 8, /* l_ld offset in link_map. */
6550 12, /* l_next offset in link_map. */
6551 16 /* l_prev offset in link_map. */
6552 };
6553
6554 static const struct link_map_offsets lmo_64bit_offsets =
6555 {
6556 0, /* r_version offset. */
6557 8, /* r_debug.r_map offset. */
6558 0, /* l_addr offset in link_map. */
6559 8, /* l_name offset in link_map. */
6560 16, /* l_ld offset in link_map. */
6561 24, /* l_next offset in link_map. */
6562 32 /* l_prev offset in link_map. */
6563 };
6564 const struct link_map_offsets *lmo;
6565 unsigned int machine;
6566 int ptr_size;
6567 CORE_ADDR lm_addr = 0, lm_prev = 0;
6568 int allocated = 1024;
6569 char *p;
6570 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6571 int header_done = 0;
6572
6573 if (writebuf != NULL)
6574 return -2;
6575 if (readbuf == NULL)
6576 return -1;
6577
6578 pid = lwpid_of (current_thread);
6579 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6580 is_elf64 = elf_64_file_p (filename, &machine);
6581 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6582 ptr_size = is_elf64 ? 8 : 4;
6583
6584 data.is_elf64 = is_elf64;
6585 data.list = NULL;
6586 VEC_reserve (mapping_entry_s, data.list, 16);
6587 if (linux_find_memory_regions_full (pid, filterflags,
6588 find_memory_region_callback, &data)
6589 < 0)
6590 warning (_("Finding memory regions failed"));
6591
6592 while (annex[0] != '\0')
6593 {
6594 const char *sep;
6595 CORE_ADDR *addrp;
6596 int len;
6597
6598 sep = strchr (annex, '=');
6599 if (sep == NULL)
6600 break;
6601
6602 len = sep - annex;
6603 if (len == 5 && startswith (annex, "start"))
6604 addrp = &lm_addr;
6605 else if (len == 4 && startswith (annex, "prev"))
6606 addrp = &lm_prev;
6607 else
6608 {
6609 annex = strchr (sep, ';');
6610 if (annex == NULL)
6611 break;
6612 annex++;
6613 continue;
6614 }
6615
6616 annex = decode_address_to_semicolon (addrp, sep + 1);
6617 }
6618
6619 if (lm_addr == 0)
6620 {
6621 int r_version = 0;
6622
6623 if (priv->r_debug == 0)
6624 priv->r_debug = get_r_debug (pid, is_elf64);
6625
6626 /* We failed to find DT_DEBUG. This situation will not change
6627 for this inferior, so do not retry it. Report it to GDB as
6628 E01; see the GDB side in solib-svr4.c for the reasons. */
6629 if (priv->r_debug == (CORE_ADDR) -1)
6630 return -1;
6631
6632 if (priv->r_debug != 0)
6633 {
6634 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6635 (unsigned char *) &r_version,
6636 sizeof (r_version)) != 0
6637 || r_version != 1)
6638 {
6639 warning ("unexpected r_debug version %d", r_version);
6640 }
6641 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6642 &lm_addr, ptr_size) != 0)
6643 {
6644 warning ("unable to read r_map from 0x%lx",
6645 (long) priv->r_debug + lmo->r_map_offset);
6646 }
6647 }
6648 }
6649
6650 document = xmalloc (allocated);
6651 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6652 p = document + strlen (document);
6653
6654 while (lm_addr
6655 && read_one_ptr (lm_addr + lmo->l_name_offset,
6656 &l_name, ptr_size) == 0
6657 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6658 &l_addr, ptr_size) == 0
6659 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6660 &l_ld, ptr_size) == 0
6661 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6662 &l_prev, ptr_size) == 0
6663 && read_one_ptr (lm_addr + lmo->l_next_offset,
6664 &l_next, ptr_size) == 0)
6665 {
6666 unsigned char libname[PATH_MAX];
6667
6668 if (lm_prev != l_prev)
6669 {
6670 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6671 (long) lm_prev, (long) l_prev);
6672 break;
6673 }
6674
6675 /* Ignore the first entry even if it has a valid name, as it
6676 corresponds to the main executable. The first entry should not be
6677 skipped if the dynamic loader was loaded late by a static executable
6678 (see the solib-svr4.c parameter ignore_first). But in such a case
6679 the main executable does not have PT_DYNAMIC present, and this
6680 function has already exited above due to a failed get_r_debug. */
6681 if (lm_prev == 0)
6682 {
6683 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6684 p = p + strlen (p);
6685 }
6686 else
6687 {
6688 /* Not checking for error because reading may stop before
6689 we've got PATH_MAX worth of characters. */
6690 libname[0] = '\0';
6691 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6692 libname[sizeof (libname) - 1] = '\0';
6693 if (libname[0] != '\0')
6694 {
6695 /* 6x the size for xml_escape_text below. */
6696 size_t len = 6 * strlen ((char *) libname);
6697 char *name;
6698 const char *hex_enc_build_id = NULL;
6699
6700 if (!header_done)
6701 {
6702 /* Terminate `<library-list-svr4'. */
6703 *p++ = '>';
6704 header_done = 1;
6705 }
6706
6707 hex_enc_build_id = get_hex_build_id (l_addr, l_ld, &data);
6708
6709 while (allocated < (p - document + len + 200
6710 + (hex_enc_build_id != NULL
6711 ? strlen (hex_enc_build_id) : 0)))
6712 {
6713 /* Expand to guarantee sufficient storage. */
6714 uintptr_t document_len = p - document;
6715
6716 document = xrealloc (document, 2 * allocated);
6717 allocated *= 2;
6718 p = document + document_len;
6719 }
6720
6721 name = xml_escape_text ((char *) libname);
6722 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6723 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"",
6724 name, (unsigned long) lm_addr,
6725 (unsigned long) l_addr, (unsigned long) l_ld);
6726 if (hex_enc_build_id != NULL
6727 && strcmp (hex_enc_build_id, BUILD_ID_INVALID) != 0)
6728 p += sprintf (p, " build-id=\"%s\"", hex_enc_build_id);
6729 p += sprintf (p, "/>");
6730 free (name);
6731 }
6732 }
6733
6734 lm_prev = lm_addr;
6735 lm_addr = l_next;
6736 }
6737
6738 if (!header_done)
6739 {
6740 /* Empty list; terminate `<library-list-svr4'. */
6741 strcpy (p, "/>");
6742 }
6743 else
6744 strcpy (p, "</library-list-svr4>");
6745
6746 document_len = strlen (document);
6747 if (offset < document_len)
6748 document_len -= offset;
6749 else
6750 document_len = 0;
6751 if (len > document_len)
6752 len = document_len;
6753
6754 memcpy (readbuf, document + offset, len);
6755 xfree (document);
6756 free_mapping_entry_vec (data.list);
6757
6758 return len;
6759 }
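
/* A reply assembled by the loop above looks like this (a sketch with
   made-up addresses and a truncated build-id):

     <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
       <library name="/lib64/libc.so.6" lm="0x7ffff7ffe750"
                l_addr="0x7ffff7a00000" l_ld="0x7ffff7dcdb80"
                build-id="f0c0..."/>
     </library-list-svr4>

   GDB pages through the document with OFFSET/LEN, and can restart the
   walk mid-list via the "start=" and "prev=" fields of the annex, as
   parsed at the top of this function.  */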
6760
6761 #ifdef HAVE_LINUX_BTRACE
6762
6763 /* See to_enable_btrace target method. */
6764
6765 static struct btrace_target_info *
6766 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6767 {
6768 struct btrace_target_info *tinfo;
6769
6770 tinfo = linux_enable_btrace (ptid, conf);
6771
6772 if (tinfo != NULL && tinfo->ptr_bits == 0)
6773 {
6774 struct thread_info *thread = find_thread_ptid (ptid);
6775 struct regcache *regcache = get_thread_regcache (thread, 0);
6776
6777 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6778 }
6779
6780 return tinfo;
6781 }
6782
6783 /* See to_disable_btrace target method. */
6784
6785 static int
6786 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6787 {
6788 enum btrace_error err;
6789
6790 err = linux_disable_btrace (tinfo);
6791 return (err == BTRACE_ERR_NONE ? 0 : -1);
6792 }
6793
6794 /* Encode an Intel(R) Processor Trace configuration. */
6795
6796 static void
6797 linux_low_encode_pt_config (struct buffer *buffer,
6798 const struct btrace_data_pt_config *config)
6799 {
6800 buffer_grow_str (buffer, "<pt-config>\n");
6801
6802 switch (config->cpu.vendor)
6803 {
6804 case CV_INTEL:
6805 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6806 "model=\"%u\" stepping=\"%u\"/>\n",
6807 config->cpu.family, config->cpu.model,
6808 config->cpu.stepping);
6809 break;
6810
6811 default:
6812 break;
6813 }
6814
6815 buffer_grow_str (buffer, "</pt-config>\n");
6816 }
6817
6818 /* Encode a raw buffer. */
6819
6820 static void
6821 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6822 unsigned int size)
6823 {
6824 if (size == 0)
6825 return;
6826
6827 /* We use hex encoding - see common/rsp-low.h. */
6828 buffer_grow_str (buffer, "<raw>\n");
6829
6830 while (size-- > 0)
6831 {
6832 char elem[2];
6833
6834 elem[0] = tohex ((*data >> 4) & 0xf);
6835 elem[1] = tohex (*data++ & 0xf);
6836
6837 buffer_grow (buffer, elem, 2);
6838 }
6839
6840 buffer_grow_str (buffer, "</raw>\n");
6841 }
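
/* For example, the two bytes { 0xde, 0xad } are emitted between the
   <raw> tags as the four characters "dead".  */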
6842
6843 /* See to_read_btrace target method. */
6844
6845 static int
6846 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6847 int type)
6848 {
6849 struct btrace_data btrace;
6850 struct btrace_block *block;
6851 enum btrace_error err;
6852 int i;
6853
6854 btrace_data_init (&btrace);
6855
6856 err = linux_read_btrace (&btrace, tinfo, type);
6857 if (err != BTRACE_ERR_NONE)
6858 {
6859 if (err == BTRACE_ERR_OVERFLOW)
6860 buffer_grow_str0 (buffer, "E.Overflow.");
6861 else
6862 buffer_grow_str0 (buffer, "E.Generic Error.");
6863
6864 goto err;
6865 }
6866
6867 switch (btrace.format)
6868 {
6869 case BTRACE_FORMAT_NONE:
6870 buffer_grow_str0 (buffer, "E.No Trace.");
6871 goto err;
6872
6873 case BTRACE_FORMAT_BTS:
6874 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6875 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6876
6877 for (i = 0;
6878 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6879 i++)
6880 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6881 paddress (block->begin), paddress (block->end));
6882
6883 buffer_grow_str0 (buffer, "</btrace>\n");
6884 break;
6885
6886 case BTRACE_FORMAT_PT:
6887 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6888 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6889 buffer_grow_str (buffer, "<pt>\n");
6890
6891 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6892
6893 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6894 btrace.variant.pt.size);
6895
6896 buffer_grow_str (buffer, "</pt>\n");
6897 buffer_grow_str0 (buffer, "</btrace>\n");
6898 break;
6899
6900 default:
6901 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6902 goto err;
6903 }
6904
6905 btrace_data_fini (&btrace);
6906 return 0;
6907
6908 err:
6909 btrace_data_fini (&btrace);
6910 return -1;
6911 }
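
/* A BTS reply built above looks like this (made-up block addresses):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
       <block begin="0x400500" end="0x40051f"/>
       <block begin="0x400630" end="0x400642"/>
     </btrace>
*/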
6912
6913 /* See to_btrace_conf target method. */
6914
6915 static int
6916 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6917 struct buffer *buffer)
6918 {
6919 const struct btrace_config *conf;
6920
6921 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6922 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6923
6924 conf = linux_btrace_conf (tinfo);
6925 if (conf != NULL)
6926 {
6927 switch (conf->format)
6928 {
6929 case BTRACE_FORMAT_NONE:
6930 break;
6931
6932 case BTRACE_FORMAT_BTS:
6933 buffer_xml_printf (buffer, "<bts");
6934 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6935 buffer_xml_printf (buffer, " />\n");
6936 break;
6937
6938 case BTRACE_FORMAT_PT:
6939 buffer_xml_printf (buffer, "<pt");
6940 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6941 buffer_xml_printf (buffer, "/>\n");
6942 break;
6943 }
6944 }
6945
6946 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6947 return 0;
6948 }
6949 #endif /* HAVE_LINUX_BTRACE */
6950
6951 /* See nat/linux-nat.h. */
6952
6953 ptid_t
6954 current_lwp_ptid (void)
6955 {
6956 return ptid_of (current_thread);
6957 }
6958
6959 static struct target_ops linux_target_ops = {
6960 linux_create_inferior,
6961 linux_attach,
6962 linux_kill,
6963 linux_detach,
6964 linux_mourn,
6965 linux_join,
6966 linux_thread_alive,
6967 linux_resume,
6968 linux_wait,
6969 linux_fetch_registers,
6970 linux_store_registers,
6971 linux_prepare_to_access_memory,
6972 linux_done_accessing_memory,
6973 linux_read_memory,
6974 linux_write_memory,
6975 linux_look_up_symbols,
6976 linux_request_interrupt,
6977 linux_read_auxv,
6978 linux_supports_z_point_type,
6979 linux_insert_point,
6980 linux_remove_point,
6981 linux_stopped_by_sw_breakpoint,
6982 linux_supports_stopped_by_sw_breakpoint,
6983 linux_stopped_by_hw_breakpoint,
6984 linux_supports_stopped_by_hw_breakpoint,
6985 linux_supports_conditional_breakpoints,
6986 linux_stopped_by_watchpoint,
6987 linux_stopped_data_address,
6988 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6989 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6990 && defined(PT_TEXT_END_ADDR)
6991 linux_read_offsets,
6992 #else
6993 NULL,
6994 #endif
6995 #ifdef USE_THREAD_DB
6996 thread_db_get_tls_address,
6997 #else
6998 NULL,
6999 #endif
7000 linux_qxfer_spu,
7001 hostio_last_error_from_errno,
7002 linux_qxfer_osdata,
7003 linux_xfer_siginfo,
7004 linux_supports_non_stop,
7005 linux_async,
7006 linux_start_non_stop,
7007 linux_supports_multi_process,
7008 linux_supports_fork_events,
7009 linux_supports_vfork_events,
7010 linux_handle_new_gdb_connection,
7011 #ifdef USE_THREAD_DB
7012 thread_db_handle_monitor_command,
7013 #else
7014 NULL,
7015 #endif
7016 linux_common_core_of_thread,
7017 linux_read_loadmap,
7018 linux_process_qsupported,
7019 linux_supports_tracepoints,
7020 linux_read_pc,
7021 linux_write_pc,
7022 linux_thread_stopped,
7023 NULL,
7024 linux_pause_all,
7025 linux_unpause_all,
7026 linux_stabilize_threads,
7027 linux_install_fast_tracepoint_jump_pad,
7028 linux_emit_ops,
7029 linux_supports_disable_randomization,
7030 linux_get_min_fast_tracepoint_insn_len,
7031 linux_qxfer_libraries_svr4,
7032 linux_supports_agent,
7033 #ifdef HAVE_LINUX_BTRACE
7034 linux_supports_btrace,
7035 linux_low_enable_btrace,
7036 linux_low_disable_btrace,
7037 linux_low_read_btrace,
7038 linux_low_btrace_conf,
7039 #else
7040 NULL,
7041 NULL,
7042 NULL,
7043 NULL,
7044 NULL,
7045 #endif
7046 linux_supports_range_stepping,
7047 linux_proc_pid_to_exec_file,
7048 linux_mntns_open_cloexec,
7049 linux_mntns_unlink,
7050 linux_mntns_readlink,
7051 };
7052
7053 static void
7054 linux_init_signals ()
7055 {
7056 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
7057 to find what the cancel signal actually is. */
7058 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
7059 signal (__SIGRTMIN+1, SIG_IGN);
7060 #endif
7061 }
7062
7063 #ifdef HAVE_LINUX_REGSETS
7064 void
7065 initialize_regsets_info (struct regsets_info *info)
7066 {
7067 for (info->num_regsets = 0;
7068 info->regsets[info->num_regsets].size >= 0;
7069 info->num_regsets++)
7070 ;
7071 }
7072 #endif
7073
7074 void
7075 initialize_low (void)
7076 {
7077 struct sigaction sigchld_action;
7078 memset (&sigchld_action, 0, sizeof (sigchld_action));
7079 set_target_ops (&linux_target_ops);
7080 set_breakpoint_data (the_low_target.breakpoint,
7081 the_low_target.breakpoint_len);
7082 linux_init_signals ();
7083 linux_ptrace_init_warnings ();
7084
7085 sigchld_action.sa_handler = sigchld_handler;
7086 sigemptyset (&sigchld_action.sa_mask);
7087 sigchld_action.sa_flags = SA_RESTART;
7088 sigaction (SIGCHLD, &sigchld_action, NULL);
7089
7090 initialize_low_arch ();
7091
7092 linux_check_ptrace_features ();
7093 }