gdbserver: fix killed-outside.exp
[deliverable/binutils-gdb.git] / gdb / gdbserver / linux-low.c
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23 #include "tdesc.h"
24 #include "rsp-low.h"
25
26 #include "nat/linux-nat.h"
27 #include "nat/linux-waitpid.h"
28 #include "gdb_wait.h"
29 #include "nat/gdb_ptrace.h"
30 #include "nat/linux-ptrace.h"
31 #include "nat/linux-procfs.h"
32 #include "nat/linux-personality.h"
33 #include <signal.h>
34 #include <sys/ioctl.h>
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <sys/syscall.h>
38 #include <sched.h>
39 #include <ctype.h>
40 #include <pwd.h>
41 #include <sys/types.h>
42 #include <dirent.h>
43 #include <sys/stat.h>
44 #include <sys/vfs.h>
45 #include <sys/uio.h>
46 #include "filestuff.h"
47 #include "tracepoint.h"
48 #include "hostio.h"
49 #ifndef ELFMAG0
50 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
51 then ELFMAG0 will have been defined. If it didn't get included by
52 gdb_proc_service.h then including it will likely introduce a duplicate
53 definition of elf_fpregset_t. */
54 #include <elf.h>
55 #endif
56 #include "nat/linux-namespaces.h"
57
58 #ifndef SPUFS_MAGIC
59 #define SPUFS_MAGIC 0x23c9b64e
60 #endif
61
62 #ifdef HAVE_PERSONALITY
63 # include <sys/personality.h>
64 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
65 # define ADDR_NO_RANDOMIZE 0x0040000
66 # endif
67 #endif
68
69 #ifndef O_LARGEFILE
70 #define O_LARGEFILE 0
71 #endif
72
73 /* Some targets did not define these ptrace constants from the start,
74 so gdbserver defines them locally here. In the future, these may
75 be removed after they are added to asm/ptrace.h. */
76 #if !(defined(PT_TEXT_ADDR) \
77 || defined(PT_DATA_ADDR) \
78 || defined(PT_TEXT_END_ADDR))
79 #if defined(__mcoldfire__)
80 /* These are still undefined in 3.10 kernels. */
81 #define PT_TEXT_ADDR 49*4
82 #define PT_DATA_ADDR 50*4
83 #define PT_TEXT_END_ADDR 51*4
84 /* BFIN already defines these since at least 2.6.32 kernels. */
85 #elif defined(BFIN)
86 #define PT_TEXT_ADDR 220
87 #define PT_TEXT_END_ADDR 224
88 #define PT_DATA_ADDR 228
89 /* These are still undefined in 3.10 kernels. */
90 #elif defined(__TMS320C6X__)
91 #define PT_TEXT_ADDR (0x10000*4)
92 #define PT_DATA_ADDR (0x10004*4)
93 #define PT_TEXT_END_ADDR (0x10008*4)
94 #endif
95 #endif
96
97 #ifdef HAVE_LINUX_BTRACE
98 # include "nat/linux-btrace.h"
99 # include "btrace-common.h"
100 #endif
101
102 #ifndef HAVE_ELF32_AUXV_T
103 /* Copied from glibc's elf.h. */
104 typedef struct
105 {
106 uint32_t a_type; /* Entry type */
107 union
108 {
109 uint32_t a_val; /* Integer value */
110 /* We use to have pointer elements added here. We cannot do that,
111 though, since it does not work when using 32-bit definitions
112 on 64-bit platforms and vice versa. */
113 } a_un;
114 } Elf32_auxv_t;
115 #endif
116
117 #ifndef HAVE_ELF64_AUXV_T
118 /* Copied from glibc's elf.h. */
119 typedef struct
120 {
121 uint64_t a_type; /* Entry type */
122 union
123 {
124 uint64_t a_val; /* Integer value */
125 /* We use to have pointer elements added here. We cannot do that,
126 though, since it does not work when using 32-bit definitions
127 on 64-bit platforms and vice versa. */
128 } a_un;
129 } Elf64_auxv_t;
130 #endif
131
132 /* Does the current host support PTRACE_GETREGSET? */
133 int have_ptrace_getregset = -1;
134
135 /* LWP accessors. */
136
137 /* See nat/linux-nat.h. */
138
139 ptid_t
140 ptid_of_lwp (struct lwp_info *lwp)
141 {
142 return ptid_of (get_lwp_thread (lwp));
143 }
144
145 /* See nat/linux-nat.h. */
146
147 void
148 lwp_set_arch_private_info (struct lwp_info *lwp,
149 struct arch_lwp_info *info)
150 {
151 lwp->arch_private = info;
152 }
153
154 /* See nat/linux-nat.h. */
155
156 struct arch_lwp_info *
157 lwp_arch_private_info (struct lwp_info *lwp)
158 {
159 return lwp->arch_private;
160 }
161
162 /* See nat/linux-nat.h. */
163
164 int
165 lwp_is_stopped (struct lwp_info *lwp)
166 {
167 return lwp->stopped;
168 }
169
170 /* See nat/linux-nat.h. */
171
172 enum target_stop_reason
173 lwp_stop_reason (struct lwp_info *lwp)
174 {
175 return lwp->stop_reason;
176 }
177
178 /* A list of all unknown processes which receive stop signals. Some
179 other process will presumably claim each of these as forked
180 children momentarily. */
181
182 struct simple_pid_list
183 {
184 /* The process ID. */
185 int pid;
186
187 /* The status as reported by waitpid. */
188 int status;
189
190 /* Next in chain. */
191 struct simple_pid_list *next;
192 };
193 struct simple_pid_list *stopped_pids;
194
195 /* Trivial list manipulation functions to keep track of a list of new
196 stopped processes. */
197
198 static void
199 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
200 {
201 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
202
203 new_pid->pid = pid;
204 new_pid->status = status;
205 new_pid->next = *listp;
206 *listp = new_pid;
207 }
208
209 static int
210 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
211 {
212 struct simple_pid_list **p;
213
214 for (p = listp; *p != NULL; p = &(*p)->next)
215 if ((*p)->pid == pid)
216 {
217 struct simple_pid_list *next = (*p)->next;
218
219 *statusp = (*p)->status;
220 xfree (*p);
221 *p = next;
222 return 1;
223 }
224 return 0;
225 }
226
227 enum stopping_threads_kind
228 {
229 /* Not stopping threads presently. */
230 NOT_STOPPING_THREADS,
231
232 /* Stopping threads. */
233 STOPPING_THREADS,
234
235 /* Stopping and suspending threads. */
236 STOPPING_AND_SUSPENDING_THREADS
237 };
238
239 /* This is set while stop_all_lwps is in effect. */
240 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
241
242 /* FIXME make into a target method? */
243 int using_threads = 1;
244
245 /* True if we're presently stabilizing threads (moving them out of
246 jump pads). */
247 static int stabilizing_threads;
248
249 static void linux_resume_one_lwp (struct lwp_info *lwp,
250 int step, int signal, siginfo_t *info);
251 static void linux_resume (struct thread_resume *resume_info, size_t n);
252 static void stop_all_lwps (int suspend, struct lwp_info *except);
253 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
254 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
255 int *wstat, int options);
256 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
257 static struct lwp_info *add_lwp (ptid_t ptid);
258 static void linux_mourn (struct process_info *process);
259 static int linux_stopped_by_watchpoint (void);
260 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
261 static int lwp_is_marked_dead (struct lwp_info *lwp);
262 static void proceed_all_lwps (void);
263 static int finish_step_over (struct lwp_info *lwp);
264 static int kill_lwp (unsigned long lwpid, int signo);
265 static void enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info);
266 static void complete_ongoing_step_over (void);
267
268 /* When the event-loop is doing a step-over, this points at the thread
269 being stepped. */
270 ptid_t step_over_bkpt;
271
272 /* True if the low target can hardware single-step. Such targets
273 don't need a BREAKPOINT_REINSERT_ADDR callback. */
274
275 static int
276 can_hardware_single_step (void)
277 {
278 return (the_low_target.breakpoint_reinsert_addr == NULL);
279 }
280
281 /* True if the low target supports memory breakpoints. If so, we'll
282 have a GET_PC implementation. */
283
284 static int
285 supports_breakpoints (void)
286 {
287 return (the_low_target.get_pc != NULL);
288 }
289
290 /* Returns true if this target can support fast tracepoints. This
291 does not mean that the in-process agent has been loaded in the
292 inferior. */
293
294 static int
295 supports_fast_tracepoints (void)
296 {
297 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
298 }
299
300 /* True if LWP is stopped in its stepping range. */
301
302 static int
303 lwp_in_step_range (struct lwp_info *lwp)
304 {
305 CORE_ADDR pc = lwp->stop_pc;
306
307 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
308 }
309
310 struct pending_signals
311 {
312 int signal;
313 siginfo_t info;
314 struct pending_signals *prev;
315 };
316
317 /* The read/write ends of the pipe registered as waitable file in the
318 event loop. */
319 static int linux_event_pipe[2] = { -1, -1 };
320
321 /* True if we're currently in async mode. */
322 #define target_is_async_p() (linux_event_pipe[0] != -1)
323
324 static void send_sigstop (struct lwp_info *lwp);
325 static void wait_for_sigstop (void);
326
327 /* Return non-zero if HEADER is a 64-bit ELF file. */
328
329 static int
330 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
331 {
332 if (header->e_ident[EI_MAG0] == ELFMAG0
333 && header->e_ident[EI_MAG1] == ELFMAG1
334 && header->e_ident[EI_MAG2] == ELFMAG2
335 && header->e_ident[EI_MAG3] == ELFMAG3)
336 {
337 *machine = header->e_machine;
338 return header->e_ident[EI_CLASS] == ELFCLASS64;
339
340 }
341 *machine = EM_NONE;
342 return -1;
343 }
344
345 /* Return non-zero if FILE is a 64-bit ELF file,
346 zero if the file is not a 64-bit ELF file,
347 and -1 if the file is not accessible or doesn't exist. */
348
349 static int
350 elf_64_file_p (const char *file, unsigned int *machine)
351 {
352 Elf64_Ehdr header;
353 int fd;
354
355 fd = open (file, O_RDONLY);
356 if (fd < 0)
357 return -1;
358
359 if (read (fd, &header, sizeof (header)) != sizeof (header))
360 {
361 close (fd);
362 return 0;
363 }
364 close (fd);
365
366 return elf_64_header_p (&header, machine);
367 }
368
369 /* Accepts an integer PID; Returns true if the executable PID is
370 running is a 64-bit ELF file.. */
371
372 int
373 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
374 {
375 char file[PATH_MAX];
376
377 sprintf (file, "/proc/%d/exe", pid);
378 return elf_64_file_p (file, machine);
379 }
380
381 static void
382 delete_lwp (struct lwp_info *lwp)
383 {
384 struct thread_info *thr = get_lwp_thread (lwp);
385
386 if (debug_threads)
387 debug_printf ("deleting %ld\n", lwpid_of (thr));
388
389 remove_thread (thr);
390 free (lwp->arch_private);
391 free (lwp);
392 }
393
394 /* Add a process to the common process list, and set its private
395 data. */
396
397 static struct process_info *
398 linux_add_process (int pid, int attached)
399 {
400 struct process_info *proc;
401
402 proc = add_process (pid, attached);
403 proc->priv = XCNEW (struct process_info_private);
404
405 if (the_low_target.new_process != NULL)
406 proc->priv->arch_private = the_low_target.new_process ();
407
408 return proc;
409 }
410
411 static CORE_ADDR get_pc (struct lwp_info *lwp);
412
413 /* Implement the arch_setup target_ops method. */
414
415 static void
416 linux_arch_setup (void)
417 {
418 the_low_target.arch_setup ();
419 }
420
421 /* Call the target arch_setup function on THREAD. */
422
423 static void
424 linux_arch_setup_thread (struct thread_info *thread)
425 {
426 struct thread_info *saved_thread;
427
428 saved_thread = current_thread;
429 current_thread = thread;
430
431 linux_arch_setup ();
432
433 current_thread = saved_thread;
434 }
435
436 /* Handle a GNU/Linux extended wait response. If we see a clone,
437 fork, or vfork event, we need to add the new LWP to our list
438 (and return 0 so as not to report the trap to higher layers).
439 If we see an exec event, we will modify ORIG_EVENT_LWP to point
440 to a new LWP representing the new program. */
441
442 static int
443 handle_extended_wait (struct lwp_info **orig_event_lwp, int wstat)
444 {
445 struct lwp_info *event_lwp = *orig_event_lwp;
446 int event = linux_ptrace_get_extended_event (wstat);
447 struct thread_info *event_thr = get_lwp_thread (event_lwp);
448 struct lwp_info *new_lwp;
449
450 gdb_assert (event_lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
451
452 if ((event == PTRACE_EVENT_FORK) || (event == PTRACE_EVENT_VFORK)
453 || (event == PTRACE_EVENT_CLONE))
454 {
455 ptid_t ptid;
456 unsigned long new_pid;
457 int ret, status;
458
459 /* Get the pid of the new lwp. */
460 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
461 &new_pid);
462
463 /* If we haven't already seen the new PID stop, wait for it now. */
464 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
465 {
466 /* The new child has a pending SIGSTOP. We can't affect it until it
467 hits the SIGSTOP, but we're already attached. */
468
469 ret = my_waitpid (new_pid, &status, __WALL);
470
471 if (ret == -1)
472 perror_with_name ("waiting for new child");
473 else if (ret != new_pid)
474 warning ("wait returned unexpected PID %d", ret);
475 else if (!WIFSTOPPED (status))
476 warning ("wait returned unexpected status 0x%x", status);
477 }
478
479 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
480 {
481 struct process_info *parent_proc;
482 struct process_info *child_proc;
483 struct lwp_info *child_lwp;
484 struct thread_info *child_thr;
485 struct target_desc *tdesc;
486
487 ptid = ptid_build (new_pid, new_pid, 0);
488
489 if (debug_threads)
490 {
491 debug_printf ("HEW: Got fork event from LWP %ld, "
492 "new child is %d\n",
493 ptid_get_lwp (ptid_of (event_thr)),
494 ptid_get_pid (ptid));
495 }
496
497 /* Add the new process to the tables and clone the breakpoint
498 lists of the parent. We need to do this even if the new process
499 will be detached, since we will need the process object and the
500 breakpoints to remove any breakpoints from memory when we
501 detach, and the client side will access registers. */
502 child_proc = linux_add_process (new_pid, 0);
503 gdb_assert (child_proc != NULL);
504 child_lwp = add_lwp (ptid);
505 gdb_assert (child_lwp != NULL);
506 child_lwp->stopped = 1;
507 child_lwp->must_set_ptrace_flags = 1;
508 child_lwp->status_pending_p = 0;
509 child_thr = get_lwp_thread (child_lwp);
510 child_thr->last_resume_kind = resume_stop;
511 child_thr->last_status.kind = TARGET_WAITKIND_STOPPED;
512
513 /* If we're suspending all threads, leave this one suspended
514 too. */
515 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
516 {
517 if (debug_threads)
518 debug_printf ("HEW: leaving child suspended\n");
519 child_lwp->suspended = 1;
520 }
521
522 parent_proc = get_thread_process (event_thr);
523 child_proc->attached = parent_proc->attached;
524 clone_all_breakpoints (&child_proc->breakpoints,
525 &child_proc->raw_breakpoints,
526 parent_proc->breakpoints);
527
528 tdesc = XNEW (struct target_desc);
529 copy_target_description (tdesc, parent_proc->tdesc);
530 child_proc->tdesc = tdesc;
531
532 /* Clone arch-specific process data. */
533 if (the_low_target.new_fork != NULL)
534 the_low_target.new_fork (parent_proc, child_proc);
535
536 /* Save fork info in the parent thread. */
537 if (event == PTRACE_EVENT_FORK)
538 event_lwp->waitstatus.kind = TARGET_WAITKIND_FORKED;
539 else if (event == PTRACE_EVENT_VFORK)
540 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORKED;
541
542 event_lwp->waitstatus.value.related_pid = ptid;
543
544 /* The status_pending field contains bits denoting the
545 extended event, so when the pending event is handled,
546 the handler will look at lwp->waitstatus. */
547 event_lwp->status_pending_p = 1;
548 event_lwp->status_pending = wstat;
549
550 /* Report the event. */
551 return 0;
552 }
553
554 if (debug_threads)
555 debug_printf ("HEW: Got clone event "
556 "from LWP %ld, new child is LWP %ld\n",
557 lwpid_of (event_thr), new_pid);
558
559 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
560 new_lwp = add_lwp (ptid);
561
562 /* Either we're going to immediately resume the new thread
563 or leave it stopped. linux_resume_one_lwp is a nop if it
564 thinks the thread is currently running, so set this first
565 before calling linux_resume_one_lwp. */
566 new_lwp->stopped = 1;
567
568 /* If we're suspending all threads, leave this one suspended
569 too. */
570 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
571 new_lwp->suspended = 1;
572
573 /* Normally we will get the pending SIGSTOP. But in some cases
574 we might get another signal delivered to the group first.
575 If we do get another signal, be sure not to lose it. */
576 if (WSTOPSIG (status) != SIGSTOP)
577 {
578 new_lwp->stop_expected = 1;
579 new_lwp->status_pending_p = 1;
580 new_lwp->status_pending = status;
581 }
582 else if (report_thread_events)
583 {
584 new_lwp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
585 new_lwp->status_pending_p = 1;
586 new_lwp->status_pending = status;
587 }
588
589 /* Don't report the event. */
590 return 1;
591 }
592 else if (event == PTRACE_EVENT_VFORK_DONE)
593 {
594 event_lwp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
595
596 /* Report the event. */
597 return 0;
598 }
599 else if (event == PTRACE_EVENT_EXEC && report_exec_events)
600 {
601 struct process_info *proc;
602 ptid_t event_ptid;
603 pid_t event_pid;
604
605 if (debug_threads)
606 {
607 debug_printf ("HEW: Got exec event from LWP %ld\n",
608 lwpid_of (event_thr));
609 }
610
611 /* Get the event ptid. */
612 event_ptid = ptid_of (event_thr);
613 event_pid = ptid_get_pid (event_ptid);
614
615 /* Delete the execing process and all its threads. */
616 proc = get_thread_process (event_thr);
617 linux_mourn (proc);
618 current_thread = NULL;
619
620 /* Create a new process/lwp/thread. */
621 proc = linux_add_process (event_pid, 0);
622 event_lwp = add_lwp (event_ptid);
623 event_thr = get_lwp_thread (event_lwp);
624 gdb_assert (current_thread == event_thr);
625 linux_arch_setup_thread (event_thr);
626
627 /* Set the event status. */
628 event_lwp->waitstatus.kind = TARGET_WAITKIND_EXECD;
629 event_lwp->waitstatus.value.execd_pathname
630 = xstrdup (linux_proc_pid_to_exec_file (lwpid_of (event_thr)));
631
632 /* Mark the exec status as pending. */
633 event_lwp->stopped = 1;
634 event_lwp->status_pending_p = 1;
635 event_lwp->status_pending = wstat;
636 event_thr->last_resume_kind = resume_continue;
637 event_thr->last_status.kind = TARGET_WAITKIND_IGNORE;
638
639 /* Report the event. */
640 *orig_event_lwp = event_lwp;
641 return 0;
642 }
643
644 internal_error (__FILE__, __LINE__, _("unknown ptrace event %d"), event);
645 }
646
647 /* Return the PC as read from the regcache of LWP, without any
648 adjustment. */
649
650 static CORE_ADDR
651 get_pc (struct lwp_info *lwp)
652 {
653 struct thread_info *saved_thread;
654 struct regcache *regcache;
655 CORE_ADDR pc;
656
657 if (the_low_target.get_pc == NULL)
658 return 0;
659
660 saved_thread = current_thread;
661 current_thread = get_lwp_thread (lwp);
662
663 regcache = get_thread_regcache (current_thread, 1);
664 pc = (*the_low_target.get_pc) (regcache);
665
666 if (debug_threads)
667 debug_printf ("pc is 0x%lx\n", (long) pc);
668
669 current_thread = saved_thread;
670 return pc;
671 }
672
673 /* This function should only be called if LWP got a SIGTRAP.
674 The SIGTRAP could mean several things.
675
676 On i386, where decr_pc_after_break is non-zero:
677
678 If we were single-stepping this process using PTRACE_SINGLESTEP, we
679 will get only the one SIGTRAP. The value of $eip will be the next
680 instruction. If the instruction we stepped over was a breakpoint,
681 we need to decrement the PC.
682
683 If we continue the process using PTRACE_CONT, we will get a
684 SIGTRAP when we hit a breakpoint. The value of $eip will be
685 the instruction after the breakpoint (i.e. needs to be
686 decremented). If we report the SIGTRAP to GDB, we must also
687 report the undecremented PC. If the breakpoint is removed, we
688 must resume at the decremented PC.
689
690 On a non-decr_pc_after_break machine with hardware or kernel
691 single-step:
692
693 If we either single-step a breakpoint instruction, or continue and
694 hit a breakpoint instruction, our PC will point at the breakpoint
695 instruction. */
696
697 static int
698 check_stopped_by_breakpoint (struct lwp_info *lwp)
699 {
700 CORE_ADDR pc;
701 CORE_ADDR sw_breakpoint_pc;
702 struct thread_info *saved_thread;
703 #if USE_SIGTRAP_SIGINFO
704 siginfo_t siginfo;
705 #endif
706
707 if (the_low_target.get_pc == NULL)
708 return 0;
709
710 pc = get_pc (lwp);
711 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
712
713 /* breakpoint_at reads from the current thread. */
714 saved_thread = current_thread;
715 current_thread = get_lwp_thread (lwp);
716
717 #if USE_SIGTRAP_SIGINFO
718 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
719 (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
720 {
721 if (siginfo.si_signo == SIGTRAP)
722 {
723 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
724 {
725 if (debug_threads)
726 {
727 struct thread_info *thr = get_lwp_thread (lwp);
728
729 debug_printf ("CSBB: %s stopped by software breakpoint\n",
730 target_pid_to_str (ptid_of (thr)));
731 }
732
733 /* Back up the PC if necessary. */
734 if (pc != sw_breakpoint_pc)
735 {
736 struct regcache *regcache
737 = get_thread_regcache (current_thread, 1);
738 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
739 }
740
741 lwp->stop_pc = sw_breakpoint_pc;
742 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
743 current_thread = saved_thread;
744 return 1;
745 }
746 else if (siginfo.si_code == TRAP_HWBKPT)
747 {
748 if (debug_threads)
749 {
750 struct thread_info *thr = get_lwp_thread (lwp);
751
752 debug_printf ("CSBB: %s stopped by hardware "
753 "breakpoint/watchpoint\n",
754 target_pid_to_str (ptid_of (thr)));
755 }
756
757 lwp->stop_pc = pc;
758 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
759 current_thread = saved_thread;
760 return 1;
761 }
762 else if (siginfo.si_code == TRAP_TRACE)
763 {
764 if (debug_threads)
765 {
766 struct thread_info *thr = get_lwp_thread (lwp);
767
768 debug_printf ("CSBB: %s stopped by trace\n",
769 target_pid_to_str (ptid_of (thr)));
770 }
771
772 lwp->stop_reason = TARGET_STOPPED_BY_SINGLE_STEP;
773 }
774 }
775 }
776 #else
777 /* We may have just stepped a breakpoint instruction. E.g., in
778 non-stop mode, GDB first tells the thread A to step a range, and
779 then the user inserts a breakpoint inside the range. In that
780 case we need to report the breakpoint PC. */
781 if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
782 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
783 {
784 if (debug_threads)
785 {
786 struct thread_info *thr = get_lwp_thread (lwp);
787
788 debug_printf ("CSBB: %s stopped by software breakpoint\n",
789 target_pid_to_str (ptid_of (thr)));
790 }
791
792 /* Back up the PC if necessary. */
793 if (pc != sw_breakpoint_pc)
794 {
795 struct regcache *regcache
796 = get_thread_regcache (current_thread, 1);
797 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
798 }
799
800 lwp->stop_pc = sw_breakpoint_pc;
801 lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
802 current_thread = saved_thread;
803 return 1;
804 }
805
806 if (hardware_breakpoint_inserted_here (pc))
807 {
808 if (debug_threads)
809 {
810 struct thread_info *thr = get_lwp_thread (lwp);
811
812 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
813 target_pid_to_str (ptid_of (thr)));
814 }
815
816 lwp->stop_pc = pc;
817 lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
818 current_thread = saved_thread;
819 return 1;
820 }
821 #endif
822
823 current_thread = saved_thread;
824 return 0;
825 }
826
827 static struct lwp_info *
828 add_lwp (ptid_t ptid)
829 {
830 struct lwp_info *lwp;
831
832 lwp = XCNEW (struct lwp_info);
833
834 lwp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
835
836 if (the_low_target.new_thread != NULL)
837 the_low_target.new_thread (lwp);
838
839 lwp->thread = add_thread (ptid, lwp);
840
841 return lwp;
842 }
843
844 /* Start an inferior process and returns its pid.
845 ALLARGS is a vector of program-name and args. */
846
847 static int
848 linux_create_inferior (char *program, char **allargs)
849 {
850 struct lwp_info *new_lwp;
851 int pid;
852 ptid_t ptid;
853 struct cleanup *restore_personality
854 = maybe_disable_address_space_randomization (disable_randomization);
855
856 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
857 pid = vfork ();
858 #else
859 pid = fork ();
860 #endif
861 if (pid < 0)
862 perror_with_name ("fork");
863
864 if (pid == 0)
865 {
866 close_most_fds ();
867 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
868
869 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
870 signal (__SIGRTMIN + 1, SIG_DFL);
871 #endif
872
873 setpgid (0, 0);
874
875 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
876 stdout to stderr so that inferior i/o doesn't corrupt the connection.
877 Also, redirect stdin to /dev/null. */
878 if (remote_connection_is_stdio ())
879 {
880 close (0);
881 open ("/dev/null", O_RDONLY);
882 dup2 (2, 1);
883 if (write (2, "stdin/stdout redirected\n",
884 sizeof ("stdin/stdout redirected\n") - 1) < 0)
885 {
886 /* Errors ignored. */;
887 }
888 }
889
890 execv (program, allargs);
891 if (errno == ENOENT)
892 execvp (program, allargs);
893
894 fprintf (stderr, "Cannot exec %s: %s.\n", program,
895 strerror (errno));
896 fflush (stderr);
897 _exit (0177);
898 }
899
900 do_cleanups (restore_personality);
901
902 linux_add_process (pid, 0);
903
904 ptid = ptid_build (pid, pid, 0);
905 new_lwp = add_lwp (ptid);
906 new_lwp->must_set_ptrace_flags = 1;
907
908 return pid;
909 }
910
911 /* Attach to an inferior process. Returns 0 on success, ERRNO on
912 error. */
913
914 int
915 linux_attach_lwp (ptid_t ptid)
916 {
917 struct lwp_info *new_lwp;
918 int lwpid = ptid_get_lwp (ptid);
919
920 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
921 != 0)
922 return errno;
923
924 new_lwp = add_lwp (ptid);
925
926 /* We need to wait for SIGSTOP before being able to make the next
927 ptrace call on this LWP. */
928 new_lwp->must_set_ptrace_flags = 1;
929
930 if (linux_proc_pid_is_stopped (lwpid))
931 {
932 if (debug_threads)
933 debug_printf ("Attached to a stopped process\n");
934
935 /* The process is definitely stopped. It is in a job control
936 stop, unless the kernel predates the TASK_STOPPED /
937 TASK_TRACED distinction, in which case it might be in a
938 ptrace stop. Make sure it is in a ptrace stop; from there we
939 can kill it, signal it, et cetera.
940
941 First make sure there is a pending SIGSTOP. Since we are
942 already attached, the process can not transition from stopped
943 to running without a PTRACE_CONT; so we know this signal will
944 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
945 probably already in the queue (unless this kernel is old
946 enough to use TASK_STOPPED for ptrace stops); but since
947 SIGSTOP is not an RT signal, it can only be queued once. */
948 kill_lwp (lwpid, SIGSTOP);
949
950 /* Finally, resume the stopped process. This will deliver the
951 SIGSTOP (or a higher priority signal, just like normal
952 PTRACE_ATTACH), which we'll catch later on. */
953 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
954 }
955
956 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
957 brings it to a halt.
958
959 There are several cases to consider here:
960
961 1) gdbserver has already attached to the process and is being notified
962 of a new thread that is being created.
963 In this case we should ignore that SIGSTOP and resume the
964 process. This is handled below by setting stop_expected = 1,
965 and the fact that add_thread sets last_resume_kind ==
966 resume_continue.
967
968 2) This is the first thread (the process thread), and we're attaching
969 to it via attach_inferior.
970 In this case we want the process thread to stop.
971 This is handled by having linux_attach set last_resume_kind ==
972 resume_stop after we return.
973
974 If the pid we are attaching to is also the tgid, we attach to and
975 stop all the existing threads. Otherwise, we attach to pid and
976 ignore any other threads in the same group as this pid.
977
978 3) GDB is connecting to gdbserver and is requesting an enumeration of all
979 existing threads.
980 In this case we want the thread to stop.
981 FIXME: This case is currently not properly handled.
982 We should wait for the SIGSTOP but don't. Things work apparently
983 because enough time passes between when we ptrace (ATTACH) and when
984 gdb makes the next ptrace call on the thread.
985
986 On the other hand, if we are currently trying to stop all threads, we
987 should treat the new thread as if we had sent it a SIGSTOP. This works
988 because we are guaranteed that the add_lwp call above added us to the
989 end of the list, and so the new thread has not yet reached
990 wait_for_sigstop (but will). */
991 new_lwp->stop_expected = 1;
992
993 return 0;
994 }
995
996 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
997 already attached. Returns true if a new LWP is found, false
998 otherwise. */
999
1000 static int
1001 attach_proc_task_lwp_callback (ptid_t ptid)
1002 {
1003 /* Is this a new thread? */
1004 if (find_thread_ptid (ptid) == NULL)
1005 {
1006 int lwpid = ptid_get_lwp (ptid);
1007 int err;
1008
1009 if (debug_threads)
1010 debug_printf ("Found new lwp %d\n", lwpid);
1011
1012 err = linux_attach_lwp (ptid);
1013
1014 /* Be quiet if we simply raced with the thread exiting. EPERM
1015 is returned if the thread's task still exists, and is marked
1016 as exited or zombie, as well as other conditions, so in that
1017 case, confirm the status in /proc/PID/status. */
1018 if (err == ESRCH
1019 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1020 {
1021 if (debug_threads)
1022 {
1023 debug_printf ("Cannot attach to lwp %d: "
1024 "thread is gone (%d: %s)\n",
1025 lwpid, err, strerror (err));
1026 }
1027 }
1028 else if (err != 0)
1029 {
1030 warning (_("Cannot attach to lwp %d: %s"),
1031 lwpid,
1032 linux_ptrace_attach_fail_reason_string (ptid, err));
1033 }
1034
1035 return 1;
1036 }
1037 return 0;
1038 }
1039
1040 static void async_file_mark (void);
1041
1042 /* Attach to PID. If PID is the tgid, attach to it and all
1043 of its threads. */
1044
1045 static int
1046 linux_attach (unsigned long pid)
1047 {
1048 struct process_info *proc;
1049 struct thread_info *initial_thread;
1050 ptid_t ptid = ptid_build (pid, pid, 0);
1051 int err;
1052
1053 /* Attach to PID. We will check for other threads
1054 soon. */
1055 err = linux_attach_lwp (ptid);
1056 if (err != 0)
1057 error ("Cannot attach to process %ld: %s",
1058 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
1059
1060 proc = linux_add_process (pid, 1);
1061
1062 /* Don't ignore the initial SIGSTOP if we just attached to this
1063 process. It will be collected by wait shortly. */
1064 initial_thread = find_thread_ptid (ptid_build (pid, pid, 0));
1065 initial_thread->last_resume_kind = resume_stop;
1066
1067 /* We must attach to every LWP. If /proc is mounted, use that to
1068 find them now. On the one hand, the inferior may be using raw
1069 clone instead of using pthreads. On the other hand, even if it
1070 is using pthreads, GDB may not be connected yet (thread_db needs
1071 to do symbol lookups, through qSymbol). Also, thread_db walks
1072 structures in the inferior's address space to find the list of
1073 threads/LWPs, and those structures may well be corrupted. Note
1074 that once thread_db is loaded, we'll still use it to list threads
1075 and associate pthread info with each LWP. */
1076 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
1077
1078 /* GDB will shortly read the xml target description for this
1079 process, to figure out the process' architecture. But the target
1080 description is only filled in when the first process/thread in
1081 the thread group reports its initial PTRACE_ATTACH SIGSTOP. Do
1082 that now, otherwise, if GDB is fast enough, it could read the
1083 target description _before_ that initial stop. */
1084 if (non_stop)
1085 {
1086 struct lwp_info *lwp;
1087 int wstat, lwpid;
1088 ptid_t pid_ptid = pid_to_ptid (pid);
1089
1090 lwpid = linux_wait_for_event_filtered (pid_ptid, pid_ptid,
1091 &wstat, __WALL);
1092 gdb_assert (lwpid > 0);
1093
1094 lwp = find_lwp_pid (pid_to_ptid (lwpid));
1095
1096 if (!WIFSTOPPED (wstat) || WSTOPSIG (wstat) != SIGSTOP)
1097 {
1098 lwp->status_pending_p = 1;
1099 lwp->status_pending = wstat;
1100 }
1101
1102 initial_thread->last_resume_kind = resume_continue;
1103
1104 async_file_mark ();
1105
1106 gdb_assert (proc->tdesc != NULL);
1107 }
1108
1109 return 0;
1110 }
1111
1112 struct counter
1113 {
1114 int pid;
1115 int count;
1116 };
1117
1118 static int
1119 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
1120 {
1121 struct counter *counter = (struct counter *) args;
1122
1123 if (ptid_get_pid (entry->id) == counter->pid)
1124 {
1125 if (++counter->count > 1)
1126 return 1;
1127 }
1128
1129 return 0;
1130 }
1131
1132 static int
1133 last_thread_of_process_p (int pid)
1134 {
1135 struct counter counter = { pid , 0 };
1136
1137 return (find_inferior (&all_threads,
1138 second_thread_of_pid_p, &counter) == NULL);
1139 }
1140
1141 /* Kill LWP. */
1142
1143 static void
1144 linux_kill_one_lwp (struct lwp_info *lwp)
1145 {
1146 struct thread_info *thr = get_lwp_thread (lwp);
1147 int pid = lwpid_of (thr);
1148
1149 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
1150 there is no signal context, and ptrace(PTRACE_KILL) (or
1151 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
1152 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
1153 alternative is to kill with SIGKILL. We only need one SIGKILL
1154 per process, not one for each thread. But since we still support
1155 linuxthreads, and we also support debugging programs using raw
1156 clone without CLONE_THREAD, we send one for each thread. For
1157 years, we used PTRACE_KILL only, so we're being a bit paranoid
1158 about some old kernels where PTRACE_KILL might work better
1159 (dubious if there are any such, but that's why it's paranoia), so
1160 we try SIGKILL first, PTRACE_KILL second, and so we're fine
1161 everywhere. */
1162
1163 errno = 0;
1164 kill_lwp (pid, SIGKILL);
1165 if (debug_threads)
1166 {
1167 int save_errno = errno;
1168
1169 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
1170 target_pid_to_str (ptid_of (thr)),
1171 save_errno ? strerror (save_errno) : "OK");
1172 }
1173
1174 errno = 0;
1175 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
1176 if (debug_threads)
1177 {
1178 int save_errno = errno;
1179
1180 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
1181 target_pid_to_str (ptid_of (thr)),
1182 save_errno ? strerror (save_errno) : "OK");
1183 }
1184 }
1185
1186 /* Kill LWP and wait for it to die. */
1187
1188 static void
1189 kill_wait_lwp (struct lwp_info *lwp)
1190 {
1191 struct thread_info *thr = get_lwp_thread (lwp);
1192 int pid = ptid_get_pid (ptid_of (thr));
1193 int lwpid = ptid_get_lwp (ptid_of (thr));
1194 int wstat;
1195 int res;
1196
1197 if (debug_threads)
1198 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
1199
1200 do
1201 {
1202 linux_kill_one_lwp (lwp);
1203
1204 /* Make sure it died. Notes:
1205
1206 - The loop is most likely unnecessary.
1207
1208 - We don't use linux_wait_for_event as that could delete lwps
1209 while we're iterating over them. We're not interested in
1210 any pending status at this point, only in making sure all
1211 wait status on the kernel side are collected until the
1212 process is reaped.
1213
1214 - We don't use __WALL here as the __WALL emulation relies on
1215 SIGCHLD, and killing a stopped process doesn't generate
1216 one, nor an exit status.
1217 */
1218 res = my_waitpid (lwpid, &wstat, 0);
1219 if (res == -1 && errno == ECHILD)
1220 res = my_waitpid (lwpid, &wstat, __WCLONE);
1221 } while (res > 0 && WIFSTOPPED (wstat));
1222
1223 /* Even if it was stopped, the child may have already disappeared.
1224 E.g., if it was killed by SIGKILL. */
1225 if (res < 0 && errno != ECHILD)
1226 perror_with_name ("kill_wait_lwp");
1227 }
1228
1229 /* Callback for `find_inferior'. Kills an lwp of a given process,
1230 except the leader. */
1231
1232 static int
1233 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
1234 {
1235 struct thread_info *thread = (struct thread_info *) entry;
1236 struct lwp_info *lwp = get_thread_lwp (thread);
1237 int pid = * (int *) args;
1238
1239 if (ptid_get_pid (entry->id) != pid)
1240 return 0;
1241
1242 /* We avoid killing the first thread here, because of a Linux kernel (at
1243 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
1244 the children get a chance to be reaped, it will remain a zombie
1245 forever. */
1246
1247 if (lwpid_of (thread) == pid)
1248 {
1249 if (debug_threads)
1250 debug_printf ("lkop: is last of process %s\n",
1251 target_pid_to_str (entry->id));
1252 return 0;
1253 }
1254
1255 kill_wait_lwp (lwp);
1256 return 0;
1257 }
1258
1259 static int
1260 linux_kill (int pid)
1261 {
1262 struct process_info *process;
1263 struct lwp_info *lwp;
1264
1265 process = find_process_pid (pid);
1266 if (process == NULL)
1267 return -1;
1268
1269 /* If we're killing a running inferior, make sure it is stopped
1270 first, as PTRACE_KILL will not work otherwise. */
1271 stop_all_lwps (0, NULL);
1272
1273 find_inferior (&all_threads, kill_one_lwp_callback , &pid);
1274
1275 /* See the comment in linux_kill_one_lwp. We did not kill the first
1276 thread in the list, so do so now. */
1277 lwp = find_lwp_pid (pid_to_ptid (pid));
1278
1279 if (lwp == NULL)
1280 {
1281 if (debug_threads)
1282 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
1283 pid);
1284 }
1285 else
1286 kill_wait_lwp (lwp);
1287
1288 the_target->mourn (process);
1289
1290 /* Since we presently can only stop all lwps of all processes, we
1291 need to unstop lwps of other processes. */
1292 unstop_all_lwps (0, NULL);
1293 return 0;
1294 }
1295
1296 /* Get pending signal of THREAD, for detaching purposes. This is the
1297 signal the thread last stopped for, which we need to deliver to the
1298 thread when detaching, otherwise, it'd be suppressed/lost. */
1299
1300 static int
1301 get_detach_signal (struct thread_info *thread)
1302 {
1303 enum gdb_signal signo = GDB_SIGNAL_0;
1304 int status;
1305 struct lwp_info *lp = get_thread_lwp (thread);
1306
1307 if (lp->status_pending_p)
1308 status = lp->status_pending;
1309 else
1310 {
1311 /* If the thread had been suspended by gdbserver, and it stopped
1312 cleanly, then it'll have stopped with SIGSTOP. But we don't
1313 want to deliver that SIGSTOP. */
1314 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1315 || thread->last_status.value.sig == GDB_SIGNAL_0)
1316 return 0;
1317
1318 /* Otherwise, we may need to deliver the signal we
1319 intercepted. */
1320 status = lp->last_status;
1321 }
1322
1323 if (!WIFSTOPPED (status))
1324 {
1325 if (debug_threads)
1326 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1327 target_pid_to_str (ptid_of (thread)));
1328 return 0;
1329 }
1330
1331 /* Extended wait statuses aren't real SIGTRAPs. */
1332 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1333 {
1334 if (debug_threads)
1335 debug_printf ("GPS: lwp %s had stopped with extended "
1336 "status: no pending signal\n",
1337 target_pid_to_str (ptid_of (thread)));
1338 return 0;
1339 }
1340
1341 signo = gdb_signal_from_host (WSTOPSIG (status));
1342
1343 if (program_signals_p && !program_signals[signo])
1344 {
1345 if (debug_threads)
1346 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1347 target_pid_to_str (ptid_of (thread)),
1348 gdb_signal_to_string (signo));
1349 return 0;
1350 }
1351 else if (!program_signals_p
1352 /* If we have no way to know which signals GDB does not
1353 want to have passed to the program, assume
1354 SIGTRAP/SIGINT, which is GDB's default. */
1355 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1356 {
1357 if (debug_threads)
1358 debug_printf ("GPS: lwp %s had signal %s, "
1359 "but we don't know if we should pass it. "
1360 "Default to not.\n",
1361 target_pid_to_str (ptid_of (thread)),
1362 gdb_signal_to_string (signo));
1363 return 0;
1364 }
1365 else
1366 {
1367 if (debug_threads)
1368 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1369 target_pid_to_str (ptid_of (thread)),
1370 gdb_signal_to_string (signo));
1371
1372 return WSTOPSIG (status);
1373 }
1374 }
1375
1376 static int
1377 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1378 {
1379 struct thread_info *thread = (struct thread_info *) entry;
1380 struct lwp_info *lwp = get_thread_lwp (thread);
1381 int pid = * (int *) args;
1382 int sig;
1383
1384 if (ptid_get_pid (entry->id) != pid)
1385 return 0;
1386
1387 /* If there is a pending SIGSTOP, get rid of it. */
1388 if (lwp->stop_expected)
1389 {
1390 if (debug_threads)
1391 debug_printf ("Sending SIGCONT to %s\n",
1392 target_pid_to_str (ptid_of (thread)));
1393
1394 kill_lwp (lwpid_of (thread), SIGCONT);
1395 lwp->stop_expected = 0;
1396 }
1397
1398 /* Flush any pending changes to the process's registers. */
1399 regcache_invalidate_thread (thread);
1400
1401 /* Pass on any pending signal for this thread. */
1402 sig = get_detach_signal (thread);
1403
1404 /* Finally, let it resume. */
1405 if (the_low_target.prepare_to_resume != NULL)
1406 the_low_target.prepare_to_resume (lwp);
1407 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1408 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1409 error (_("Can't detach %s: %s"),
1410 target_pid_to_str (ptid_of (thread)),
1411 strerror (errno));
1412
1413 delete_lwp (lwp);
1414 return 0;
1415 }
1416
1417 static int
1418 linux_detach (int pid)
1419 {
1420 struct process_info *process;
1421
1422 process = find_process_pid (pid);
1423 if (process == NULL)
1424 return -1;
1425
1426 /* As there's a step over already in progress, let it finish first,
1427 otherwise nesting a stabilize_threads operation on top gets real
1428 messy. */
1429 complete_ongoing_step_over ();
1430
1431 /* Stop all threads before detaching. First, ptrace requires that
1432 the thread is stopped to sucessfully detach. Second, thread_db
1433 may need to uninstall thread event breakpoints from memory, which
1434 only works with a stopped process anyway. */
1435 stop_all_lwps (0, NULL);
1436
1437 #ifdef USE_THREAD_DB
1438 thread_db_detach (process);
1439 #endif
1440
1441 /* Stabilize threads (move out of jump pads). */
1442 stabilize_threads ();
1443
1444 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1445
1446 the_target->mourn (process);
1447
1448 /* Since we presently can only stop all lwps of all processes, we
1449 need to unstop lwps of other processes. */
1450 unstop_all_lwps (0, NULL);
1451 return 0;
1452 }
1453
1454 /* Remove all LWPs that belong to process PROC from the lwp list. */
1455
1456 static int
1457 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1458 {
1459 struct thread_info *thread = (struct thread_info *) entry;
1460 struct lwp_info *lwp = get_thread_lwp (thread);
1461 struct process_info *process = (struct process_info *) proc;
1462
1463 if (pid_of (thread) == pid_of (process))
1464 delete_lwp (lwp);
1465
1466 return 0;
1467 }
1468
1469 static void
1470 linux_mourn (struct process_info *process)
1471 {
1472 struct process_info_private *priv;
1473
1474 #ifdef USE_THREAD_DB
1475 thread_db_mourn (process);
1476 #endif
1477
1478 find_inferior (&all_threads, delete_lwp_callback, process);
1479
1480 /* Freeing all private data. */
1481 priv = process->priv;
1482 free (priv->arch_private);
1483 free (priv);
1484 process->priv = NULL;
1485
1486 remove_process (process);
1487 }
1488
1489 static void
1490 linux_join (int pid)
1491 {
1492 int status, ret;
1493
1494 do {
1495 ret = my_waitpid (pid, &status, 0);
1496 if (WIFEXITED (status) || WIFSIGNALED (status))
1497 break;
1498 } while (ret != -1 || errno != ECHILD);
1499 }
1500
1501 /* Return nonzero if the given thread is still alive. */
1502 static int
1503 linux_thread_alive (ptid_t ptid)
1504 {
1505 struct lwp_info *lwp = find_lwp_pid (ptid);
1506
1507 /* We assume we always know if a thread exits. If a whole process
1508 exited but we still haven't been able to report it to GDB, we'll
1509 hold on to the last lwp of the dead process. */
1510 if (lwp != NULL)
1511 return !lwp_is_marked_dead (lwp);
1512 else
1513 return 0;
1514 }
1515
1516 /* Return 1 if this lwp still has an interesting status pending. If
1517 not (e.g., it had stopped for a breakpoint that is gone), return
1518 false. */
1519
1520 static int
1521 thread_still_has_status_pending_p (struct thread_info *thread)
1522 {
1523 struct lwp_info *lp = get_thread_lwp (thread);
1524
1525 if (!lp->status_pending_p)
1526 return 0;
1527
1528 if (thread->last_resume_kind != resume_stop
1529 && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1530 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1531 {
1532 struct thread_info *saved_thread;
1533 CORE_ADDR pc;
1534 int discard = 0;
1535
1536 gdb_assert (lp->last_status != 0);
1537
1538 pc = get_pc (lp);
1539
1540 saved_thread = current_thread;
1541 current_thread = thread;
1542
1543 if (pc != lp->stop_pc)
1544 {
1545 if (debug_threads)
1546 debug_printf ("PC of %ld changed\n",
1547 lwpid_of (thread));
1548 discard = 1;
1549 }
1550
1551 #if !USE_SIGTRAP_SIGINFO
1552 else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
1553 && !(*the_low_target.breakpoint_at) (pc))
1554 {
1555 if (debug_threads)
1556 debug_printf ("previous SW breakpoint of %ld gone\n",
1557 lwpid_of (thread));
1558 discard = 1;
1559 }
1560 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
1561 && !hardware_breakpoint_inserted_here (pc))
1562 {
1563 if (debug_threads)
1564 debug_printf ("previous HW breakpoint of %ld gone\n",
1565 lwpid_of (thread));
1566 discard = 1;
1567 }
1568 #endif
1569
1570 current_thread = saved_thread;
1571
1572 if (discard)
1573 {
1574 if (debug_threads)
1575 debug_printf ("discarding pending breakpoint status\n");
1576 lp->status_pending_p = 0;
1577 return 0;
1578 }
1579 }
1580
1581 return 1;
1582 }
1583
1584 /* Returns true if LWP is resumed from the client's perspective. */
1585
1586 static int
1587 lwp_resumed (struct lwp_info *lwp)
1588 {
1589 struct thread_info *thread = get_lwp_thread (lwp);
1590
1591 if (thread->last_resume_kind != resume_stop)
1592 return 1;
1593
1594 /* Did gdb send us a `vCont;t', but we haven't reported the
1595 corresponding stop to gdb yet? If so, the thread is still
1596 resumed/running from gdb's perspective. */
1597 if (thread->last_resume_kind == resume_stop
1598 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1599 return 1;
1600
1601 return 0;
1602 }
1603
1604 /* Return 1 if this lwp has an interesting status pending. */
1605 static int
1606 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1607 {
1608 struct thread_info *thread = (struct thread_info *) entry;
1609 struct lwp_info *lp = get_thread_lwp (thread);
1610 ptid_t ptid = * (ptid_t *) arg;
1611
1612 /* Check if we're only interested in events from a specific process
1613 or a specific LWP. */
1614 if (!ptid_match (ptid_of (thread), ptid))
1615 return 0;
1616
1617 if (!lwp_resumed (lp))
1618 return 0;
1619
1620 if (lp->status_pending_p
1621 && !thread_still_has_status_pending_p (thread))
1622 {
1623 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1624 return 0;
1625 }
1626
1627 return lp->status_pending_p;
1628 }
1629
1630 static int
1631 same_lwp (struct inferior_list_entry *entry, void *data)
1632 {
1633 ptid_t ptid = *(ptid_t *) data;
1634 int lwp;
1635
1636 if (ptid_get_lwp (ptid) != 0)
1637 lwp = ptid_get_lwp (ptid);
1638 else
1639 lwp = ptid_get_pid (ptid);
1640
1641 if (ptid_get_lwp (entry->id) == lwp)
1642 return 1;
1643
1644 return 0;
1645 }
1646
1647 struct lwp_info *
1648 find_lwp_pid (ptid_t ptid)
1649 {
1650 struct inferior_list_entry *thread
1651 = find_inferior (&all_threads, same_lwp, &ptid);
1652
1653 if (thread == NULL)
1654 return NULL;
1655
1656 return get_thread_lwp ((struct thread_info *) thread);
1657 }
1658
1659 /* Return the number of known LWPs in the tgid given by PID. */
1660
1661 static int
1662 num_lwps (int pid)
1663 {
1664 struct inferior_list_entry *inf, *tmp;
1665 int count = 0;
1666
1667 ALL_INFERIORS (&all_threads, inf, tmp)
1668 {
1669 if (ptid_get_pid (inf->id) == pid)
1670 count++;
1671 }
1672
1673 return count;
1674 }
1675
1676 /* The arguments passed to iterate_over_lwps. */
1677
1678 struct iterate_over_lwps_args
1679 {
1680 /* The FILTER argument passed to iterate_over_lwps. */
1681 ptid_t filter;
1682
1683 /* The CALLBACK argument passed to iterate_over_lwps. */
1684 iterate_over_lwps_ftype *callback;
1685
1686 /* The DATA argument passed to iterate_over_lwps. */
1687 void *data;
1688 };
1689
1690 /* Callback for find_inferior used by iterate_over_lwps to filter
1691 calls to the callback supplied to that function. Returning a
1692 nonzero value causes find_inferiors to stop iterating and return
1693 the current inferior_list_entry. Returning zero indicates that
1694 find_inferiors should continue iterating. */
1695
1696 static int
1697 iterate_over_lwps_filter (struct inferior_list_entry *entry, void *args_p)
1698 {
1699 struct iterate_over_lwps_args *args
1700 = (struct iterate_over_lwps_args *) args_p;
1701
1702 if (ptid_match (entry->id, args->filter))
1703 {
1704 struct thread_info *thr = (struct thread_info *) entry;
1705 struct lwp_info *lwp = get_thread_lwp (thr);
1706
1707 return (*args->callback) (lwp, args->data);
1708 }
1709
1710 return 0;
1711 }
1712
1713 /* See nat/linux-nat.h. */
1714
1715 struct lwp_info *
1716 iterate_over_lwps (ptid_t filter,
1717 iterate_over_lwps_ftype callback,
1718 void *data)
1719 {
1720 struct iterate_over_lwps_args args = {filter, callback, data};
1721 struct inferior_list_entry *entry;
1722
1723 entry = find_inferior (&all_threads, iterate_over_lwps_filter, &args);
1724 if (entry == NULL)
1725 return NULL;
1726
1727 return get_thread_lwp ((struct thread_info *) entry);
1728 }
1729
1730 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1731 their exits until all other threads in the group have exited. */
1732
1733 static void
1734 check_zombie_leaders (void)
1735 {
1736 struct process_info *proc, *tmp;
1737
1738 ALL_PROCESSES (proc, tmp)
1739 {
1740 pid_t leader_pid = pid_of (proc);
1741 struct lwp_info *leader_lp;
1742
1743 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1744
1745 if (debug_threads)
1746 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1747 "num_lwps=%d, zombie=%d\n",
1748 leader_pid, leader_lp!= NULL, num_lwps (leader_pid),
1749 linux_proc_pid_is_zombie (leader_pid));
1750
1751 if (leader_lp != NULL && !leader_lp->stopped
1752 /* Check if there are other threads in the group, as we may
1753 have raced with the inferior simply exiting. */
1754 && !last_thread_of_process_p (leader_pid)
1755 && linux_proc_pid_is_zombie (leader_pid))
1756 {
1757 /* A leader zombie can mean one of two things:
1758
1759 - It exited, and there's an exit status pending
1760 available, or only the leader exited (not the whole
1761 program). In the latter case, we can't waitpid the
1762 leader's exit status until all other threads are gone.
1763
1764 - There are 3 or more threads in the group, and a thread
1765 other than the leader exec'd. On an exec, the Linux
1766 kernel destroys all other threads (except the execing
1767 one) in the thread group, and resets the execing thread's
1768 tid to the tgid. No exit notification is sent for the
1769 execing thread -- from the ptracer's perspective, it
1770 appears as though the execing thread just vanishes.
1771 Until we reap all other threads except the leader and the
1772 execing thread, the leader will be zombie, and the
1773 execing thread will be in `D (disc sleep)'. As soon as
1774 all other threads are reaped, the execing thread changes
1775 it's tid to the tgid, and the previous (zombie) leader
1776 vanishes, giving place to the "new" leader. We could try
1777 distinguishing the exit and exec cases, by waiting once
1778 more, and seeing if something comes out, but it doesn't
1779 sound useful. The previous leader _does_ go away, and
1780 we'll re-add the new one once we see the exec event
1781 (which is just the same as what would happen if the
1782 previous leader did exit voluntarily before some other
1783 thread execs). */
1784
1785 if (debug_threads)
1786 fprintf (stderr,
1787 "CZL: Thread group leader %d zombie "
1788 "(it exited, or another thread execd).\n",
1789 leader_pid);
1790
1791 delete_lwp (leader_lp);
1792 }
1793 }
1794 }
1795
1796 /* Callback for `find_inferior'. Returns the first LWP that is not
1797 stopped. ARG is a PTID filter. */
1798
1799 static int
1800 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1801 {
1802 struct thread_info *thr = (struct thread_info *) entry;
1803 struct lwp_info *lwp;
1804 ptid_t filter = *(ptid_t *) arg;
1805
1806 if (!ptid_match (ptid_of (thr), filter))
1807 return 0;
1808
1809 lwp = get_thread_lwp (thr);
1810 if (!lwp->stopped)
1811 return 1;
1812
1813 return 0;
1814 }
1815
1816 /* Increment LWP's suspend count. */
1817
1818 static void
1819 lwp_suspended_inc (struct lwp_info *lwp)
1820 {
1821 lwp->suspended++;
1822
1823 if (debug_threads && lwp->suspended > 4)
1824 {
1825 struct thread_info *thread = get_lwp_thread (lwp);
1826
1827 debug_printf ("LWP %ld has a suspiciously high suspend count,"
1828 " suspended=%d\n", lwpid_of (thread), lwp->suspended);
1829 }
1830 }
1831
1832 /* Decrement LWP's suspend count. */
1833
1834 static void
1835 lwp_suspended_decr (struct lwp_info *lwp)
1836 {
1837 lwp->suspended--;
1838
1839 if (lwp->suspended < 0)
1840 {
1841 struct thread_info *thread = get_lwp_thread (lwp);
1842
1843 internal_error (__FILE__, __LINE__,
1844 "unsuspend LWP %ld, suspended=%d\n", lwpid_of (thread),
1845 lwp->suspended);
1846 }
1847 }
1848
1849 /* This function should only be called if the LWP got a SIGTRAP.
1850
1851 Handle any tracepoint steps or hits. Return true if a tracepoint
1852 event was handled, 0 otherwise. */
1853
1854 static int
1855 handle_tracepoints (struct lwp_info *lwp)
1856 {
1857 struct thread_info *tinfo = get_lwp_thread (lwp);
1858 int tpoint_related_event = 0;
1859
1860 gdb_assert (lwp->suspended == 0);
1861
1862 /* If this tracepoint hit causes a tracing stop, we'll immediately
1863 uninsert tracepoints. To do this, we temporarily pause all
1864 threads, unpatch away, and then unpause threads. We need to make
1865 sure the unpausing doesn't resume LWP too. */
1866 lwp_suspended_inc (lwp);
1867
1868 /* And we need to be sure that any all-threads-stopping doesn't try
1869 to move threads out of the jump pads, as it could deadlock the
1870 inferior (LWP could be in the jump pad, maybe even holding the
1871 lock.) */
1872
1873 /* Do any necessary step collect actions. */
1874 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1875
1876 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1877
1878 /* See if we just hit a tracepoint and do its main collect
1879 actions. */
1880 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1881
1882 lwp_suspended_decr (lwp);
1883
1884 gdb_assert (lwp->suspended == 0);
1885 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1886
1887 if (tpoint_related_event)
1888 {
1889 if (debug_threads)
1890 debug_printf ("got a tracepoint event\n");
1891 return 1;
1892 }
1893
1894 return 0;
1895 }
1896
1897 /* Convenience wrapper. Returns true if LWP is presently collecting a
1898 fast tracepoint. */
1899
1900 static int
1901 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1902 struct fast_tpoint_collect_status *status)
1903 {
1904 CORE_ADDR thread_area;
1905 struct thread_info *thread = get_lwp_thread (lwp);
1906
1907 if (the_low_target.get_thread_area == NULL)
1908 return 0;
1909
1910 /* Get the thread area address. This is used to recognize which
1911 thread is which when tracing with the in-process agent library.
1912 We don't read anything from the address, and treat it as opaque;
1913 it's the address itself that we assume is unique per-thread. */
1914 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1915 return 0;
1916
1917 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1918 }
1919
1920 /* The reason we resume in the caller, is because we want to be able
1921 to pass lwp->status_pending as WSTAT, and we need to clear
1922 status_pending_p before resuming, otherwise, linux_resume_one_lwp
1923 refuses to resume. */
1924
1925 static int
1926 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1927 {
1928 struct thread_info *saved_thread;
1929
1930 saved_thread = current_thread;
1931 current_thread = get_lwp_thread (lwp);
1932
1933 if ((wstat == NULL
1934 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1935 && supports_fast_tracepoints ()
1936 && agent_loaded_p ())
1937 {
1938 struct fast_tpoint_collect_status status;
1939 int r;
1940
1941 if (debug_threads)
1942 debug_printf ("Checking whether LWP %ld needs to move out of the "
1943 "jump pad.\n",
1944 lwpid_of (current_thread));
1945
1946 r = linux_fast_tracepoint_collecting (lwp, &status);
1947
1948 if (wstat == NULL
1949 || (WSTOPSIG (*wstat) != SIGILL
1950 && WSTOPSIG (*wstat) != SIGFPE
1951 && WSTOPSIG (*wstat) != SIGSEGV
1952 && WSTOPSIG (*wstat) != SIGBUS))
1953 {
1954 lwp->collecting_fast_tracepoint = r;
1955
1956 if (r != 0)
1957 {
1958 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1959 {
1960 /* Haven't executed the original instruction yet.
1961 Set breakpoint there, and wait till it's hit,
1962 then single-step until exiting the jump pad. */
1963 lwp->exit_jump_pad_bkpt
1964 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1965 }
1966
1967 if (debug_threads)
1968 debug_printf ("Checking whether LWP %ld needs to move out of "
1969 "the jump pad...it does\n",
1970 lwpid_of (current_thread));
1971 current_thread = saved_thread;
1972
1973 return 1;
1974 }
1975 }
1976 else
1977 {
1978 /* If we get a synchronous signal while collecting, *and*
1979 while executing the (relocated) original instruction,
1980 reset the PC to point at the tpoint address, before
1981 reporting to GDB. Otherwise, it's an IPA lib bug: just
1982 report the signal to GDB, and pray for the best. */
1983
1984 lwp->collecting_fast_tracepoint = 0;
1985
1986 if (r != 0
1987 && (status.adjusted_insn_addr <= lwp->stop_pc
1988 && lwp->stop_pc < status.adjusted_insn_addr_end))
1989 {
1990 siginfo_t info;
1991 struct regcache *regcache;
1992
1993 /* The si_addr on a few signals references the address
1994 of the faulting instruction. Adjust that as
1995 well. */
1996 if ((WSTOPSIG (*wstat) == SIGILL
1997 || WSTOPSIG (*wstat) == SIGFPE
1998 || WSTOPSIG (*wstat) == SIGBUS
1999 || WSTOPSIG (*wstat) == SIGSEGV)
2000 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2001 (PTRACE_TYPE_ARG3) 0, &info) == 0
2002 /* Final check just to make sure we don't clobber
2003 the siginfo of non-kernel-sent signals. */
2004 && (uintptr_t) info.si_addr == lwp->stop_pc)
2005 {
2006 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
2007 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
2008 (PTRACE_TYPE_ARG3) 0, &info);
2009 }
2010
2011 regcache = get_thread_regcache (current_thread, 1);
2012 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
2013 lwp->stop_pc = status.tpoint_addr;
2014
2015 /* Cancel any fast tracepoint lock this thread was
2016 holding. */
2017 force_unlock_trace_buffer ();
2018 }
2019
2020 if (lwp->exit_jump_pad_bkpt != NULL)
2021 {
2022 if (debug_threads)
2023 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
2024 "stopping all threads momentarily.\n");
2025
2026 stop_all_lwps (1, lwp);
2027
2028 delete_breakpoint (lwp->exit_jump_pad_bkpt);
2029 lwp->exit_jump_pad_bkpt = NULL;
2030
2031 unstop_all_lwps (1, lwp);
2032
2033 gdb_assert (lwp->suspended >= 0);
2034 }
2035 }
2036 }
2037
2038 if (debug_threads)
2039 debug_printf ("Checking whether LWP %ld needs to move out of the "
2040 "jump pad...no\n",
2041 lwpid_of (current_thread));
2042
2043 current_thread = saved_thread;
2044 return 0;
2045 }
2046
2047 /* Enqueue one signal in the "signals to report later when out of the
2048 jump pad" list. */
2049
2050 static void
2051 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2052 {
2053 struct pending_signals *p_sig;
2054 struct thread_info *thread = get_lwp_thread (lwp);
2055
2056 if (debug_threads)
2057 debug_printf ("Deferring signal %d for LWP %ld.\n",
2058 WSTOPSIG (*wstat), lwpid_of (thread));
2059
2060 if (debug_threads)
2061 {
2062 struct pending_signals *sig;
2063
2064 for (sig = lwp->pending_signals_to_report;
2065 sig != NULL;
2066 sig = sig->prev)
2067 debug_printf (" Already queued %d\n",
2068 sig->signal);
2069
2070 debug_printf (" (no more currently queued signals)\n");
2071 }
2072
2073 /* Don't enqueue non-RT signals if they are already in the deferred
2074 queue. (SIGSTOP being the easiest signal to see ending up here
2075 twice) */
2076 if (WSTOPSIG (*wstat) < __SIGRTMIN)
2077 {
2078 struct pending_signals *sig;
2079
2080 for (sig = lwp->pending_signals_to_report;
2081 sig != NULL;
2082 sig = sig->prev)
2083 {
2084 if (sig->signal == WSTOPSIG (*wstat))
2085 {
2086 if (debug_threads)
2087 debug_printf ("Not requeuing already queued non-RT signal %d"
2088 " for LWP %ld\n",
2089 sig->signal,
2090 lwpid_of (thread));
2091 return;
2092 }
2093 }
2094 }
2095
2096 p_sig = XCNEW (struct pending_signals);
2097 p_sig->prev = lwp->pending_signals_to_report;
2098 p_sig->signal = WSTOPSIG (*wstat);
2099
2100 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2101 &p_sig->info);
2102
2103 lwp->pending_signals_to_report = p_sig;
2104 }
2105
2106 /* Dequeue one signal from the "signals to report later when out of
2107 the jump pad" list. */
2108
2109 static int
2110 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
2111 {
2112 struct thread_info *thread = get_lwp_thread (lwp);
2113
2114 if (lwp->pending_signals_to_report != NULL)
2115 {
2116 struct pending_signals **p_sig;
2117
2118 p_sig = &lwp->pending_signals_to_report;
2119 while ((*p_sig)->prev != NULL)
2120 p_sig = &(*p_sig)->prev;
2121
2122 *wstat = W_STOPCODE ((*p_sig)->signal);
2123 if ((*p_sig)->info.si_signo != 0)
2124 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
2125 &(*p_sig)->info);
2126 free (*p_sig);
2127 *p_sig = NULL;
2128
2129 if (debug_threads)
2130 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
2131 WSTOPSIG (*wstat), lwpid_of (thread));
2132
2133 if (debug_threads)
2134 {
2135 struct pending_signals *sig;
2136
2137 for (sig = lwp->pending_signals_to_report;
2138 sig != NULL;
2139 sig = sig->prev)
2140 debug_printf (" Still queued %d\n",
2141 sig->signal);
2142
2143 debug_printf (" (no more queued signals)\n");
2144 }
2145
2146 return 1;
2147 }
2148
2149 return 0;
2150 }
2151
2152 /* Fetch the possibly triggered data watchpoint info and store it in
2153 CHILD.
2154
2155 On some archs, like x86, that use debug registers to set
2156 watchpoints, it's possible that the way to know which watched
2157 address trapped, is to check the register that is used to select
2158 which address to watch. Problem is, between setting the watchpoint
2159 and reading back which data address trapped, the user may change
2160 the set of watchpoints, and, as a consequence, GDB changes the
2161 debug registers in the inferior. To avoid reading back a stale
2162 stopped-data-address when that happens, we cache in LP the fact
2163 that a watchpoint trapped, and the corresponding data address, as
2164 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
2165 registers meanwhile, we have the cached data we can rely on. */
2166
2167 static int
2168 check_stopped_by_watchpoint (struct lwp_info *child)
2169 {
2170 if (the_low_target.stopped_by_watchpoint != NULL)
2171 {
2172 struct thread_info *saved_thread;
2173
2174 saved_thread = current_thread;
2175 current_thread = get_lwp_thread (child);
2176
2177 if (the_low_target.stopped_by_watchpoint ())
2178 {
2179 child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
2180
2181 if (the_low_target.stopped_data_address != NULL)
2182 child->stopped_data_address
2183 = the_low_target.stopped_data_address ();
2184 else
2185 child->stopped_data_address = 0;
2186 }
2187
2188 current_thread = saved_thread;
2189 }
2190
2191 return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
2192 }
2193
2194 /* Return the ptrace options that we want to try to enable. */
2195
2196 static int
2197 linux_low_ptrace_options (int attached)
2198 {
2199 int options = 0;
2200
2201 if (!attached)
2202 options |= PTRACE_O_EXITKILL;
2203
2204 if (report_fork_events)
2205 options |= PTRACE_O_TRACEFORK;
2206
2207 if (report_vfork_events)
2208 options |= (PTRACE_O_TRACEVFORK | PTRACE_O_TRACEVFORKDONE);
2209
2210 if (report_exec_events)
2211 options |= PTRACE_O_TRACEEXEC;
2212
2213 return options;
2214 }
2215
2216 /* Do low-level handling of the event, and check if we should go on
2217 and pass it to caller code. Return the affected lwp if we are, or
2218 NULL otherwise. */
2219
2220 static struct lwp_info *
2221 linux_low_filter_event (int lwpid, int wstat)
2222 {
2223 struct lwp_info *child;
2224 struct thread_info *thread;
2225 int have_stop_pc = 0;
2226
2227 child = find_lwp_pid (pid_to_ptid (lwpid));
2228
2229 /* Check for stop events reported by a process we didn't already
2230 know about - anything not already in our LWP list.
2231
2232 If we're expecting to receive stopped processes after
2233 fork, vfork, and clone events, then we'll just add the
2234 new one to our list and go back to waiting for the event
2235 to be reported - the stopped process might be returned
2236 from waitpid before or after the event is.
2237
2238 But note the case of a non-leader thread exec'ing after the
2239 leader having exited, and gone from our lists (because
2240 check_zombie_leaders deleted it). The non-leader thread
2241 changes its tid to the tgid. */
2242
2243 if (WIFSTOPPED (wstat) && child == NULL && WSTOPSIG (wstat) == SIGTRAP
2244 && linux_ptrace_get_extended_event (wstat) == PTRACE_EVENT_EXEC)
2245 {
2246 ptid_t child_ptid;
2247
2248 /* A multi-thread exec after we had seen the leader exiting. */
2249 if (debug_threads)
2250 {
2251 debug_printf ("LLW: Re-adding thread group leader LWP %d"
2252 "after exec.\n", lwpid);
2253 }
2254
2255 child_ptid = ptid_build (lwpid, lwpid, 0);
2256 child = add_lwp (child_ptid);
2257 child->stopped = 1;
2258 current_thread = child->thread;
2259 }
2260
2261 /* If we didn't find a process, one of two things presumably happened:
2262 - A process we started and then detached from has exited. Ignore it.
2263 - A process we are controlling has forked and the new child's stop
2264 was reported to us by the kernel. Save its PID. */
2265 if (child == NULL && WIFSTOPPED (wstat))
2266 {
2267 add_to_pid_list (&stopped_pids, lwpid, wstat);
2268 return NULL;
2269 }
2270 else if (child == NULL)
2271 return NULL;
2272
2273 thread = get_lwp_thread (child);
2274
2275 child->stopped = 1;
2276
2277 child->last_status = wstat;
2278
2279 /* Check if the thread has exited. */
2280 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
2281 {
2282 if (debug_threads)
2283 debug_printf ("LLFE: %d exited.\n", lwpid);
2284 /* If there is at least one more LWP, then the exit signal was
2285 not the end of the debugged application and should be
2286 ignored, unless GDB wants to hear about thread exits. */
2287 if (report_thread_events
2288 || last_thread_of_process_p (pid_of (thread)))
2289 {
2290 /* Since events are serialized to GDB core, and we can't
2291 report this one right now. Leave the status pending for
2292 the next time we're able to report it. */
2293 mark_lwp_dead (child, wstat);
2294 return child;
2295 }
2296 else
2297 {
2298 delete_lwp (child);
2299 return NULL;
2300 }
2301 }
2302
2303 gdb_assert (WIFSTOPPED (wstat));
2304
2305 if (WIFSTOPPED (wstat))
2306 {
2307 struct process_info *proc;
2308
2309 /* Architecture-specific setup after inferior is running. */
2310 proc = find_process_pid (pid_of (thread));
2311 if (proc->tdesc == NULL)
2312 {
2313 if (proc->attached)
2314 {
2315 /* This needs to happen after we have attached to the
2316 inferior and it is stopped for the first time, but
2317 before we access any inferior registers. */
2318 linux_arch_setup_thread (thread);
2319 }
2320 else
2321 {
2322 /* The process is started, but GDBserver will do
2323 architecture-specific setup after the program stops at
2324 the first instruction. */
2325 child->status_pending_p = 1;
2326 child->status_pending = wstat;
2327 return child;
2328 }
2329 }
2330 }
2331
2332 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
2333 {
2334 struct process_info *proc = find_process_pid (pid_of (thread));
2335 int options = linux_low_ptrace_options (proc->attached);
2336
2337 linux_enable_event_reporting (lwpid, options);
2338 child->must_set_ptrace_flags = 0;
2339 }
2340
2341 /* Be careful to not overwrite stop_pc until
2342 check_stopped_by_breakpoint is called. */
2343 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2344 && linux_is_extended_waitstatus (wstat))
2345 {
2346 child->stop_pc = get_pc (child);
2347 if (handle_extended_wait (&child, wstat))
2348 {
2349 /* The event has been handled, so just return without
2350 reporting it. */
2351 return NULL;
2352 }
2353 }
2354
2355 /* Check first whether this was a SW/HW breakpoint before checking
2356 watchpoints, because at least s390 can't tell the data address of
2357 hardware watchpoint hits, and returns stopped-by-watchpoint as
2358 long as there's a watchpoint set. */
2359 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
2360 {
2361 if (check_stopped_by_breakpoint (child))
2362 have_stop_pc = 1;
2363 }
2364
2365 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2366 or hardware watchpoint. Check which is which if we got
2367 TARGET_STOPPED_BY_HW_BREAKPOINT. Likewise, we may have single
2368 stepped an instruction that triggered a watchpoint. In that
2369 case, on some architectures (such as x86), instead of
2370 TRAP_HWBKPT, si_code indicates TRAP_TRACE, and we need to check
2371 the debug registers separately. */
2372 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
2373 && child->stop_reason != TARGET_STOPPED_BY_SW_BREAKPOINT)
2374 check_stopped_by_watchpoint (child);
2375
2376 if (!have_stop_pc)
2377 child->stop_pc = get_pc (child);
2378
2379 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
2380 && child->stop_expected)
2381 {
2382 if (debug_threads)
2383 debug_printf ("Expected stop.\n");
2384 child->stop_expected = 0;
2385
2386 if (thread->last_resume_kind == resume_stop)
2387 {
2388 /* We want to report the stop to the core. Treat the
2389 SIGSTOP as a normal event. */
2390 if (debug_threads)
2391 debug_printf ("LLW: resume_stop SIGSTOP caught for %s.\n",
2392 target_pid_to_str (ptid_of (thread)));
2393 }
2394 else if (stopping_threads != NOT_STOPPING_THREADS)
2395 {
2396 /* Stopping threads. We don't want this SIGSTOP to end up
2397 pending. */
2398 if (debug_threads)
2399 debug_printf ("LLW: SIGSTOP caught for %s "
2400 "while stopping threads.\n",
2401 target_pid_to_str (ptid_of (thread)));
2402 return NULL;
2403 }
2404 else
2405 {
2406 /* This is a delayed SIGSTOP. Filter out the event. */
2407 if (debug_threads)
2408 debug_printf ("LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
2409 child->stepping ? "step" : "continue",
2410 target_pid_to_str (ptid_of (thread)));
2411
2412 linux_resume_one_lwp (child, child->stepping, 0, NULL);
2413 return NULL;
2414 }
2415 }
2416
2417 child->status_pending_p = 1;
2418 child->status_pending = wstat;
2419 return child;
2420 }
2421
2422 /* Resume LWPs that are currently stopped without any pending status
2423 to report, but are resumed from the core's perspective. */
2424
2425 static void
2426 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
2427 {
2428 struct thread_info *thread = (struct thread_info *) entry;
2429 struct lwp_info *lp = get_thread_lwp (thread);
2430
2431 if (lp->stopped
2432 && !lp->suspended
2433 && !lp->status_pending_p
2434 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
2435 {
2436 int step = thread->last_resume_kind == resume_step;
2437
2438 if (debug_threads)
2439 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2440 target_pid_to_str (ptid_of (thread)),
2441 paddress (lp->stop_pc),
2442 step);
2443
2444 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2445 }
2446 }
2447
2448 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2449 match FILTER_PTID (leaving others pending). The PTIDs can be:
2450 minus_one_ptid, to specify any child; a pid PTID, specifying all
2451 lwps of a thread group; or a PTID representing a single lwp. Store
2452 the stop status through the status pointer WSTAT. OPTIONS is
2453 passed to the waitpid call. Return 0 if no event was found and
2454 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2455 was found. Return the PID of the stopped child otherwise. */
2456
2457 static int
2458 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2459 int *wstatp, int options)
2460 {
2461 struct thread_info *event_thread;
2462 struct lwp_info *event_child, *requested_child;
2463 sigset_t block_mask, prev_mask;
2464
2465 retry:
2466 /* N.B. event_thread points to the thread_info struct that contains
2467 event_child. Keep them in sync. */
2468 event_thread = NULL;
2469 event_child = NULL;
2470 requested_child = NULL;
2471
2472 /* Check for a lwp with a pending status. */
2473
2474 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2475 {
2476 event_thread = (struct thread_info *)
2477 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2478 if (event_thread != NULL)
2479 event_child = get_thread_lwp (event_thread);
2480 if (debug_threads && event_thread)
2481 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2482 }
2483 else if (!ptid_equal (filter_ptid, null_ptid))
2484 {
2485 requested_child = find_lwp_pid (filter_ptid);
2486
2487 if (stopping_threads == NOT_STOPPING_THREADS
2488 && requested_child->status_pending_p
2489 && requested_child->collecting_fast_tracepoint)
2490 {
2491 enqueue_one_deferred_signal (requested_child,
2492 &requested_child->status_pending);
2493 requested_child->status_pending_p = 0;
2494 requested_child->status_pending = 0;
2495 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2496 }
2497
2498 if (requested_child->suspended
2499 && requested_child->status_pending_p)
2500 {
2501 internal_error (__FILE__, __LINE__,
2502 "requesting an event out of a"
2503 " suspended child?");
2504 }
2505
2506 if (requested_child->status_pending_p)
2507 {
2508 event_child = requested_child;
2509 event_thread = get_lwp_thread (event_child);
2510 }
2511 }
2512
2513 if (event_child != NULL)
2514 {
2515 if (debug_threads)
2516 debug_printf ("Got an event from pending child %ld (%04x)\n",
2517 lwpid_of (event_thread), event_child->status_pending);
2518 *wstatp = event_child->status_pending;
2519 event_child->status_pending_p = 0;
2520 event_child->status_pending = 0;
2521 current_thread = event_thread;
2522 return lwpid_of (event_thread);
2523 }
2524
2525 /* But if we don't find a pending event, we'll have to wait.
2526
2527 We only enter this loop if no process has a pending wait status.
2528 Thus any action taken in response to a wait status inside this
2529 loop is responding as soon as we detect the status, not after any
2530 pending events. */
2531
2532 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2533 all signals while here. */
2534 sigfillset (&block_mask);
2535 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2536
2537 /* Always pull all events out of the kernel. We'll randomly select
2538 an event LWP out of all that have events, to prevent
2539 starvation. */
2540 while (event_child == NULL)
2541 {
2542 pid_t ret = 0;
2543
2544 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
2545 quirks:
2546
2547 - If the thread group leader exits while other threads in the
2548 thread group still exist, waitpid(TGID, ...) hangs. That
2549 waitpid won't return an exit status until the other threads
2550 in the group are reaped.
2551
2552 - When a non-leader thread execs, that thread just vanishes
2553 without reporting an exit (so we'd hang if we waited for it
2554 explicitly in that case). The exec event is reported to
2555 the TGID pid. */
2556 errno = 0;
2557 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2558
2559 if (debug_threads)
2560 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2561 ret, errno ? strerror (errno) : "ERRNO-OK");
2562
2563 if (ret > 0)
2564 {
2565 if (debug_threads)
2566 {
2567 debug_printf ("LLW: waitpid %ld received %s\n",
2568 (long) ret, status_to_str (*wstatp));
2569 }
2570
2571 /* Filter all events. IOW, leave all events pending. We'll
2572 randomly select an event LWP out of all that have events
2573 below. */
2574 linux_low_filter_event (ret, *wstatp);
2575 /* Retry until nothing comes out of waitpid. A single
2576 SIGCHLD can indicate more than one child stopped. */
2577 continue;
2578 }
2579
2580 /* Now that we've pulled all events out of the kernel, resume
2581 LWPs that don't have an interesting event to report. */
2582 if (stopping_threads == NOT_STOPPING_THREADS)
2583 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2584
2585 /* ... and find an LWP with a status to report to the core, if
2586 any. */
2587 event_thread = (struct thread_info *)
2588 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2589 if (event_thread != NULL)
2590 {
2591 event_child = get_thread_lwp (event_thread);
2592 *wstatp = event_child->status_pending;
2593 event_child->status_pending_p = 0;
2594 event_child->status_pending = 0;
2595 break;
2596 }
2597
2598 /* Check for zombie thread group leaders. Those can't be reaped
2599 until all other threads in the thread group are. */
2600 check_zombie_leaders ();
2601
2602 /* If there are no resumed children left in the set of LWPs we
2603 want to wait for, bail. We can't just block in
2604 waitpid/sigsuspend, because lwps might have been left stopped
2605 in trace-stop state, and we'd be stuck forever waiting for
2606 their status to change (which would only happen if we resumed
2607 them). Even if WNOHANG is set, this return code is preferred
2608 over 0 (below), as it is more detailed. */
2609 if ((find_inferior (&all_threads,
2610 not_stopped_callback,
2611 &wait_ptid) == NULL))
2612 {
2613 if (debug_threads)
2614 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2615 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2616 return -1;
2617 }
2618
2619 /* No interesting event to report to the caller. */
2620 if ((options & WNOHANG))
2621 {
2622 if (debug_threads)
2623 debug_printf ("WNOHANG set, no event found\n");
2624
2625 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2626 return 0;
2627 }
2628
2629 /* Block until we get an event reported with SIGCHLD. */
2630 if (debug_threads)
2631 debug_printf ("sigsuspend'ing\n");
2632
2633 sigsuspend (&prev_mask);
2634 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2635 goto retry;
2636 }
2637
2638 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2639
2640 current_thread = event_thread;
2641
2642 return lwpid_of (event_thread);
2643 }
2644
2645 /* Wait for an event from child(ren) PTID. PTIDs can be:
2646 minus_one_ptid, to specify any child; a pid PTID, specifying all
2647 lwps of a thread group; or a PTID representing a single lwp. Store
2648 the stop status through the status pointer WSTAT. OPTIONS is
2649 passed to the waitpid call. Return 0 if no event was found and
2650 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2651 was found. Return the PID of the stopped child otherwise. */
2652
2653 static int
2654 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2655 {
2656 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2657 }
2658
2659 /* Count the LWP's that have had events. */
2660
2661 static int
2662 count_events_callback (struct inferior_list_entry *entry, void *data)
2663 {
2664 struct thread_info *thread = (struct thread_info *) entry;
2665 struct lwp_info *lp = get_thread_lwp (thread);
2666 int *count = (int *) data;
2667
2668 gdb_assert (count != NULL);
2669
2670 /* Count only resumed LWPs that have an event pending. */
2671 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2672 && lp->status_pending_p)
2673 (*count)++;
2674
2675 return 0;
2676 }
2677
2678 /* Select the LWP (if any) that is currently being single-stepped. */
2679
2680 static int
2681 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2682 {
2683 struct thread_info *thread = (struct thread_info *) entry;
2684 struct lwp_info *lp = get_thread_lwp (thread);
2685
2686 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2687 && thread->last_resume_kind == resume_step
2688 && lp->status_pending_p)
2689 return 1;
2690 else
2691 return 0;
2692 }
2693
2694 /* Select the Nth LWP that has had an event. */
2695
2696 static int
2697 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2698 {
2699 struct thread_info *thread = (struct thread_info *) entry;
2700 struct lwp_info *lp = get_thread_lwp (thread);
2701 int *selector = (int *) data;
2702
2703 gdb_assert (selector != NULL);
2704
2705 /* Select only resumed LWPs that have an event pending. */
2706 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2707 && lp->status_pending_p)
2708 if ((*selector)-- == 0)
2709 return 1;
2710
2711 return 0;
2712 }
2713
2714 /* Select one LWP out of those that have events pending. */
2715
2716 static void
2717 select_event_lwp (struct lwp_info **orig_lp)
2718 {
2719 int num_events = 0;
2720 int random_selector;
2721 struct thread_info *event_thread = NULL;
2722
2723 /* In all-stop, give preference to the LWP that is being
2724 single-stepped. There will be at most one, and it's the LWP that
2725 the core is most interested in. If we didn't do this, then we'd
2726 have to handle pending step SIGTRAPs somehow in case the core
2727 later continues the previously-stepped thread, otherwise we'd
2728 report the pending SIGTRAP, and the core, not having stepped the
2729 thread, wouldn't understand what the trap was for, and therefore
2730 would report it to the user as a random signal. */
2731 if (!non_stop)
2732 {
2733 event_thread
2734 = (struct thread_info *) find_inferior (&all_threads,
2735 select_singlestep_lwp_callback,
2736 NULL);
2737 if (event_thread != NULL)
2738 {
2739 if (debug_threads)
2740 debug_printf ("SEL: Select single-step %s\n",
2741 target_pid_to_str (ptid_of (event_thread)));
2742 }
2743 }
2744 if (event_thread == NULL)
2745 {
2746 /* No single-stepping LWP. Select one at random, out of those
2747 which have had events. */
2748
2749 /* First see how many events we have. */
2750 find_inferior (&all_threads, count_events_callback, &num_events);
2751 gdb_assert (num_events > 0);
2752
2753 /* Now randomly pick a LWP out of those that have had
2754 events. */
2755 random_selector = (int)
2756 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2757
2758 if (debug_threads && num_events > 1)
2759 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2760 num_events, random_selector);
2761
2762 event_thread
2763 = (struct thread_info *) find_inferior (&all_threads,
2764 select_event_lwp_callback,
2765 &random_selector);
2766 }
2767
2768 if (event_thread != NULL)
2769 {
2770 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2771
2772 /* Switch the event LWP. */
2773 *orig_lp = event_lp;
2774 }
2775 }
2776
2777 /* Decrement the suspend count of an LWP. */
2778
2779 static int
2780 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2781 {
2782 struct thread_info *thread = (struct thread_info *) entry;
2783 struct lwp_info *lwp = get_thread_lwp (thread);
2784
2785 /* Ignore EXCEPT. */
2786 if (lwp == except)
2787 return 0;
2788
2789 lwp_suspended_decr (lwp);
2790 return 0;
2791 }
2792
2793 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2794 NULL. */
2795
2796 static void
2797 unsuspend_all_lwps (struct lwp_info *except)
2798 {
2799 find_inferior (&all_threads, unsuspend_one_lwp, except);
2800 }
2801
2802 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2803 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2804 void *data);
2805 static int lwp_running (struct inferior_list_entry *entry, void *data);
2806 static ptid_t linux_wait_1 (ptid_t ptid,
2807 struct target_waitstatus *ourstatus,
2808 int target_options);
2809
2810 /* Stabilize threads (move out of jump pads).
2811
2812 If a thread is midway collecting a fast tracepoint, we need to
2813 finish the collection and move it out of the jump pad before
2814 reporting the signal.
2815
2816 This avoids recursion while collecting (when a signal arrives
2817 midway, and the signal handler itself collects), which would trash
2818 the trace buffer. In case the user set a breakpoint in a signal
2819 handler, this avoids the backtrace showing the jump pad, etc..
2820 Most importantly, there are certain things we can't do safely if
2821 threads are stopped in a jump pad (or in its callee's). For
2822 example:
2823
2824 - starting a new trace run. A thread still collecting the
2825 previous run, could trash the trace buffer when resumed. The trace
2826 buffer control structures would have been reset but the thread had
2827 no way to tell. The thread could even midway memcpy'ing to the
2828 buffer, which would mean that when resumed, it would clobber the
2829 trace buffer that had been set for a new run.
2830
2831 - we can't rewrite/reuse the jump pads for new tracepoints
2832 safely. Say you do tstart while a thread is stopped midway while
2833 collecting. When the thread is later resumed, it finishes the
2834 collection, and returns to the jump pad, to execute the original
2835 instruction that was under the tracepoint jump at the time the
2836 older run had been started. If the jump pad had been rewritten
2837 since for something else in the new run, the thread would now
2838 execute the wrong / random instructions. */
2839
2840 static void
2841 linux_stabilize_threads (void)
2842 {
2843 struct thread_info *saved_thread;
2844 struct thread_info *thread_stuck;
2845
2846 thread_stuck
2847 = (struct thread_info *) find_inferior (&all_threads,
2848 stuck_in_jump_pad_callback,
2849 NULL);
2850 if (thread_stuck != NULL)
2851 {
2852 if (debug_threads)
2853 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2854 lwpid_of (thread_stuck));
2855 return;
2856 }
2857
2858 saved_thread = current_thread;
2859
2860 stabilizing_threads = 1;
2861
2862 /* Kick 'em all. */
2863 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2864
2865 /* Loop until all are stopped out of the jump pads. */
2866 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2867 {
2868 struct target_waitstatus ourstatus;
2869 struct lwp_info *lwp;
2870 int wstat;
2871
2872 /* Note that we go through the full wait even loop. While
2873 moving threads out of jump pad, we need to be able to step
2874 over internal breakpoints and such. */
2875 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2876
2877 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2878 {
2879 lwp = get_thread_lwp (current_thread);
2880
2881 /* Lock it. */
2882 lwp_suspended_inc (lwp);
2883
2884 if (ourstatus.value.sig != GDB_SIGNAL_0
2885 || current_thread->last_resume_kind == resume_stop)
2886 {
2887 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2888 enqueue_one_deferred_signal (lwp, &wstat);
2889 }
2890 }
2891 }
2892
2893 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2894
2895 stabilizing_threads = 0;
2896
2897 current_thread = saved_thread;
2898
2899 if (debug_threads)
2900 {
2901 thread_stuck
2902 = (struct thread_info *) find_inferior (&all_threads,
2903 stuck_in_jump_pad_callback,
2904 NULL);
2905 if (thread_stuck != NULL)
2906 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2907 lwpid_of (thread_stuck));
2908 }
2909 }
2910
2911 /* Convenience function that is called when the kernel reports an
2912 event that is not passed out to GDB. */
2913
2914 static ptid_t
2915 ignore_event (struct target_waitstatus *ourstatus)
2916 {
2917 /* If we got an event, there may still be others, as a single
2918 SIGCHLD can indicate more than one child stopped. This forces
2919 another target_wait call. */
2920 async_file_mark ();
2921
2922 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2923 return null_ptid;
2924 }
2925
2926 /* Convenience function that is called when the kernel reports an exit
2927 event. This decides whether to report the event to GDB as a
2928 process exit event, a thread exit event, or to suppress the
2929 event. */
2930
2931 static ptid_t
2932 filter_exit_event (struct lwp_info *event_child,
2933 struct target_waitstatus *ourstatus)
2934 {
2935 struct thread_info *thread = get_lwp_thread (event_child);
2936 ptid_t ptid = ptid_of (thread);
2937
2938 if (!last_thread_of_process_p (pid_of (thread)))
2939 {
2940 if (report_thread_events)
2941 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
2942 else
2943 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2944
2945 delete_lwp (event_child);
2946 }
2947 return ptid;
2948 }
2949
2950 /* Wait for process, returns status. */
2951
2952 static ptid_t
2953 linux_wait_1 (ptid_t ptid,
2954 struct target_waitstatus *ourstatus, int target_options)
2955 {
2956 int w;
2957 struct lwp_info *event_child;
2958 int options;
2959 int pid;
2960 int step_over_finished;
2961 int bp_explains_trap;
2962 int maybe_internal_trap;
2963 int report_to_gdb;
2964 int trace_event;
2965 int in_step_range;
2966
2967 if (debug_threads)
2968 {
2969 debug_enter ();
2970 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2971 }
2972
2973 /* Translate generic target options into linux options. */
2974 options = __WALL;
2975 if (target_options & TARGET_WNOHANG)
2976 options |= WNOHANG;
2977
2978 bp_explains_trap = 0;
2979 trace_event = 0;
2980 in_step_range = 0;
2981 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2982
2983 if (ptid_equal (step_over_bkpt, null_ptid))
2984 pid = linux_wait_for_event (ptid, &w, options);
2985 else
2986 {
2987 if (debug_threads)
2988 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2989 target_pid_to_str (step_over_bkpt));
2990 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2991 }
2992
2993 if (pid == 0)
2994 {
2995 gdb_assert (target_options & TARGET_WNOHANG);
2996
2997 if (debug_threads)
2998 {
2999 debug_printf ("linux_wait_1 ret = null_ptid, "
3000 "TARGET_WAITKIND_IGNORE\n");
3001 debug_exit ();
3002 }
3003
3004 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3005 return null_ptid;
3006 }
3007 else if (pid == -1)
3008 {
3009 if (debug_threads)
3010 {
3011 debug_printf ("linux_wait_1 ret = null_ptid, "
3012 "TARGET_WAITKIND_NO_RESUMED\n");
3013 debug_exit ();
3014 }
3015
3016 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3017 return null_ptid;
3018 }
3019
3020 event_child = get_thread_lwp (current_thread);
3021
3022 /* linux_wait_for_event only returns an exit status for the last
3023 child of a process. Report it. */
3024 if (WIFEXITED (w) || WIFSIGNALED (w))
3025 {
3026 if (WIFEXITED (w))
3027 {
3028 ourstatus->kind = TARGET_WAITKIND_EXITED;
3029 ourstatus->value.integer = WEXITSTATUS (w);
3030
3031 if (debug_threads)
3032 {
3033 debug_printf ("linux_wait_1 ret = %s, exited with "
3034 "retcode %d\n",
3035 target_pid_to_str (ptid_of (current_thread)),
3036 WEXITSTATUS (w));
3037 debug_exit ();
3038 }
3039 }
3040 else
3041 {
3042 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
3043 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
3044
3045 if (debug_threads)
3046 {
3047 debug_printf ("linux_wait_1 ret = %s, terminated with "
3048 "signal %d\n",
3049 target_pid_to_str (ptid_of (current_thread)),
3050 WTERMSIG (w));
3051 debug_exit ();
3052 }
3053 }
3054
3055 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3056 return filter_exit_event (event_child, ourstatus);
3057
3058 return ptid_of (current_thread);
3059 }
3060
3061 /* If step-over executes a breakpoint instruction, it means a
3062 gdb/gdbserver breakpoint had been planted on top of a permanent
3063 breakpoint. The PC has been adjusted by
3064 check_stopped_by_breakpoint to point at the breakpoint address.
3065 Advance the PC manually past the breakpoint, otherwise the
3066 program would keep trapping the permanent breakpoint forever. */
3067 if (!ptid_equal (step_over_bkpt, null_ptid)
3068 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
3069 {
3070 int increment_pc = 0;
3071 int breakpoint_kind = 0;
3072 CORE_ADDR stop_pc = event_child->stop_pc;
3073
3074 breakpoint_kind = the_target->breakpoint_kind_from_pc (&stop_pc);
3075 the_target->sw_breakpoint_from_kind (breakpoint_kind, &increment_pc);
3076
3077 if (debug_threads)
3078 {
3079 debug_printf ("step-over for %s executed software breakpoint\n",
3080 target_pid_to_str (ptid_of (current_thread)));
3081 }
3082
3083 if (increment_pc != 0)
3084 {
3085 struct regcache *regcache
3086 = get_thread_regcache (current_thread, 1);
3087
3088 event_child->stop_pc += increment_pc;
3089 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3090
3091 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
3092 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3093 }
3094 }
3095
3096 /* If this event was not handled before, and is not a SIGTRAP, we
3097 report it. SIGILL and SIGSEGV are also treated as traps in case
3098 a breakpoint is inserted at the current PC. If this target does
3099 not support internal breakpoints at all, we also report the
3100 SIGTRAP without further processing; it's of no concern to us. */
3101 maybe_internal_trap
3102 = (supports_breakpoints ()
3103 && (WSTOPSIG (w) == SIGTRAP
3104 || ((WSTOPSIG (w) == SIGILL
3105 || WSTOPSIG (w) == SIGSEGV)
3106 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
3107
3108 if (maybe_internal_trap)
3109 {
3110 /* Handle anything that requires bookkeeping before deciding to
3111 report the event or continue waiting. */
3112
3113 /* First check if we can explain the SIGTRAP with an internal
3114 breakpoint, or if we should possibly report the event to GDB.
3115 Do this before anything that may remove or insert a
3116 breakpoint. */
3117 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
3118
3119 /* We have a SIGTRAP, possibly a step-over dance has just
3120 finished. If so, tweak the state machine accordingly,
3121 reinsert breakpoints and delete any reinsert (software
3122 single-step) breakpoints. */
3123 step_over_finished = finish_step_over (event_child);
3124
3125 /* Now invoke the callbacks of any internal breakpoints there. */
3126 check_breakpoints (event_child->stop_pc);
3127
3128 /* Handle tracepoint data collecting. This may overflow the
3129 trace buffer, and cause a tracing stop, removing
3130 breakpoints. */
3131 trace_event = handle_tracepoints (event_child);
3132
3133 if (bp_explains_trap)
3134 {
3135 /* If we stepped or ran into an internal breakpoint, we've
3136 already handled it. So next time we resume (from this
3137 PC), we should step over it. */
3138 if (debug_threads)
3139 debug_printf ("Hit a gdbserver breakpoint.\n");
3140
3141 if (breakpoint_here (event_child->stop_pc))
3142 event_child->need_step_over = 1;
3143 }
3144 }
3145 else
3146 {
3147 /* We have some other signal, possibly a step-over dance was in
3148 progress, and it should be cancelled too. */
3149 step_over_finished = finish_step_over (event_child);
3150 }
3151
3152 /* We have all the data we need. Either report the event to GDB, or
3153 resume threads and keep waiting for more. */
3154
3155 /* If we're collecting a fast tracepoint, finish the collection and
3156 move out of the jump pad before delivering a signal. See
3157 linux_stabilize_threads. */
3158
3159 if (WIFSTOPPED (w)
3160 && WSTOPSIG (w) != SIGTRAP
3161 && supports_fast_tracepoints ()
3162 && agent_loaded_p ())
3163 {
3164 if (debug_threads)
3165 debug_printf ("Got signal %d for LWP %ld. Check if we need "
3166 "to defer or adjust it.\n",
3167 WSTOPSIG (w), lwpid_of (current_thread));
3168
3169 /* Allow debugging the jump pad itself. */
3170 if (current_thread->last_resume_kind != resume_step
3171 && maybe_move_out_of_jump_pad (event_child, &w))
3172 {
3173 enqueue_one_deferred_signal (event_child, &w);
3174
3175 if (debug_threads)
3176 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
3177 WSTOPSIG (w), lwpid_of (current_thread));
3178
3179 linux_resume_one_lwp (event_child, 0, 0, NULL);
3180
3181 return ignore_event (ourstatus);
3182 }
3183 }
3184
3185 if (event_child->collecting_fast_tracepoint)
3186 {
3187 if (debug_threads)
3188 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
3189 "Check if we're already there.\n",
3190 lwpid_of (current_thread),
3191 event_child->collecting_fast_tracepoint);
3192
3193 trace_event = 1;
3194
3195 event_child->collecting_fast_tracepoint
3196 = linux_fast_tracepoint_collecting (event_child, NULL);
3197
3198 if (event_child->collecting_fast_tracepoint != 1)
3199 {
3200 /* No longer need this breakpoint. */
3201 if (event_child->exit_jump_pad_bkpt != NULL)
3202 {
3203 if (debug_threads)
3204 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
3205 "stopping all threads momentarily.\n");
3206
3207 /* Other running threads could hit this breakpoint.
3208 We don't handle moribund locations like GDB does,
3209 instead we always pause all threads when removing
3210 breakpoints, so that any step-over or
3211 decr_pc_after_break adjustment is always taken
3212 care of while the breakpoint is still
3213 inserted. */
3214 stop_all_lwps (1, event_child);
3215
3216 delete_breakpoint (event_child->exit_jump_pad_bkpt);
3217 event_child->exit_jump_pad_bkpt = NULL;
3218
3219 unstop_all_lwps (1, event_child);
3220
3221 gdb_assert (event_child->suspended >= 0);
3222 }
3223 }
3224
3225 if (event_child->collecting_fast_tracepoint == 0)
3226 {
3227 if (debug_threads)
3228 debug_printf ("fast tracepoint finished "
3229 "collecting successfully.\n");
3230
3231 /* We may have a deferred signal to report. */
3232 if (dequeue_one_deferred_signal (event_child, &w))
3233 {
3234 if (debug_threads)
3235 debug_printf ("dequeued one signal.\n");
3236 }
3237 else
3238 {
3239 if (debug_threads)
3240 debug_printf ("no deferred signals.\n");
3241
3242 if (stabilizing_threads)
3243 {
3244 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3245 ourstatus->value.sig = GDB_SIGNAL_0;
3246
3247 if (debug_threads)
3248 {
3249 debug_printf ("linux_wait_1 ret = %s, stopped "
3250 "while stabilizing threads\n",
3251 target_pid_to_str (ptid_of (current_thread)));
3252 debug_exit ();
3253 }
3254
3255 return ptid_of (current_thread);
3256 }
3257 }
3258 }
3259 }
3260
3261 /* Check whether GDB would be interested in this event. */
3262
3263 /* If GDB is not interested in this signal, don't stop other
3264 threads, and don't report it to GDB. Just resume the inferior
3265 right away. We do this for threading-related signals as well as
3266 any that GDB specifically requested we ignore. But never ignore
3267 SIGSTOP if we sent it ourselves, and do not ignore signals when
3268 stepping - they may require special handling to skip the signal
3269 handler. Also never ignore signals that could be caused by a
3270 breakpoint. */
3271 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
3272 thread library? */
3273 if (WIFSTOPPED (w)
3274 && current_thread->last_resume_kind != resume_step
3275 && (
3276 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
3277 (current_process ()->priv->thread_db != NULL
3278 && (WSTOPSIG (w) == __SIGRTMIN
3279 || WSTOPSIG (w) == __SIGRTMIN + 1))
3280 ||
3281 #endif
3282 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
3283 && !(WSTOPSIG (w) == SIGSTOP
3284 && current_thread->last_resume_kind == resume_stop)
3285 && !linux_wstatus_maybe_breakpoint (w))))
3286 {
3287 siginfo_t info, *info_p;
3288
3289 if (debug_threads)
3290 debug_printf ("Ignored signal %d for LWP %ld.\n",
3291 WSTOPSIG (w), lwpid_of (current_thread));
3292
3293 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
3294 (PTRACE_TYPE_ARG3) 0, &info) == 0)
3295 info_p = &info;
3296 else
3297 info_p = NULL;
3298
3299 if (step_over_finished)
3300 {
3301 /* We cancelled this thread's step-over above. We still
3302 need to unsuspend all other LWPs, and set them back
3303 running again while the signal handler runs. */
3304 unsuspend_all_lwps (event_child);
3305
3306 /* Enqueue the pending signal info so that proceed_all_lwps
3307 doesn't lose it. */
3308 enqueue_pending_signal (event_child, WSTOPSIG (w), info_p);
3309
3310 proceed_all_lwps ();
3311 }
3312 else
3313 {
3314 linux_resume_one_lwp (event_child, event_child->stepping,
3315 WSTOPSIG (w), info_p);
3316 }
3317 return ignore_event (ourstatus);
3318 }
3319
3320 /* Note that all addresses are always "out of the step range" when
3321 there's no range to begin with. */
3322 in_step_range = lwp_in_step_range (event_child);
3323
3324 /* If GDB wanted this thread to single step, and the thread is out
3325 of the step range, we always want to report the SIGTRAP, and let
3326 GDB handle it. Watchpoints should always be reported. So should
3327 signals we can't explain. A SIGTRAP we can't explain could be a
3328 GDB breakpoint --- we may or not support Z0 breakpoints. If we
3329 do, we're be able to handle GDB breakpoints on top of internal
3330 breakpoints, by handling the internal breakpoint and still
3331 reporting the event to GDB. If we don't, we're out of luck, GDB
3332 won't see the breakpoint hit. If we see a single-step event but
3333 the thread should be continuing, don't pass the trap to gdb.
3334 That indicates that we had previously finished a single-step but
3335 left the single-step pending -- see
3336 complete_ongoing_step_over. */
3337 report_to_gdb = (!maybe_internal_trap
3338 || (current_thread->last_resume_kind == resume_step
3339 && !in_step_range)
3340 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3341 || (!in_step_range
3342 && !bp_explains_trap
3343 && !trace_event
3344 && !step_over_finished
3345 && !(current_thread->last_resume_kind == resume_continue
3346 && event_child->stop_reason == TARGET_STOPPED_BY_SINGLE_STEP))
3347 || (gdb_breakpoint_here (event_child->stop_pc)
3348 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
3349 && gdb_no_commands_at_breakpoint (event_child->stop_pc))
3350 || event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE);
3351
3352 run_breakpoint_commands (event_child->stop_pc);
3353
3354 /* We found no reason GDB would want us to stop. We either hit one
3355 of our own breakpoints, or finished an internal step GDB
3356 shouldn't know about. */
3357 if (!report_to_gdb)
3358 {
3359 if (debug_threads)
3360 {
3361 if (bp_explains_trap)
3362 debug_printf ("Hit a gdbserver breakpoint.\n");
3363 if (step_over_finished)
3364 debug_printf ("Step-over finished.\n");
3365 if (trace_event)
3366 debug_printf ("Tracepoint event.\n");
3367 if (lwp_in_step_range (event_child))
3368 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
3369 paddress (event_child->stop_pc),
3370 paddress (event_child->step_range_start),
3371 paddress (event_child->step_range_end));
3372 }
3373
3374 /* We're not reporting this breakpoint to GDB, so apply the
3375 decr_pc_after_break adjustment to the inferior's regcache
3376 ourselves. */
3377
3378 if (the_low_target.set_pc != NULL)
3379 {
3380 struct regcache *regcache
3381 = get_thread_regcache (current_thread, 1);
3382 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
3383 }
3384
3385 /* We may have finished stepping over a breakpoint. If so,
3386 we've stopped and suspended all LWPs momentarily except the
3387 stepping one. This is where we resume them all again. We're
3388 going to keep waiting, so use proceed, which handles stepping
3389 over the next breakpoint. */
3390 if (debug_threads)
3391 debug_printf ("proceeding all threads.\n");
3392
3393 if (step_over_finished)
3394 unsuspend_all_lwps (event_child);
3395
3396 proceed_all_lwps ();
3397 return ignore_event (ourstatus);
3398 }
3399
3400 if (debug_threads)
3401 {
3402 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3403 {
3404 char *str;
3405
3406 str = target_waitstatus_to_string (&event_child->waitstatus);
3407 debug_printf ("LWP %ld: extended event with waitstatus %s\n",
3408 lwpid_of (get_lwp_thread (event_child)), str);
3409 xfree (str);
3410 }
3411 if (current_thread->last_resume_kind == resume_step)
3412 {
3413 if (event_child->step_range_start == event_child->step_range_end)
3414 debug_printf ("GDB wanted to single-step, reporting event.\n");
3415 else if (!lwp_in_step_range (event_child))
3416 debug_printf ("Out of step range, reporting event.\n");
3417 }
3418 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
3419 debug_printf ("Stopped by watchpoint.\n");
3420 else if (gdb_breakpoint_here (event_child->stop_pc))
3421 debug_printf ("Stopped by GDB breakpoint.\n");
3422 if (debug_threads)
3423 debug_printf ("Hit a non-gdbserver trap event.\n");
3424 }
3425
3426 /* Alright, we're going to report a stop. */
3427
3428 if (!stabilizing_threads)
3429 {
3430 /* In all-stop, stop all threads. */
3431 if (!non_stop)
3432 stop_all_lwps (0, NULL);
3433
3434 /* If we're not waiting for a specific LWP, choose an event LWP
3435 from among those that have had events. Giving equal priority
3436 to all LWPs that have had events helps prevent
3437 starvation. */
3438 if (ptid_equal (ptid, minus_one_ptid))
3439 {
3440 event_child->status_pending_p = 1;
3441 event_child->status_pending = w;
3442
3443 select_event_lwp (&event_child);
3444
3445 /* current_thread and event_child must stay in sync. */
3446 current_thread = get_lwp_thread (event_child);
3447
3448 event_child->status_pending_p = 0;
3449 w = event_child->status_pending;
3450 }
3451
3452 if (step_over_finished)
3453 {
3454 if (!non_stop)
3455 {
3456 /* If we were doing a step-over, all other threads but
3457 the stepping one had been paused in start_step_over,
3458 with their suspend counts incremented. We don't want
3459 to do a full unstop/unpause, because we're in
3460 all-stop mode (so we want threads stopped), but we
3461 still need to unsuspend the other threads, to
3462 decrement their `suspended' count back. */
3463 unsuspend_all_lwps (event_child);
3464 }
3465 else
3466 {
3467 /* If we just finished a step-over, then all threads had
3468 been momentarily paused. In all-stop, that's fine,
3469 we want threads stopped by now anyway. In non-stop,
3470 we need to re-resume threads that GDB wanted to be
3471 running. */
3472 unstop_all_lwps (1, event_child);
3473 }
3474 }
3475
3476 /* Stabilize threads (move out of jump pads). */
3477 if (!non_stop)
3478 stabilize_threads ();
3479 }
3480 else
3481 {
3482 /* If we just finished a step-over, then all threads had been
3483 momentarily paused. In all-stop, that's fine, we want
3484 threads stopped by now anyway. In non-stop, we need to
3485 re-resume threads that GDB wanted to be running. */
3486 if (step_over_finished)
3487 unstop_all_lwps (1, event_child);
3488 }
3489
3490 if (event_child->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3491 {
3492 /* If the reported event is an exit, fork, vfork or exec, let
3493 GDB know. */
3494 *ourstatus = event_child->waitstatus;
3495 /* Clear the event lwp's waitstatus since we handled it already. */
3496 event_child->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3497 }
3498 else
3499 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3500
3501 /* Now that we've selected our final event LWP, un-adjust its PC if
3502 it was a software breakpoint, and the client doesn't know we can
3503 adjust the breakpoint ourselves. */
3504 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3505 && !swbreak_feature)
3506 {
3507 int decr_pc = the_low_target.decr_pc_after_break;
3508
3509 if (decr_pc != 0)
3510 {
3511 struct regcache *regcache
3512 = get_thread_regcache (current_thread, 1);
3513 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3514 }
3515 }
3516
3517 if (current_thread->last_resume_kind == resume_stop
3518 && WSTOPSIG (w) == SIGSTOP)
3519 {
3520 /* A thread that has been requested to stop by GDB with vCont;t,
3521 and it stopped cleanly, so report as SIG0. The use of
3522 SIGSTOP is an implementation detail. */
3523 ourstatus->value.sig = GDB_SIGNAL_0;
3524 }
3525 else if (current_thread->last_resume_kind == resume_stop
3526 && WSTOPSIG (w) != SIGSTOP)
3527 {
3528 /* A thread that has been requested to stop by GDB with vCont;t,
3529 but, it stopped for other reasons. */
3530 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3531 }
3532 else if (ourstatus->kind == TARGET_WAITKIND_STOPPED)
3533 {
3534 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3535 }
3536
3537 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3538
3539 if (debug_threads)
3540 {
3541 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3542 target_pid_to_str (ptid_of (current_thread)),
3543 ourstatus->kind, ourstatus->value.sig);
3544 debug_exit ();
3545 }
3546
3547 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3548 return filter_exit_event (event_child, ourstatus);
3549
3550 return ptid_of (current_thread);
3551 }
3552
3553 /* Get rid of any pending event in the pipe. */
3554 static void
3555 async_file_flush (void)
3556 {
3557 int ret;
3558 char buf;
3559
3560 do
3561 ret = read (linux_event_pipe[0], &buf, 1);
3562 while (ret >= 0 || (ret == -1 && errno == EINTR));
3563 }
3564
3565 /* Put something in the pipe, so the event loop wakes up. */
3566 static void
3567 async_file_mark (void)
3568 {
3569 int ret;
3570
3571 async_file_flush ();
3572
3573 do
3574 ret = write (linux_event_pipe[1], "+", 1);
3575 while (ret == 0 || (ret == -1 && errno == EINTR));
3576
3577 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3578 be awakened anyway. */
3579 }
3580
3581 static ptid_t
3582 linux_wait (ptid_t ptid,
3583 struct target_waitstatus *ourstatus, int target_options)
3584 {
3585 ptid_t event_ptid;
3586
3587 /* Flush the async file first. */
3588 if (target_is_async_p ())
3589 async_file_flush ();
3590
3591 do
3592 {
3593 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3594 }
3595 while ((target_options & TARGET_WNOHANG) == 0
3596 && ptid_equal (event_ptid, null_ptid)
3597 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3598
3599 /* If at least one stop was reported, there may be more. A single
3600 SIGCHLD can signal more than one child stop. */
3601 if (target_is_async_p ()
3602 && (target_options & TARGET_WNOHANG) != 0
3603 && !ptid_equal (event_ptid, null_ptid))
3604 async_file_mark ();
3605
3606 return event_ptid;
3607 }
3608
3609 /* Send a signal to an LWP. */
3610
3611 static int
3612 kill_lwp (unsigned long lwpid, int signo)
3613 {
3614 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3615 fails, then we are not using nptl threads and we should be using kill. */
3616
3617 #ifdef __NR_tkill
3618 {
3619 static int tkill_failed;
3620
3621 if (!tkill_failed)
3622 {
3623 int ret;
3624
3625 errno = 0;
3626 ret = syscall (__NR_tkill, lwpid, signo);
3627 if (errno != ENOSYS)
3628 return ret;
3629 tkill_failed = 1;
3630 }
3631 }
3632 #endif
3633
3634 return kill (lwpid, signo);
3635 }
3636
3637 void
3638 linux_stop_lwp (struct lwp_info *lwp)
3639 {
3640 send_sigstop (lwp);
3641 }
3642
3643 static void
3644 send_sigstop (struct lwp_info *lwp)
3645 {
3646 int pid;
3647
3648 pid = lwpid_of (get_lwp_thread (lwp));
3649
3650 /* If we already have a pending stop signal for this process, don't
3651 send another. */
3652 if (lwp->stop_expected)
3653 {
3654 if (debug_threads)
3655 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3656
3657 return;
3658 }
3659
3660 if (debug_threads)
3661 debug_printf ("Sending sigstop to lwp %d\n", pid);
3662
3663 lwp->stop_expected = 1;
3664 kill_lwp (pid, SIGSTOP);
3665 }
3666
3667 static int
3668 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3669 {
3670 struct thread_info *thread = (struct thread_info *) entry;
3671 struct lwp_info *lwp = get_thread_lwp (thread);
3672
3673 /* Ignore EXCEPT. */
3674 if (lwp == except)
3675 return 0;
3676
3677 if (lwp->stopped)
3678 return 0;
3679
3680 send_sigstop (lwp);
3681 return 0;
3682 }
3683
3684 /* Increment the suspend count of an LWP, and stop it, if not stopped
3685 yet. */
3686 static int
3687 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3688 void *except)
3689 {
3690 struct thread_info *thread = (struct thread_info *) entry;
3691 struct lwp_info *lwp = get_thread_lwp (thread);
3692
3693 /* Ignore EXCEPT. */
3694 if (lwp == except)
3695 return 0;
3696
3697 lwp_suspended_inc (lwp);
3698
3699 return send_sigstop_callback (entry, except);
3700 }
3701
3702 static void
3703 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3704 {
3705 /* Store the exit status for later. */
3706 lwp->status_pending_p = 1;
3707 lwp->status_pending = wstat;
3708
3709 /* Store in waitstatus as well, as there's nothing else to process
3710 for this event. */
3711 if (WIFEXITED (wstat))
3712 {
3713 lwp->waitstatus.kind = TARGET_WAITKIND_EXITED;
3714 lwp->waitstatus.value.integer = WEXITSTATUS (wstat);
3715 }
3716 else if (WIFSIGNALED (wstat))
3717 {
3718 lwp->waitstatus.kind = TARGET_WAITKIND_SIGNALLED;
3719 lwp->waitstatus.value.sig = gdb_signal_from_host (WTERMSIG (wstat));
3720 }
3721
3722 /* Prevent trying to stop it. */
3723 lwp->stopped = 1;
3724
3725 /* No further stops are expected from a dead lwp. */
3726 lwp->stop_expected = 0;
3727 }
3728
3729 /* Return true if LWP has exited already, and has a pending exit event
3730 to report to GDB. */
3731
3732 static int
3733 lwp_is_marked_dead (struct lwp_info *lwp)
3734 {
3735 return (lwp->status_pending_p
3736 && (WIFEXITED (lwp->status_pending)
3737 || WIFSIGNALED (lwp->status_pending)));
3738 }
3739
3740 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3741
3742 static void
3743 wait_for_sigstop (void)
3744 {
3745 struct thread_info *saved_thread;
3746 ptid_t saved_tid;
3747 int wstat;
3748 int ret;
3749
3750 saved_thread = current_thread;
3751 if (saved_thread != NULL)
3752 saved_tid = saved_thread->entry.id;
3753 else
3754 saved_tid = null_ptid; /* avoid bogus unused warning */
3755
3756 if (debug_threads)
3757 debug_printf ("wait_for_sigstop: pulling events\n");
3758
3759 /* Passing NULL_PTID as filter indicates we want all events to be
3760 left pending. Eventually this returns when there are no
3761 unwaited-for children left. */
3762 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3763 &wstat, __WALL);
3764 gdb_assert (ret == -1);
3765
3766 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3767 current_thread = saved_thread;
3768 else
3769 {
3770 if (debug_threads)
3771 debug_printf ("Previously current thread died.\n");
3772
3773 /* We can't change the current inferior behind GDB's back,
3774 otherwise, a subsequent command may apply to the wrong
3775 process. */
3776 current_thread = NULL;
3777 }
3778 }
3779
3780 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3781 move it out, because we need to report the stop event to GDB. For
3782 example, if the user puts a breakpoint in the jump pad, it's
3783 because she wants to debug it. */
3784
3785 static int
3786 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3787 {
3788 struct thread_info *thread = (struct thread_info *) entry;
3789 struct lwp_info *lwp = get_thread_lwp (thread);
3790
3791 if (lwp->suspended != 0)
3792 {
3793 internal_error (__FILE__, __LINE__,
3794 "LWP %ld is suspended, suspended=%d\n",
3795 lwpid_of (thread), lwp->suspended);
3796 }
3797 gdb_assert (lwp->stopped);
3798
3799 /* Allow debugging the jump pad, gdb_collect, etc.. */
3800 return (supports_fast_tracepoints ()
3801 && agent_loaded_p ()
3802 && (gdb_breakpoint_here (lwp->stop_pc)
3803 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3804 || thread->last_resume_kind == resume_step)
3805 && linux_fast_tracepoint_collecting (lwp, NULL));
3806 }
3807
3808 static void
3809 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3810 {
3811 struct thread_info *thread = (struct thread_info *) entry;
3812 struct thread_info *saved_thread;
3813 struct lwp_info *lwp = get_thread_lwp (thread);
3814 int *wstat;
3815
3816 if (lwp->suspended != 0)
3817 {
3818 internal_error (__FILE__, __LINE__,
3819 "LWP %ld is suspended, suspended=%d\n",
3820 lwpid_of (thread), lwp->suspended);
3821 }
3822 gdb_assert (lwp->stopped);
3823
3824 /* For gdb_breakpoint_here. */
3825 saved_thread = current_thread;
3826 current_thread = thread;
3827
3828 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3829
3830 /* Allow debugging the jump pad, gdb_collect, etc. */
3831 if (!gdb_breakpoint_here (lwp->stop_pc)
3832 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3833 && thread->last_resume_kind != resume_step
3834 && maybe_move_out_of_jump_pad (lwp, wstat))
3835 {
3836 if (debug_threads)
3837 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3838 lwpid_of (thread));
3839
3840 if (wstat)
3841 {
3842 lwp->status_pending_p = 0;
3843 enqueue_one_deferred_signal (lwp, wstat);
3844
3845 if (debug_threads)
3846 debug_printf ("Signal %d for LWP %ld deferred "
3847 "(in jump pad)\n",
3848 WSTOPSIG (*wstat), lwpid_of (thread));
3849 }
3850
3851 linux_resume_one_lwp (lwp, 0, 0, NULL);
3852 }
3853 else
3854 lwp_suspended_inc (lwp);
3855
3856 current_thread = saved_thread;
3857 }
3858
3859 static int
3860 lwp_running (struct inferior_list_entry *entry, void *data)
3861 {
3862 struct thread_info *thread = (struct thread_info *) entry;
3863 struct lwp_info *lwp = get_thread_lwp (thread);
3864
3865 if (lwp_is_marked_dead (lwp))
3866 return 0;
3867 if (lwp->stopped)
3868 return 0;
3869 return 1;
3870 }
3871
3872 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3873 If SUSPEND, then also increase the suspend count of every LWP,
3874 except EXCEPT. */
3875
3876 static void
3877 stop_all_lwps (int suspend, struct lwp_info *except)
3878 {
3879 /* Should not be called recursively. */
3880 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3881
3882 if (debug_threads)
3883 {
3884 debug_enter ();
3885 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3886 suspend ? "stop-and-suspend" : "stop",
3887 except != NULL
3888 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3889 : "none");
3890 }
3891
3892 stopping_threads = (suspend
3893 ? STOPPING_AND_SUSPENDING_THREADS
3894 : STOPPING_THREADS);
3895
3896 if (suspend)
3897 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3898 else
3899 find_inferior (&all_threads, send_sigstop_callback, except);
3900 wait_for_sigstop ();
3901 stopping_threads = NOT_STOPPING_THREADS;
3902
3903 if (debug_threads)
3904 {
3905 debug_printf ("stop_all_lwps done, setting stopping_threads "
3906 "back to !stopping\n");
3907 debug_exit ();
3908 }
3909 }
3910
3911 /* Enqueue one signal in the chain of signals which need to be
3912 delivered to this process on next resume. */
3913
3914 static void
3915 enqueue_pending_signal (struct lwp_info *lwp, int signal, siginfo_t *info)
3916 {
3917 struct pending_signals *p_sig = XNEW (struct pending_signals);
3918
3919 p_sig->prev = lwp->pending_signals;
3920 p_sig->signal = signal;
3921 if (info == NULL)
3922 memset (&p_sig->info, 0, sizeof (siginfo_t));
3923 else
3924 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3925 lwp->pending_signals = p_sig;
3926 }
3927
3928 /* Resume execution of LWP. If STEP is nonzero, single-step it. If
3929 SIGNAL is nonzero, give it that signal. */
3930
3931 static void
3932 linux_resume_one_lwp_throw (struct lwp_info *lwp,
3933 int step, int signal, siginfo_t *info)
3934 {
3935 struct thread_info *thread = get_lwp_thread (lwp);
3936 struct thread_info *saved_thread;
3937 int fast_tp_collecting;
3938 struct process_info *proc = get_thread_process (thread);
3939
3940 /* Note that target description may not be initialised
3941 (proc->tdesc == NULL) at this point because the program hasn't
3942 stopped at the first instruction yet. It means GDBserver skips
3943 the extra traps from the wrapper program (see option --wrapper).
3944 Code in this function that requires register access should be
3945 guarded by proc->tdesc == NULL or something else. */
3946
3947 if (lwp->stopped == 0)
3948 return;
3949
3950 gdb_assert (lwp->waitstatus.kind == TARGET_WAITKIND_IGNORE);
3951
3952 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3953
3954 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3955
3956 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3957 user used the "jump" command, or "set $pc = foo"). */
3958 if (thread->while_stepping != NULL && lwp->stop_pc != get_pc (lwp))
3959 {
3960 /* Collecting 'while-stepping' actions doesn't make sense
3961 anymore. */
3962 release_while_stepping_state_list (thread);
3963 }
3964
3965 /* If we have pending signals or status, and a new signal, enqueue the
3966 signal. Also enqueue the signal if we are waiting to reinsert a
3967 breakpoint; it will be picked up again below. */
3968 if (signal != 0
3969 && (lwp->status_pending_p
3970 || lwp->pending_signals != NULL
3971 || lwp->bp_reinsert != 0
3972 || fast_tp_collecting))
3973 {
3974 struct pending_signals *p_sig = XNEW (struct pending_signals);
3975
3976 p_sig->prev = lwp->pending_signals;
3977 p_sig->signal = signal;
3978 if (info == NULL)
3979 memset (&p_sig->info, 0, sizeof (siginfo_t));
3980 else
3981 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3982 lwp->pending_signals = p_sig;
3983 }
3984
3985 if (lwp->status_pending_p)
3986 {
3987 if (debug_threads)
3988 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3989 " has pending status\n",
3990 lwpid_of (thread), step ? "step" : "continue", signal,
3991 lwp->stop_expected ? "expected" : "not expected");
3992 return;
3993 }
3994
3995 saved_thread = current_thread;
3996 current_thread = thread;
3997
3998 if (debug_threads)
3999 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
4000 lwpid_of (thread), step ? "step" : "continue", signal,
4001 lwp->stop_expected ? "expected" : "not expected");
4002
4003 /* This bit needs some thinking about. If we get a signal that
4004 we must report while a single-step reinsert is still pending,
4005 we often end up resuming the thread. It might be better to
4006 (ew) allow a stack of pending events; then we could be sure that
4007 the reinsert happened right away and not lose any signals.
4008
4009 Making this stack would also shrink the window in which breakpoints are
4010 uninserted (see comment in linux_wait_for_lwp) but not enough for
4011 complete correctness, so it won't solve that problem. It may be
4012 worthwhile just to solve this one, however. */
4013 if (lwp->bp_reinsert != 0)
4014 {
4015 if (debug_threads)
4016 debug_printf (" pending reinsert at 0x%s\n",
4017 paddress (lwp->bp_reinsert));
4018
4019 if (can_hardware_single_step ())
4020 {
4021 if (fast_tp_collecting == 0)
4022 {
4023 if (step == 0)
4024 fprintf (stderr, "BAD - reinserting but not stepping.\n");
4025 if (lwp->suspended)
4026 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
4027 lwp->suspended);
4028 }
4029
4030 step = 1;
4031 }
4032
4033 /* Postpone any pending signal. It was enqueued above. */
4034 signal = 0;
4035 }
4036
4037 if (fast_tp_collecting == 1)
4038 {
4039 if (debug_threads)
4040 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4041 " (exit-jump-pad-bkpt)\n",
4042 lwpid_of (thread));
4043
4044 /* Postpone any pending signal. It was enqueued above. */
4045 signal = 0;
4046 }
4047 else if (fast_tp_collecting == 2)
4048 {
4049 if (debug_threads)
4050 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
4051 " single-stepping\n",
4052 lwpid_of (thread));
4053
4054 if (can_hardware_single_step ())
4055 step = 1;
4056 else
4057 {
4058 internal_error (__FILE__, __LINE__,
4059 "moving out of jump pad single-stepping"
4060 " not implemented on this target");
4061 }
4062
4063 /* Postpone any pending signal. It was enqueued above. */
4064 signal = 0;
4065 }
4066
4067 /* If we have while-stepping actions in this thread set it stepping.
4068 If we have a signal to deliver, it may or may not be set to
4069 SIG_IGN, we don't know. Assume so, and allow collecting
4070 while-stepping into a signal handler. A possible smart thing to
4071 do would be to set an internal breakpoint at the signal return
4072 address, continue, and carry on catching this while-stepping
4073 action only when that breakpoint is hit. A future
4074 enhancement. */
4075 if (thread->while_stepping != NULL
4076 && can_hardware_single_step ())
4077 {
4078 if (debug_threads)
4079 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
4080 lwpid_of (thread));
4081 step = 1;
4082 }
4083
4084 if (proc->tdesc != NULL && the_low_target.get_pc != NULL)
4085 {
4086 struct regcache *regcache = get_thread_regcache (current_thread, 1);
4087
4088 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
4089
4090 if (debug_threads)
4091 {
4092 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
4093 (long) lwp->stop_pc);
4094 }
4095 }
4096
4097 /* If we have pending signals, consume one unless we are trying to
4098 reinsert a breakpoint or we're trying to finish a fast tracepoint
4099 collect. */
4100 if (lwp->pending_signals != NULL
4101 && lwp->bp_reinsert == 0
4102 && fast_tp_collecting == 0)
4103 {
4104 struct pending_signals **p_sig;
4105
4106 p_sig = &lwp->pending_signals;
4107 while ((*p_sig)->prev != NULL)
4108 p_sig = &(*p_sig)->prev;
4109
4110 signal = (*p_sig)->signal;
4111 if ((*p_sig)->info.si_signo != 0)
4112 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4113 &(*p_sig)->info);
4114
4115 free (*p_sig);
4116 *p_sig = NULL;
4117 }
4118
4119 if (the_low_target.prepare_to_resume != NULL)
4120 the_low_target.prepare_to_resume (lwp);
4121
4122 regcache_invalidate_thread (thread);
4123 errno = 0;
4124 lwp->stepping = step;
4125 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
4126 (PTRACE_TYPE_ARG3) 0,
4127 /* Coerce to a uintptr_t first to avoid potential gcc warning
4128 of coercing an 8 byte integer to a 4 byte pointer. */
4129 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
4130
4131 current_thread = saved_thread;
4132 if (errno)
4133 perror_with_name ("resuming thread");
4134
4135 /* Successfully resumed. Clear state that no longer makes sense,
4136 and mark the LWP as running. Must not do this before resuming
4137 otherwise if that fails other code will be confused. E.g., we'd
4138 later try to stop the LWP and hang forever waiting for a stop
4139 status. Note that we must not throw after this is cleared,
4140 otherwise handle_zombie_lwp_error would get confused. */
4141 lwp->stopped = 0;
4142 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4143 }
4144
4145 /* Called when we try to resume a stopped LWP and that errors out. If
4146 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
4147 or about to become), discard the error, clear any pending status
4148 the LWP may have, and return true (we'll collect the exit status
4149 soon enough). Otherwise, return false. */
4150
4151 static int
4152 check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
4153 {
4154 struct thread_info *thread = get_lwp_thread (lp);
4155
4156 /* If we get an error after resuming the LWP successfully, we'd
4157 confuse !T state for the LWP being gone. */
4158 gdb_assert (lp->stopped);
4159
4160 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
4161 because even if ptrace failed with ESRCH, the tracee may be "not
4162 yet fully dead", but already refusing ptrace requests. In that
4163 case the tracee has 'R (Running)' state for a little bit
4164 (observed in Linux 3.18). See also the note on ESRCH in the
4165 ptrace(2) man page. Instead, check whether the LWP has any state
4166 other than ptrace-stopped. */
4167
4168 /* Don't assume anything if /proc/PID/status can't be read. */
4169 if (linux_proc_pid_is_trace_stopped_nowarn (lwpid_of (thread)) == 0)
4170 {
4171 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
4172 lp->status_pending_p = 0;
4173 return 1;
4174 }
4175 return 0;
4176 }
4177
4178 /* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
4179 disappears while we try to resume it. */
4180
4181 static void
4182 linux_resume_one_lwp (struct lwp_info *lwp,
4183 int step, int signal, siginfo_t *info)
4184 {
4185 TRY
4186 {
4187 linux_resume_one_lwp_throw (lwp, step, signal, info);
4188 }
4189 CATCH (ex, RETURN_MASK_ERROR)
4190 {
4191 if (!check_ptrace_stopped_lwp_gone (lwp))
4192 throw_exception (ex);
4193 }
4194 END_CATCH
4195 }
4196
4197 struct thread_resume_array
4198 {
4199 struct thread_resume *resume;
4200 size_t n;
4201 };
4202
4203 /* This function is called once per thread via find_inferior.
4204 ARG is a pointer to a thread_resume_array struct.
4205 We look up the thread specified by ENTRY in ARG, and mark the thread
4206 with a pointer to the appropriate resume request.
4207
4208 This algorithm is O(threads * resume elements), but resume elements
4209 is small (and will remain small at least until GDB supports thread
4210 suspension). */
4211
4212 static int
4213 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
4214 {
4215 struct thread_info *thread = (struct thread_info *) entry;
4216 struct lwp_info *lwp = get_thread_lwp (thread);
4217 int ndx;
4218 struct thread_resume_array *r;
4219
4220 r = (struct thread_resume_array *) arg;
4221
4222 for (ndx = 0; ndx < r->n; ndx++)
4223 {
4224 ptid_t ptid = r->resume[ndx].thread;
4225 if (ptid_equal (ptid, minus_one_ptid)
4226 || ptid_equal (ptid, entry->id)
4227 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
4228 of PID'. */
4229 || (ptid_get_pid (ptid) == pid_of (thread)
4230 && (ptid_is_pid (ptid)
4231 || ptid_get_lwp (ptid) == -1)))
4232 {
4233 if (r->resume[ndx].kind == resume_stop
4234 && thread->last_resume_kind == resume_stop)
4235 {
4236 if (debug_threads)
4237 debug_printf ("already %s LWP %ld at GDB's request\n",
4238 (thread->last_status.kind
4239 == TARGET_WAITKIND_STOPPED)
4240 ? "stopped"
4241 : "stopping",
4242 lwpid_of (thread));
4243
4244 continue;
4245 }
4246
4247 lwp->resume = &r->resume[ndx];
4248 thread->last_resume_kind = lwp->resume->kind;
4249
4250 lwp->step_range_start = lwp->resume->step_range_start;
4251 lwp->step_range_end = lwp->resume->step_range_end;
4252
4253 /* If we had a deferred signal to report, dequeue one now.
4254 This can happen if LWP gets more than one signal while
4255 trying to get out of a jump pad. */
4256 if (lwp->stopped
4257 && !lwp->status_pending_p
4258 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
4259 {
4260 lwp->status_pending_p = 1;
4261
4262 if (debug_threads)
4263 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
4264 "leaving status pending.\n",
4265 WSTOPSIG (lwp->status_pending),
4266 lwpid_of (thread));
4267 }
4268
4269 return 0;
4270 }
4271 }
4272
4273 /* No resume action for this thread. */
4274 lwp->resume = NULL;
4275
4276 return 0;
4277 }
4278
4279 /* find_inferior callback for linux_resume.
4280 Set *FLAG_P if this lwp has an interesting status pending. */
4281
4282 static int
4283 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
4284 {
4285 struct thread_info *thread = (struct thread_info *) entry;
4286 struct lwp_info *lwp = get_thread_lwp (thread);
4287
4288 /* LWPs which will not be resumed are not interesting, because
4289 we might not wait for them next time through linux_wait. */
4290 if (lwp->resume == NULL)
4291 return 0;
4292
4293 if (thread_still_has_status_pending_p (thread))
4294 * (int *) flag_p = 1;
4295
4296 return 0;
4297 }
4298
4299 /* Return 1 if this lwp that GDB wants running is stopped at an
4300 internal breakpoint that we need to step over. It assumes that any
4301 required STOP_PC adjustment has already been propagated to the
4302 inferior's regcache. */
4303
4304 static int
4305 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
4306 {
4307 struct thread_info *thread = (struct thread_info *) entry;
4308 struct lwp_info *lwp = get_thread_lwp (thread);
4309 struct thread_info *saved_thread;
4310 CORE_ADDR pc;
4311 struct process_info *proc = get_thread_process (thread);
4312
4313 /* GDBserver is skipping the extra traps from the wrapper program,
4314 don't have to do step over. */
4315 if (proc->tdesc == NULL)
4316 return 0;
4317
4318 /* LWPs which will not be resumed are not interesting, because we
4319 might not wait for them next time through linux_wait. */
4320
4321 if (!lwp->stopped)
4322 {
4323 if (debug_threads)
4324 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
4325 lwpid_of (thread));
4326 return 0;
4327 }
4328
4329 if (thread->last_resume_kind == resume_stop)
4330 {
4331 if (debug_threads)
4332 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
4333 " stopped\n",
4334 lwpid_of (thread));
4335 return 0;
4336 }
4337
4338 gdb_assert (lwp->suspended >= 0);
4339
4340 if (lwp->suspended)
4341 {
4342 if (debug_threads)
4343 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
4344 lwpid_of (thread));
4345 return 0;
4346 }
4347
4348 if (!lwp->need_step_over)
4349 {
4350 if (debug_threads)
4351 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
4352 }
4353
4354 if (lwp->status_pending_p)
4355 {
4356 if (debug_threads)
4357 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
4358 " status.\n",
4359 lwpid_of (thread));
4360 return 0;
4361 }
4362
4363 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
4364 or we have. */
4365 pc = get_pc (lwp);
4366
4367 /* If the PC has changed since we stopped, then don't do anything,
4368 and let the breakpoint/tracepoint be hit. This happens if, for
4369 instance, GDB handled the decr_pc_after_break subtraction itself,
4370 GDB is OOL stepping this thread, or the user has issued a "jump"
4371 command, or poked thread's registers herself. */
4372 if (pc != lwp->stop_pc)
4373 {
4374 if (debug_threads)
4375 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
4376 "Old stop_pc was 0x%s, PC is now 0x%s\n",
4377 lwpid_of (thread),
4378 paddress (lwp->stop_pc), paddress (pc));
4379
4380 lwp->need_step_over = 0;
4381 return 0;
4382 }
4383
4384 saved_thread = current_thread;
4385 current_thread = thread;
4386
4387 /* We can only step over breakpoints we know about. */
4388 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
4389 {
4390 /* Don't step over a breakpoint that GDB expects to hit
4391 though. If the condition is being evaluated on the target's side
4392 and it evaluate to false, step over this breakpoint as well. */
4393 if (gdb_breakpoint_here (pc)
4394 && gdb_condition_true_at_breakpoint (pc)
4395 && gdb_no_commands_at_breakpoint (pc))
4396 {
4397 if (debug_threads)
4398 debug_printf ("Need step over [LWP %ld]? yes, but found"
4399 " GDB breakpoint at 0x%s; skipping step over\n",
4400 lwpid_of (thread), paddress (pc));
4401
4402 current_thread = saved_thread;
4403 return 0;
4404 }
4405 else
4406 {
4407 if (debug_threads)
4408 debug_printf ("Need step over [LWP %ld]? yes, "
4409 "found breakpoint at 0x%s\n",
4410 lwpid_of (thread), paddress (pc));
4411
4412 /* We've found an lwp that needs stepping over --- return 1 so
4413 that find_inferior stops looking. */
4414 current_thread = saved_thread;
4415
4416 /* If the step over is cancelled, this is set again. */
4417 lwp->need_step_over = 0;
4418 return 1;
4419 }
4420 }
4421
4422 current_thread = saved_thread;
4423
4424 if (debug_threads)
4425 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
4426 " at 0x%s\n",
4427 lwpid_of (thread), paddress (pc));
4428
4429 return 0;
4430 }
4431
4432 /* Start a step-over operation on LWP. When LWP stopped at a
4433 breakpoint, to make progress, we need to remove the breakpoint out
4434 of the way. If we let other threads run while we do that, they may
4435 pass by the breakpoint location and miss hitting it. To avoid
4436 that, a step-over momentarily stops all threads while LWP is
4437 single-stepped while the breakpoint is temporarily uninserted from
4438 the inferior. When the single-step finishes, we reinsert the
4439 breakpoint, and let all threads that are supposed to be running,
4440 run again.
4441
4442 On targets that don't support hardware single-step, we don't
4443 currently support full software single-stepping. Instead, we only
4444 support stepping over the thread event breakpoint, by asking the
4445 low target where to place a reinsert breakpoint. Since this
4446 routine assumes the breakpoint being stepped over is a thread event
4447 breakpoint, it usually assumes the return address of the current
4448 function is a good enough place to set the reinsert breakpoint. */
4449
4450 static int
4451 start_step_over (struct lwp_info *lwp)
4452 {
4453 struct thread_info *thread = get_lwp_thread (lwp);
4454 struct thread_info *saved_thread;
4455 CORE_ADDR pc;
4456 int step;
4457
4458 if (debug_threads)
4459 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
4460 lwpid_of (thread));
4461
4462 stop_all_lwps (1, lwp);
4463
4464 if (lwp->suspended != 0)
4465 {
4466 internal_error (__FILE__, __LINE__,
4467 "LWP %ld suspended=%d\n", lwpid_of (thread),
4468 lwp->suspended);
4469 }
4470
4471 if (debug_threads)
4472 debug_printf ("Done stopping all threads for step-over.\n");
4473
4474 /* Note, we should always reach here with an already adjusted PC,
4475 either by GDB (if we're resuming due to GDB's request), or by our
4476 caller, if we just finished handling an internal breakpoint GDB
4477 shouldn't care about. */
4478 pc = get_pc (lwp);
4479
4480 saved_thread = current_thread;
4481 current_thread = thread;
4482
4483 lwp->bp_reinsert = pc;
4484 uninsert_breakpoints_at (pc);
4485 uninsert_fast_tracepoint_jumps_at (pc);
4486
4487 if (can_hardware_single_step ())
4488 {
4489 step = 1;
4490 }
4491 else
4492 {
4493 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
4494 set_reinsert_breakpoint (raddr);
4495 step = 0;
4496 }
4497
4498 current_thread = saved_thread;
4499
4500 linux_resume_one_lwp (lwp, step, 0, NULL);
4501
4502 /* Require next event from this LWP. */
4503 step_over_bkpt = thread->entry.id;
4504 return 1;
4505 }
4506
4507 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
4508 start_step_over, if still there, and delete any reinsert
4509 breakpoints we've set, on non hardware single-step targets. */
4510
4511 static int
4512 finish_step_over (struct lwp_info *lwp)
4513 {
4514 if (lwp->bp_reinsert != 0)
4515 {
4516 if (debug_threads)
4517 debug_printf ("Finished step over.\n");
4518
4519 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
4520 may be no breakpoint to reinsert there by now. */
4521 reinsert_breakpoints_at (lwp->bp_reinsert);
4522 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
4523
4524 lwp->bp_reinsert = 0;
4525
4526 /* Delete any software-single-step reinsert breakpoints. No
4527 longer needed. We don't have to worry about other threads
4528 hitting this trap, and later not being able to explain it,
4529 because we were stepping over a breakpoint, and we hold all
4530 threads but LWP stopped while doing that. */
4531 if (!can_hardware_single_step ())
4532 delete_reinsert_breakpoints ();
4533
4534 step_over_bkpt = null_ptid;
4535 return 1;
4536 }
4537 else
4538 return 0;
4539 }
4540
4541 /* If there's a step over in progress, wait until all threads stop
4542 (that is, until the stepping thread finishes its step), and
4543 unsuspend all lwps. The stepping thread ends with its status
4544 pending, which is processed later when we get back to processing
4545 events. */
4546
4547 static void
4548 complete_ongoing_step_over (void)
4549 {
4550 if (!ptid_equal (step_over_bkpt, null_ptid))
4551 {
4552 struct lwp_info *lwp;
4553 int wstat;
4554 int ret;
4555
4556 if (debug_threads)
4557 debug_printf ("detach: step over in progress, finish it first\n");
4558
4559 /* Passing NULL_PTID as filter indicates we want all events to
4560 be left pending. Eventually this returns when there are no
4561 unwaited-for children left. */
4562 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
4563 &wstat, __WALL);
4564 gdb_assert (ret == -1);
4565
4566 lwp = find_lwp_pid (step_over_bkpt);
4567 if (lwp != NULL)
4568 finish_step_over (lwp);
4569 step_over_bkpt = null_ptid;
4570 unsuspend_all_lwps (lwp);
4571 }
4572 }
4573
4574 /* This function is called once per thread. We check the thread's resume
4575 request, which will tell us whether to resume, step, or leave the thread
4576 stopped; and what signal, if any, it should be sent.
4577
4578 For threads which we aren't explicitly told otherwise, we preserve
4579 the stepping flag; this is used for stepping over gdbserver-placed
4580 breakpoints.
4581
4582 If pending_flags was set in any thread, we queue any needed
4583 signals, since we won't actually resume. We already have a pending
4584 event to report, so we don't need to preserve any step requests;
4585 they should be re-issued if necessary. */
4586
4587 static int
4588 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
4589 {
4590 struct thread_info *thread = (struct thread_info *) entry;
4591 struct lwp_info *lwp = get_thread_lwp (thread);
4592 int step;
4593 int leave_all_stopped = * (int *) arg;
4594 int leave_pending;
4595
4596 if (lwp->resume == NULL)
4597 return 0;
4598
4599 if (lwp->resume->kind == resume_stop)
4600 {
4601 if (debug_threads)
4602 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
4603
4604 if (!lwp->stopped)
4605 {
4606 if (debug_threads)
4607 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
4608
4609 /* Stop the thread, and wait for the event asynchronously,
4610 through the event loop. */
4611 send_sigstop (lwp);
4612 }
4613 else
4614 {
4615 if (debug_threads)
4616 debug_printf ("already stopped LWP %ld\n",
4617 lwpid_of (thread));
4618
4619 /* The LWP may have been stopped in an internal event that
4620 was not meant to be notified back to GDB (e.g., gdbserver
4621 breakpoint), so we should be reporting a stop event in
4622 this case too. */
4623
4624 /* If the thread already has a pending SIGSTOP, this is a
4625 no-op. Otherwise, something later will presumably resume
4626 the thread and this will cause it to cancel any pending
4627 operation, due to last_resume_kind == resume_stop. If
4628 the thread already has a pending status to report, we
4629 will still report it the next time we wait - see
4630 status_pending_p_callback. */
4631
4632 /* If we already have a pending signal to report, then
4633 there's no need to queue a SIGSTOP, as this means we're
4634 midway through moving the LWP out of the jumppad, and we
4635 will report the pending signal as soon as that is
4636 finished. */
4637 if (lwp->pending_signals_to_report == NULL)
4638 send_sigstop (lwp);
4639 }
4640
4641 /* For stop requests, we're done. */
4642 lwp->resume = NULL;
4643 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4644 return 0;
4645 }
4646
4647 /* If this thread which is about to be resumed has a pending status,
4648 then don't resume it - we can just report the pending status.
4649 Likewise if it is suspended, because e.g., another thread is
4650 stepping past a breakpoint. Make sure to queue any signals that
4651 would otherwise be sent. In all-stop mode, we do this decision
4652 based on if *any* thread has a pending status. If there's a
4653 thread that needs the step-over-breakpoint dance, then don't
4654 resume any other thread but that particular one. */
4655 leave_pending = (lwp->suspended
4656 || lwp->status_pending_p
4657 || leave_all_stopped);
4658
4659 if (!leave_pending)
4660 {
4661 if (debug_threads)
4662 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4663
4664 step = (lwp->resume->kind == resume_step);
4665 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4666 }
4667 else
4668 {
4669 if (debug_threads)
4670 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4671
4672 /* If we have a new signal, enqueue the signal. */
4673 if (lwp->resume->sig != 0)
4674 {
4675 struct pending_signals *p_sig = XCNEW (struct pending_signals);
4676
4677 p_sig->prev = lwp->pending_signals;
4678 p_sig->signal = lwp->resume->sig;
4679
4680 /* If this is the same signal we were previously stopped by,
4681 make sure to queue its siginfo. We can ignore the return
4682 value of ptrace; if it fails, we'll skip
4683 PTRACE_SETSIGINFO. */
4684 if (WIFSTOPPED (lwp->last_status)
4685 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4686 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4687 &p_sig->info);
4688
4689 lwp->pending_signals = p_sig;
4690 }
4691 }
4692
4693 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4694 lwp->resume = NULL;
4695 return 0;
4696 }
4697
4698 static void
4699 linux_resume (struct thread_resume *resume_info, size_t n)
4700 {
4701 struct thread_resume_array array = { resume_info, n };
4702 struct thread_info *need_step_over = NULL;
4703 int any_pending;
4704 int leave_all_stopped;
4705
4706 if (debug_threads)
4707 {
4708 debug_enter ();
4709 debug_printf ("linux_resume:\n");
4710 }
4711
4712 find_inferior (&all_threads, linux_set_resume_request, &array);
4713
4714 /* If there is a thread which would otherwise be resumed, which has
4715 a pending status, then don't resume any threads - we can just
4716 report the pending status. Make sure to queue any signals that
4717 would otherwise be sent. In non-stop mode, we'll apply this
4718 logic to each thread individually. We consume all pending events
4719 before considering to start a step-over (in all-stop). */
4720 any_pending = 0;
4721 if (!non_stop)
4722 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4723
4724 /* If there is a thread which would otherwise be resumed, which is
4725 stopped at a breakpoint that needs stepping over, then don't
4726 resume any threads - have it step over the breakpoint with all
4727 other threads stopped, then resume all threads again. Make sure
4728 to queue any signals that would otherwise be delivered or
4729 queued. */
4730 if (!any_pending && supports_breakpoints ())
4731 need_step_over
4732 = (struct thread_info *) find_inferior (&all_threads,
4733 need_step_over_p, NULL);
4734
4735 leave_all_stopped = (need_step_over != NULL || any_pending);
4736
4737 if (debug_threads)
4738 {
4739 if (need_step_over != NULL)
4740 debug_printf ("Not resuming all, need step over\n");
4741 else if (any_pending)
4742 debug_printf ("Not resuming, all-stop and found "
4743 "an LWP with pending status\n");
4744 else
4745 debug_printf ("Resuming, no pending status or step over needed\n");
4746 }
4747
4748 /* Even if we're leaving threads stopped, queue all signals we'd
4749 otherwise deliver. */
4750 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4751
4752 if (need_step_over)
4753 start_step_over (get_thread_lwp (need_step_over));
4754
4755 if (debug_threads)
4756 {
4757 debug_printf ("linux_resume done\n");
4758 debug_exit ();
4759 }
4760 }
4761
4762 /* This function is called once per thread. We check the thread's
4763 last resume request, which will tell us whether to resume, step, or
4764 leave the thread stopped. Any signal the client requested to be
4765 delivered has already been enqueued at this point.
4766
4767 If any thread that GDB wants running is stopped at an internal
4768 breakpoint that needs stepping over, we start a step-over operation
4769 on that particular thread, and leave all others stopped. */
4770
4771 static int
4772 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4773 {
4774 struct thread_info *thread = (struct thread_info *) entry;
4775 struct lwp_info *lwp = get_thread_lwp (thread);
4776 int step;
4777
4778 if (lwp == except)
4779 return 0;
4780
4781 if (debug_threads)
4782 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4783
4784 if (!lwp->stopped)
4785 {
4786 if (debug_threads)
4787 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4788 return 0;
4789 }
4790
4791 if (thread->last_resume_kind == resume_stop
4792 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4793 {
4794 if (debug_threads)
4795 debug_printf (" client wants LWP to remain %ld stopped\n",
4796 lwpid_of (thread));
4797 return 0;
4798 }
4799
4800 if (lwp->status_pending_p)
4801 {
4802 if (debug_threads)
4803 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4804 lwpid_of (thread));
4805 return 0;
4806 }
4807
4808 gdb_assert (lwp->suspended >= 0);
4809
4810 if (lwp->suspended)
4811 {
4812 if (debug_threads)
4813 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4814 return 0;
4815 }
4816
4817 if (thread->last_resume_kind == resume_stop
4818 && lwp->pending_signals_to_report == NULL
4819 && lwp->collecting_fast_tracepoint == 0)
4820 {
4821 /* We haven't reported this LWP as stopped yet (otherwise, the
4822 last_status.kind check above would catch it, and we wouldn't
4823 reach here. This LWP may have been momentarily paused by a
4824 stop_all_lwps call while handling for example, another LWP's
4825 step-over. In that case, the pending expected SIGSTOP signal
4826 that was queued at vCont;t handling time will have already
4827 been consumed by wait_for_sigstop, and so we need to requeue
4828 another one here. Note that if the LWP already has a SIGSTOP
4829 pending, this is a no-op. */
4830
4831 if (debug_threads)
4832 debug_printf ("Client wants LWP %ld to stop. "
4833 "Making sure it has a SIGSTOP pending\n",
4834 lwpid_of (thread));
4835
4836 send_sigstop (lwp);
4837 }
4838
4839 if (thread->last_resume_kind == resume_step)
4840 {
4841 if (debug_threads)
4842 debug_printf (" stepping LWP %ld, client wants it stepping\n",
4843 lwpid_of (thread));
4844 step = 1;
4845 }
4846 else if (lwp->bp_reinsert != 0)
4847 {
4848 if (debug_threads)
4849 debug_printf (" stepping LWP %ld, reinsert set\n",
4850 lwpid_of (thread));
4851 step = 1;
4852 }
4853 else
4854 step = 0;
4855
4856 linux_resume_one_lwp (lwp, step, 0, NULL);
4857 return 0;
4858 }
4859
4860 static int
4861 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4862 {
4863 struct thread_info *thread = (struct thread_info *) entry;
4864 struct lwp_info *lwp = get_thread_lwp (thread);
4865
4866 if (lwp == except)
4867 return 0;
4868
4869 lwp_suspended_decr (lwp);
4870
4871 return proceed_one_lwp (entry, except);
4872 }
4873
4874 /* When we finish a step-over, set threads running again. If there's
4875 another thread that may need a step-over, now's the time to start
4876 it. Eventually, we'll move all threads past their breakpoints. */
4877
4878 static void
4879 proceed_all_lwps (void)
4880 {
4881 struct thread_info *need_step_over;
4882
4883 /* If there is a thread which would otherwise be resumed, which is
4884 stopped at a breakpoint that needs stepping over, then don't
4885 resume any threads - have it step over the breakpoint with all
4886 other threads stopped, then resume all threads again. */
4887
4888 if (supports_breakpoints ())
4889 {
4890 need_step_over
4891 = (struct thread_info *) find_inferior (&all_threads,
4892 need_step_over_p, NULL);
4893
4894 if (need_step_over != NULL)
4895 {
4896 if (debug_threads)
4897 debug_printf ("proceed_all_lwps: found "
4898 "thread %ld needing a step-over\n",
4899 lwpid_of (need_step_over));
4900
4901 start_step_over (get_thread_lwp (need_step_over));
4902 return;
4903 }
4904 }
4905
4906 if (debug_threads)
4907 debug_printf ("Proceeding, no step-over needed\n");
4908
4909 find_inferior (&all_threads, proceed_one_lwp, NULL);
4910 }
4911
4912 /* Stopped LWPs that the client wanted to be running, that don't have
4913 pending statuses, are set to run again, except for EXCEPT, if not
4914 NULL. This undoes a stop_all_lwps call. */
4915
4916 static void
4917 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4918 {
4919 if (debug_threads)
4920 {
4921 debug_enter ();
4922 if (except)
4923 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4924 lwpid_of (get_lwp_thread (except)));
4925 else
4926 debug_printf ("unstopping all lwps\n");
4927 }
4928
4929 if (unsuspend)
4930 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4931 else
4932 find_inferior (&all_threads, proceed_one_lwp, except);
4933
4934 if (debug_threads)
4935 {
4936 debug_printf ("unstop_all_lwps done\n");
4937 debug_exit ();
4938 }
4939 }
4940
4941
4942 #ifdef HAVE_LINUX_REGSETS
4943
4944 #define use_linux_regsets 1
4945
4946 /* Returns true if REGSET has been disabled. */
4947
4948 static int
4949 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4950 {
4951 return (info->disabled_regsets != NULL
4952 && info->disabled_regsets[regset - info->regsets]);
4953 }
4954
4955 /* Disable REGSET. */
4956
4957 static void
4958 disable_regset (struct regsets_info *info, struct regset_info *regset)
4959 {
4960 int dr_offset;
4961
4962 dr_offset = regset - info->regsets;
4963 if (info->disabled_regsets == NULL)
4964 info->disabled_regsets = (char *) xcalloc (1, info->num_regsets);
4965 info->disabled_regsets[dr_offset] = 1;
4966 }
4967
4968 static int
4969 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4970 struct regcache *regcache)
4971 {
4972 struct regset_info *regset;
4973 int saw_general_regs = 0;
4974 int pid;
4975 struct iovec iov;
4976
4977 pid = lwpid_of (current_thread);
4978 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4979 {
4980 void *buf, *data;
4981 int nt_type, res;
4982
4983 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4984 continue;
4985
4986 buf = xmalloc (regset->size);
4987
4988 nt_type = regset->nt_type;
4989 if (nt_type)
4990 {
4991 iov.iov_base = buf;
4992 iov.iov_len = regset->size;
4993 data = (void *) &iov;
4994 }
4995 else
4996 data = buf;
4997
4998 #ifndef __sparc__
4999 res = ptrace (regset->get_request, pid,
5000 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5001 #else
5002 res = ptrace (regset->get_request, pid, data, nt_type);
5003 #endif
5004 if (res < 0)
5005 {
5006 if (errno == EIO)
5007 {
5008 /* If we get EIO on a regset, do not try it again for
5009 this process mode. */
5010 disable_regset (regsets_info, regset);
5011 }
5012 else if (errno == ENODATA)
5013 {
5014 /* ENODATA may be returned if the regset is currently
5015 not "active". This can happen in normal operation,
5016 so suppress the warning in this case. */
5017 }
5018 else
5019 {
5020 char s[256];
5021 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
5022 pid);
5023 perror (s);
5024 }
5025 }
5026 else
5027 {
5028 if (regset->type == GENERAL_REGS)
5029 saw_general_regs = 1;
5030 regset->store_function (regcache, buf);
5031 }
5032 free (buf);
5033 }
5034 if (saw_general_regs)
5035 return 0;
5036 else
5037 return 1;
5038 }
5039
5040 static int
5041 regsets_store_inferior_registers (struct regsets_info *regsets_info,
5042 struct regcache *regcache)
5043 {
5044 struct regset_info *regset;
5045 int saw_general_regs = 0;
5046 int pid;
5047 struct iovec iov;
5048
5049 pid = lwpid_of (current_thread);
5050 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
5051 {
5052 void *buf, *data;
5053 int nt_type, res;
5054
5055 if (regset->size == 0 || regset_disabled (regsets_info, regset)
5056 || regset->fill_function == NULL)
5057 continue;
5058
5059 buf = xmalloc (regset->size);
5060
5061 /* First fill the buffer with the current register set contents,
5062 in case there are any items in the kernel's regset that are
5063 not in gdbserver's regcache. */
5064
5065 nt_type = regset->nt_type;
5066 if (nt_type)
5067 {
5068 iov.iov_base = buf;
5069 iov.iov_len = regset->size;
5070 data = (void *) &iov;
5071 }
5072 else
5073 data = buf;
5074
5075 #ifndef __sparc__
5076 res = ptrace (regset->get_request, pid,
5077 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5078 #else
5079 res = ptrace (regset->get_request, pid, data, nt_type);
5080 #endif
5081
5082 if (res == 0)
5083 {
5084 /* Then overlay our cached registers on that. */
5085 regset->fill_function (regcache, buf);
5086
5087 /* Only now do we write the register set. */
5088 #ifndef __sparc__
5089 res = ptrace (regset->set_request, pid,
5090 (PTRACE_TYPE_ARG3) (long) nt_type, data);
5091 #else
5092 res = ptrace (regset->set_request, pid, data, nt_type);
5093 #endif
5094 }
5095
5096 if (res < 0)
5097 {
5098 if (errno == EIO)
5099 {
5100 /* If we get EIO on a regset, do not try it again for
5101 this process mode. */
5102 disable_regset (regsets_info, regset);
5103 }
5104 else if (errno == ESRCH)
5105 {
5106 /* At this point, ESRCH should mean the process is
5107 already gone, in which case we simply ignore attempts
5108 to change its registers. See also the related
5109 comment in linux_resume_one_lwp. */
5110 free (buf);
5111 return 0;
5112 }
5113 else
5114 {
5115 perror ("Warning: ptrace(regsets_store_inferior_registers)");
5116 }
5117 }
5118 else if (regset->type == GENERAL_REGS)
5119 saw_general_regs = 1;
5120 free (buf);
5121 }
5122 if (saw_general_regs)
5123 return 0;
5124 else
5125 return 1;
5126 }
5127
5128 #else /* !HAVE_LINUX_REGSETS */
5129
5130 #define use_linux_regsets 0
5131 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
5132 #define regsets_store_inferior_registers(regsets_info, regcache) 1
5133
5134 #endif
5135
5136 /* Return 1 if register REGNO is supported by one of the regset ptrace
5137 calls or 0 if it has to be transferred individually. */
5138
5139 static int
5140 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
5141 {
5142 unsigned char mask = 1 << (regno % 8);
5143 size_t index = regno / 8;
5144
5145 return (use_linux_regsets
5146 && (regs_info->regset_bitmap == NULL
5147 || (regs_info->regset_bitmap[index] & mask) != 0));
5148 }
5149
5150 #ifdef HAVE_LINUX_USRREGS
5151
5152 int
5153 register_addr (const struct usrregs_info *usrregs, int regnum)
5154 {
5155 int addr;
5156
5157 if (regnum < 0 || regnum >= usrregs->num_regs)
5158 error ("Invalid register number %d.", regnum);
5159
5160 addr = usrregs->regmap[regnum];
5161
5162 return addr;
5163 }
5164
5165 /* Fetch one register. */
5166 static void
5167 fetch_register (const struct usrregs_info *usrregs,
5168 struct regcache *regcache, int regno)
5169 {
5170 CORE_ADDR regaddr;
5171 int i, size;
5172 char *buf;
5173 int pid;
5174
5175 if (regno >= usrregs->num_regs)
5176 return;
5177 if ((*the_low_target.cannot_fetch_register) (regno))
5178 return;
5179
5180 regaddr = register_addr (usrregs, regno);
5181 if (regaddr == -1)
5182 return;
5183
5184 size = ((register_size (regcache->tdesc, regno)
5185 + sizeof (PTRACE_XFER_TYPE) - 1)
5186 & -sizeof (PTRACE_XFER_TYPE));
5187 buf = (char *) alloca (size);
5188
5189 pid = lwpid_of (current_thread);
5190 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5191 {
5192 errno = 0;
5193 *(PTRACE_XFER_TYPE *) (buf + i) =
5194 ptrace (PTRACE_PEEKUSER, pid,
5195 /* Coerce to a uintptr_t first to avoid potential gcc warning
5196 of coercing an 8 byte integer to a 4 byte pointer. */
5197 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
5198 regaddr += sizeof (PTRACE_XFER_TYPE);
5199 if (errno != 0)
5200 error ("reading register %d: %s", regno, strerror (errno));
5201 }
5202
5203 if (the_low_target.supply_ptrace_register)
5204 the_low_target.supply_ptrace_register (regcache, regno, buf);
5205 else
5206 supply_register (regcache, regno, buf);
5207 }
5208
5209 /* Store one register. */
5210 static void
5211 store_register (const struct usrregs_info *usrregs,
5212 struct regcache *regcache, int regno)
5213 {
5214 CORE_ADDR regaddr;
5215 int i, size;
5216 char *buf;
5217 int pid;
5218
5219 if (regno >= usrregs->num_regs)
5220 return;
5221 if ((*the_low_target.cannot_store_register) (regno))
5222 return;
5223
5224 regaddr = register_addr (usrregs, regno);
5225 if (regaddr == -1)
5226 return;
5227
5228 size = ((register_size (regcache->tdesc, regno)
5229 + sizeof (PTRACE_XFER_TYPE) - 1)
5230 & -sizeof (PTRACE_XFER_TYPE));
5231 buf = (char *) alloca (size);
5232 memset (buf, 0, size);
5233
5234 if (the_low_target.collect_ptrace_register)
5235 the_low_target.collect_ptrace_register (regcache, regno, buf);
5236 else
5237 collect_register (regcache, regno, buf);
5238
5239 pid = lwpid_of (current_thread);
5240 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
5241 {
5242 errno = 0;
5243 ptrace (PTRACE_POKEUSER, pid,
5244 /* Coerce to a uintptr_t first to avoid potential gcc warning
5245 about coercing an 8 byte integer to a 4 byte pointer. */
5246 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
5247 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
5248 if (errno != 0)
5249 {
5250 /* At this point, ESRCH should mean the process is
5251 already gone, in which case we simply ignore attempts
5252 to change its registers. See also the related
5253 comment in linux_resume_one_lwp. */
5254 if (errno == ESRCH)
5255 return;
5256
5257 if ((*the_low_target.cannot_store_register) (regno) == 0)
5258 error ("writing register %d: %s", regno, strerror (errno));
5259 }
5260 regaddr += sizeof (PTRACE_XFER_TYPE);
5261 }
5262 }
5263
5264 /* Fetch all registers, or just one, from the child process.
5265 If REGNO is -1, do this for all registers, skipping any that are
5266 assumed to have been retrieved by regsets_fetch_inferior_registers,
5267 unless ALL is non-zero.
5268 Otherwise, REGNO specifies which register (so we can save time). */
5269 static void
5270 usr_fetch_inferior_registers (const struct regs_info *regs_info,
5271 struct regcache *regcache, int regno, int all)
5272 {
5273 struct usrregs_info *usr = regs_info->usrregs;
5274
5275 if (regno == -1)
5276 {
5277 for (regno = 0; regno < usr->num_regs; regno++)
5278 if (all || !linux_register_in_regsets (regs_info, regno))
5279 fetch_register (usr, regcache, regno);
5280 }
5281 else
5282 fetch_register (usr, regcache, regno);
5283 }
5284
5285 /* Store our register values back into the inferior.
5286 If REGNO is -1, do this for all registers, skipping any that are
5287 assumed to have been saved by regsets_store_inferior_registers,
5288 unless ALL is non-zero.
5289 Otherwise, REGNO specifies which register (so we can save time). */
5290 static void
5291 usr_store_inferior_registers (const struct regs_info *regs_info,
5292 struct regcache *regcache, int regno, int all)
5293 {
5294 struct usrregs_info *usr = regs_info->usrregs;
5295
5296 if (regno == -1)
5297 {
5298 for (regno = 0; regno < usr->num_regs; regno++)
5299 if (all || !linux_register_in_regsets (regs_info, regno))
5300 store_register (usr, regcache, regno);
5301 }
5302 else
5303 store_register (usr, regcache, regno);
5304 }
5305
5306 #else /* !HAVE_LINUX_USRREGS */
5307
5308 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5309 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
5310
5311 #endif
5312
5313
5314 void
5315 linux_fetch_registers (struct regcache *regcache, int regno)
5316 {
5317 int use_regsets;
5318 int all = 0;
5319 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5320
5321 if (regno == -1)
5322 {
5323 if (the_low_target.fetch_register != NULL
5324 && regs_info->usrregs != NULL)
5325 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
5326 (*the_low_target.fetch_register) (regcache, regno);
5327
5328 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
5329 if (regs_info->usrregs != NULL)
5330 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
5331 }
5332 else
5333 {
5334 if (the_low_target.fetch_register != NULL
5335 && (*the_low_target.fetch_register) (regcache, regno))
5336 return;
5337
5338 use_regsets = linux_register_in_regsets (regs_info, regno);
5339 if (use_regsets)
5340 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
5341 regcache);
5342 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5343 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
5344 }
5345 }
5346
5347 void
5348 linux_store_registers (struct regcache *regcache, int regno)
5349 {
5350 int use_regsets;
5351 int all = 0;
5352 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
5353
5354 if (regno == -1)
5355 {
5356 all = regsets_store_inferior_registers (regs_info->regsets_info,
5357 regcache);
5358 if (regs_info->usrregs != NULL)
5359 usr_store_inferior_registers (regs_info, regcache, regno, all);
5360 }
5361 else
5362 {
5363 use_regsets = linux_register_in_regsets (regs_info, regno);
5364 if (use_regsets)
5365 all = regsets_store_inferior_registers (regs_info->regsets_info,
5366 regcache);
5367 if ((!use_regsets || all) && regs_info->usrregs != NULL)
5368 usr_store_inferior_registers (regs_info, regcache, regno, 1);
5369 }
5370 }
5371
5372
5373 /* Copy LEN bytes from inferior's memory starting at MEMADDR
5374 to debugger memory starting at MYADDR. */
5375
5376 static int
5377 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
5378 {
5379 int pid = lwpid_of (current_thread);
5380 register PTRACE_XFER_TYPE *buffer;
5381 register CORE_ADDR addr;
5382 register int count;
5383 char filename[64];
5384 register int i;
5385 int ret;
5386 int fd;
5387
5388 /* Try using /proc. Don't bother for one word. */
5389 if (len >= 3 * sizeof (long))
5390 {
5391 int bytes;
5392
5393 /* We could keep this file open and cache it - possibly one per
5394 thread. That requires some juggling, but is even faster. */
5395 sprintf (filename, "/proc/%d/mem", pid);
5396 fd = open (filename, O_RDONLY | O_LARGEFILE);
5397 if (fd == -1)
5398 goto no_proc;
5399
5400 /* If pread64 is available, use it. It's faster if the kernel
5401 supports it (only one syscall), and it's 64-bit safe even on
5402 32-bit platforms (for instance, SPARC debugging a SPARC64
5403 application). */
5404 #ifdef HAVE_PREAD64
5405 bytes = pread64 (fd, myaddr, len, memaddr);
5406 #else
5407 bytes = -1;
5408 if (lseek (fd, memaddr, SEEK_SET) != -1)
5409 bytes = read (fd, myaddr, len);
5410 #endif
5411
5412 close (fd);
5413 if (bytes == len)
5414 return 0;
5415
5416 /* Some data was read, we'll try to get the rest with ptrace. */
5417 if (bytes > 0)
5418 {
5419 memaddr += bytes;
5420 myaddr += bytes;
5421 len -= bytes;
5422 }
5423 }
5424
5425 no_proc:
5426 /* Round starting address down to longword boundary. */
5427 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5428 /* Round ending address up; get number of longwords that makes. */
5429 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5430 / sizeof (PTRACE_XFER_TYPE));
5431 /* Allocate buffer of that many longwords. */
5432 buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5433
5434 /* Read all the longwords */
5435 errno = 0;
5436 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5437 {
5438 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5439 about coercing an 8 byte integer to a 4 byte pointer. */
5440 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
5441 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5442 (PTRACE_TYPE_ARG4) 0);
5443 if (errno)
5444 break;
5445 }
5446 ret = errno;
5447
5448 /* Copy appropriate bytes out of the buffer. */
5449 if (i > 0)
5450 {
5451 i *= sizeof (PTRACE_XFER_TYPE);
5452 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
5453 memcpy (myaddr,
5454 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5455 i < len ? i : len);
5456 }
5457
5458 return ret;
5459 }
5460
5461 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
5462 memory at MEMADDR. On failure (cannot write to the inferior)
5463 returns the value of errno. Always succeeds if LEN is zero. */
5464
5465 static int
5466 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
5467 {
5468 register int i;
5469 /* Round starting address down to longword boundary. */
5470 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
5471 /* Round ending address up; get number of longwords that makes. */
5472 register int count
5473 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
5474 / sizeof (PTRACE_XFER_TYPE);
5475
5476 /* Allocate buffer of that many longwords. */
5477 register PTRACE_XFER_TYPE *buffer = XALLOCAVEC (PTRACE_XFER_TYPE, count);
5478
5479 int pid = lwpid_of (current_thread);
5480
5481 if (len == 0)
5482 {
5483 /* Zero length write always succeeds. */
5484 return 0;
5485 }
5486
5487 if (debug_threads)
5488 {
5489 /* Dump up to four bytes. */
5490 char str[4 * 2 + 1];
5491 char *p = str;
5492 int dump = len < 4 ? len : 4;
5493
5494 for (i = 0; i < dump; i++)
5495 {
5496 sprintf (p, "%02x", myaddr[i]);
5497 p += 2;
5498 }
5499 *p = '\0';
5500
5501 debug_printf ("Writing %s to 0x%08lx in process %d\n",
5502 str, (long) memaddr, pid);
5503 }
5504
5505 /* Fill start and end extra bytes of buffer with existing memory data. */
5506
5507 errno = 0;
5508 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
5509 about coercing an 8 byte integer to a 4 byte pointer. */
5510 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
5511 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5512 (PTRACE_TYPE_ARG4) 0);
5513 if (errno)
5514 return errno;
5515
5516 if (count > 1)
5517 {
5518 errno = 0;
5519 buffer[count - 1]
5520 = ptrace (PTRACE_PEEKTEXT, pid,
5521 /* Coerce to a uintptr_t first to avoid potential gcc warning
5522 about coercing an 8 byte integer to a 4 byte pointer. */
5523 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
5524 * sizeof (PTRACE_XFER_TYPE)),
5525 (PTRACE_TYPE_ARG4) 0);
5526 if (errno)
5527 return errno;
5528 }
5529
5530 /* Copy data to be written over corresponding part of buffer. */
5531
5532 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
5533 myaddr, len);
5534
5535 /* Write the entire buffer. */
5536
5537 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
5538 {
5539 errno = 0;
5540 ptrace (PTRACE_POKETEXT, pid,
5541 /* Coerce to a uintptr_t first to avoid potential gcc warning
5542 about coercing an 8 byte integer to a 4 byte pointer. */
5543 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
5544 (PTRACE_TYPE_ARG4) buffer[i]);
5545 if (errno)
5546 return errno;
5547 }
5548
5549 return 0;
5550 }
5551
5552 static void
5553 linux_look_up_symbols (void)
5554 {
5555 #ifdef USE_THREAD_DB
5556 struct process_info *proc = current_process ();
5557
5558 if (proc->priv->thread_db != NULL)
5559 return;
5560
5561 /* If the kernel supports tracing clones, then we don't need to
5562 use the magic thread event breakpoint to learn about
5563 threads. */
5564 thread_db_init (!linux_supports_traceclone ());
5565 #endif
5566 }
5567
5568 static void
5569 linux_request_interrupt (void)
5570 {
5571 extern unsigned long signal_pid;
5572
5573 /* Send a SIGINT to the process group. This acts just like the user
5574 typed a ^C on the controlling terminal. */
5575 kill (-signal_pid, SIGINT);
5576 }
5577
5578 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
5579 to debugger memory starting at MYADDR. */
5580
5581 static int
5582 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
5583 {
5584 char filename[PATH_MAX];
5585 int fd, n;
5586 int pid = lwpid_of (current_thread);
5587
5588 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5589
5590 fd = open (filename, O_RDONLY);
5591 if (fd < 0)
5592 return -1;
5593
5594 if (offset != (CORE_ADDR) 0
5595 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5596 n = -1;
5597 else
5598 n = read (fd, myaddr, len);
5599
5600 close (fd);
5601
5602 return n;
5603 }
5604
5605 /* These breakpoint and watchpoint related wrapper functions simply
5606 pass on the function call if the target has registered a
5607 corresponding function. */
5608
5609 static int
5610 linux_supports_z_point_type (char z_type)
5611 {
5612 return (the_low_target.supports_z_point_type != NULL
5613 && the_low_target.supports_z_point_type (z_type));
5614 }
5615
5616 static int
5617 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
5618 int size, struct raw_breakpoint *bp)
5619 {
5620 if (type == raw_bkpt_type_sw)
5621 return insert_memory_breakpoint (bp);
5622 else if (the_low_target.insert_point != NULL)
5623 return the_low_target.insert_point (type, addr, size, bp);
5624 else
5625 /* Unsupported (see target.h). */
5626 return 1;
5627 }
5628
5629 static int
5630 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
5631 int size, struct raw_breakpoint *bp)
5632 {
5633 if (type == raw_bkpt_type_sw)
5634 return remove_memory_breakpoint (bp);
5635 else if (the_low_target.remove_point != NULL)
5636 return the_low_target.remove_point (type, addr, size, bp);
5637 else
5638 /* Unsupported (see target.h). */
5639 return 1;
5640 }
5641
5642 /* Implement the to_stopped_by_sw_breakpoint target_ops
5643 method. */
5644
5645 static int
5646 linux_stopped_by_sw_breakpoint (void)
5647 {
5648 struct lwp_info *lwp = get_thread_lwp (current_thread);
5649
5650 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
5651 }
5652
5653 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
5654 method. */
5655
5656 static int
5657 linux_supports_stopped_by_sw_breakpoint (void)
5658 {
5659 return USE_SIGTRAP_SIGINFO;
5660 }
5661
5662 /* Implement the to_stopped_by_hw_breakpoint target_ops
5663 method. */
5664
5665 static int
5666 linux_stopped_by_hw_breakpoint (void)
5667 {
5668 struct lwp_info *lwp = get_thread_lwp (current_thread);
5669
5670 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
5671 }
5672
5673 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5674 method. */
5675
5676 static int
5677 linux_supports_stopped_by_hw_breakpoint (void)
5678 {
5679 return USE_SIGTRAP_SIGINFO;
5680 }
5681
5682 /* Implement the supports_hardware_single_step target_ops method. */
5683
5684 static int
5685 linux_supports_hardware_single_step (void)
5686 {
5687 return can_hardware_single_step ();
5688 }
5689
5690 static int
5691 linux_stopped_by_watchpoint (void)
5692 {
5693 struct lwp_info *lwp = get_thread_lwp (current_thread);
5694
5695 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5696 }
5697
5698 static CORE_ADDR
5699 linux_stopped_data_address (void)
5700 {
5701 struct lwp_info *lwp = get_thread_lwp (current_thread);
5702
5703 return lwp->stopped_data_address;
5704 }
5705
5706 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5707 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5708 && defined(PT_TEXT_END_ADDR)
5709
5710 /* This is only used for targets that define PT_TEXT_ADDR,
5711 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5712 the target has different ways of acquiring this information, like
5713 loadmaps. */
5714
5715 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5716 to tell gdb about. */
5717
5718 static int
5719 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5720 {
5721 unsigned long text, text_end, data;
5722 int pid = lwpid_of (current_thread);
5723
5724 errno = 0;
5725
5726 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5727 (PTRACE_TYPE_ARG4) 0);
5728 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5729 (PTRACE_TYPE_ARG4) 0);
5730 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5731 (PTRACE_TYPE_ARG4) 0);
5732
5733 if (errno == 0)
5734 {
5735 /* Both text and data offsets produced at compile-time (and so
5736 used by gdb) are relative to the beginning of the program,
5737 with the data segment immediately following the text segment.
5738 However, the actual runtime layout in memory may put the data
5739 somewhere else, so when we send gdb a data base-address, we
5740 use the real data base address and subtract the compile-time
5741 data base-address from it (which is just the length of the
5742 text segment). BSS immediately follows data in both
5743 cases. */
5744 *text_p = text;
5745 *data_p = data - (text_end - text);
5746
5747 return 1;
5748 }
5749 return 0;
5750 }
5751 #endif
5752
5753 static int
5754 linux_qxfer_osdata (const char *annex,
5755 unsigned char *readbuf, unsigned const char *writebuf,
5756 CORE_ADDR offset, int len)
5757 {
5758 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5759 }
5760
5761 /* Convert a native/host siginfo object, into/from the siginfo in the
5762 layout of the inferiors' architecture. */
5763
5764 static void
5765 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5766 {
5767 int done = 0;
5768
5769 if (the_low_target.siginfo_fixup != NULL)
5770 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5771
5772 /* If there was no callback, or the callback didn't do anything,
5773 then just do a straight memcpy. */
5774 if (!done)
5775 {
5776 if (direction == 1)
5777 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5778 else
5779 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5780 }
5781 }
5782
5783 static int
5784 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5785 unsigned const char *writebuf, CORE_ADDR offset, int len)
5786 {
5787 int pid;
5788 siginfo_t siginfo;
5789 char inf_siginfo[sizeof (siginfo_t)];
5790
5791 if (current_thread == NULL)
5792 return -1;
5793
5794 pid = lwpid_of (current_thread);
5795
5796 if (debug_threads)
5797 debug_printf ("%s siginfo for lwp %d.\n",
5798 readbuf != NULL ? "Reading" : "Writing",
5799 pid);
5800
5801 if (offset >= sizeof (siginfo))
5802 return -1;
5803
5804 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5805 return -1;
5806
5807 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5808 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5809 inferior with a 64-bit GDBSERVER should look the same as debugging it
5810 with a 32-bit GDBSERVER, we need to convert it. */
5811 siginfo_fixup (&siginfo, inf_siginfo, 0);
5812
5813 if (offset + len > sizeof (siginfo))
5814 len = sizeof (siginfo) - offset;
5815
5816 if (readbuf != NULL)
5817 memcpy (readbuf, inf_siginfo + offset, len);
5818 else
5819 {
5820 memcpy (inf_siginfo + offset, writebuf, len);
5821
5822 /* Convert back to ptrace layout before flushing it out. */
5823 siginfo_fixup (&siginfo, inf_siginfo, 1);
5824
5825 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5826 return -1;
5827 }
5828
5829 return len;
5830 }
5831
5832 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5833 so we notice when children change state; as the handler for the
5834 sigsuspend in my_waitpid. */
5835
5836 static void
5837 sigchld_handler (int signo)
5838 {
5839 int old_errno = errno;
5840
5841 if (debug_threads)
5842 {
5843 do
5844 {
5845 /* fprintf is not async-signal-safe, so call write
5846 directly. */
5847 if (write (2, "sigchld_handler\n",
5848 sizeof ("sigchld_handler\n") - 1) < 0)
5849 break; /* just ignore */
5850 } while (0);
5851 }
5852
5853 if (target_is_async_p ())
5854 async_file_mark (); /* trigger a linux_wait */
5855
5856 errno = old_errno;
5857 }
5858
5859 static int
5860 linux_supports_non_stop (void)
5861 {
5862 return 1;
5863 }
5864
5865 static int
5866 linux_async (int enable)
5867 {
5868 int previous = target_is_async_p ();
5869
5870 if (debug_threads)
5871 debug_printf ("linux_async (%d), previous=%d\n",
5872 enable, previous);
5873
5874 if (previous != enable)
5875 {
5876 sigset_t mask;
5877 sigemptyset (&mask);
5878 sigaddset (&mask, SIGCHLD);
5879
5880 sigprocmask (SIG_BLOCK, &mask, NULL);
5881
5882 if (enable)
5883 {
5884 if (pipe (linux_event_pipe) == -1)
5885 {
5886 linux_event_pipe[0] = -1;
5887 linux_event_pipe[1] = -1;
5888 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5889
5890 warning ("creating event pipe failed.");
5891 return previous;
5892 }
5893
5894 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5895 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5896
5897 /* Register the event loop handler. */
5898 add_file_handler (linux_event_pipe[0],
5899 handle_target_event, NULL);
5900
5901 /* Always trigger a linux_wait. */
5902 async_file_mark ();
5903 }
5904 else
5905 {
5906 delete_file_handler (linux_event_pipe[0]);
5907
5908 close (linux_event_pipe[0]);
5909 close (linux_event_pipe[1]);
5910 linux_event_pipe[0] = -1;
5911 linux_event_pipe[1] = -1;
5912 }
5913
5914 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5915 }
5916
5917 return previous;
5918 }
5919
5920 static int
5921 linux_start_non_stop (int nonstop)
5922 {
5923 /* Register or unregister from event-loop accordingly. */
5924 linux_async (nonstop);
5925
5926 if (target_is_async_p () != (nonstop != 0))
5927 return -1;
5928
5929 return 0;
5930 }
5931
5932 static int
5933 linux_supports_multi_process (void)
5934 {
5935 return 1;
5936 }
5937
5938 /* Check if fork events are supported. */
5939
5940 static int
5941 linux_supports_fork_events (void)
5942 {
5943 return linux_supports_tracefork ();
5944 }
5945
5946 /* Check if vfork events are supported. */
5947
5948 static int
5949 linux_supports_vfork_events (void)
5950 {
5951 return linux_supports_tracefork ();
5952 }
5953
5954 /* Check if exec events are supported. */
5955
5956 static int
5957 linux_supports_exec_events (void)
5958 {
5959 return linux_supports_traceexec ();
5960 }
5961
5962 /* Callback for 'find_inferior'. Set the (possibly changed) ptrace
5963 options for the specified lwp. */
5964
5965 static int
5966 reset_lwp_ptrace_options_callback (struct inferior_list_entry *entry,
5967 void *args)
5968 {
5969 struct thread_info *thread = (struct thread_info *) entry;
5970 struct lwp_info *lwp = get_thread_lwp (thread);
5971
5972 if (!lwp->stopped)
5973 {
5974 /* Stop the lwp so we can modify its ptrace options. */
5975 lwp->must_set_ptrace_flags = 1;
5976 linux_stop_lwp (lwp);
5977 }
5978 else
5979 {
5980 /* Already stopped; go ahead and set the ptrace options. */
5981 struct process_info *proc = find_process_pid (pid_of (thread));
5982 int options = linux_low_ptrace_options (proc->attached);
5983
5984 linux_enable_event_reporting (lwpid_of (thread), options);
5985 lwp->must_set_ptrace_flags = 0;
5986 }
5987
5988 return 0;
5989 }
5990
5991 /* Target hook for 'handle_new_gdb_connection'. Causes a reset of the
5992 ptrace flags for all inferiors. This is in case the new GDB connection
5993 doesn't support the same set of events that the previous one did. */
5994
5995 static void
5996 linux_handle_new_gdb_connection (void)
5997 {
5998 pid_t pid;
5999
6000 /* Request that all the lwps reset their ptrace options. */
6001 find_inferior (&all_threads, reset_lwp_ptrace_options_callback , &pid);
6002 }
6003
6004 static int
6005 linux_supports_disable_randomization (void)
6006 {
6007 #ifdef HAVE_PERSONALITY
6008 return 1;
6009 #else
6010 return 0;
6011 #endif
6012 }
6013
6014 static int
6015 linux_supports_agent (void)
6016 {
6017 return 1;
6018 }
6019
6020 static int
6021 linux_supports_range_stepping (void)
6022 {
6023 if (*the_low_target.supports_range_stepping == NULL)
6024 return 0;
6025
6026 return (*the_low_target.supports_range_stepping) ();
6027 }
6028
6029 /* Enumerate spufs IDs for process PID. */
6030 static int
6031 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
6032 {
6033 int pos = 0;
6034 int written = 0;
6035 char path[128];
6036 DIR *dir;
6037 struct dirent *entry;
6038
6039 sprintf (path, "/proc/%ld/fd", pid);
6040 dir = opendir (path);
6041 if (!dir)
6042 return -1;
6043
6044 rewinddir (dir);
6045 while ((entry = readdir (dir)) != NULL)
6046 {
6047 struct stat st;
6048 struct statfs stfs;
6049 int fd;
6050
6051 fd = atoi (entry->d_name);
6052 if (!fd)
6053 continue;
6054
6055 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
6056 if (stat (path, &st) != 0)
6057 continue;
6058 if (!S_ISDIR (st.st_mode))
6059 continue;
6060
6061 if (statfs (path, &stfs) != 0)
6062 continue;
6063 if (stfs.f_type != SPUFS_MAGIC)
6064 continue;
6065
6066 if (pos >= offset && pos + 4 <= offset + len)
6067 {
6068 *(unsigned int *)(buf + pos - offset) = fd;
6069 written += 4;
6070 }
6071 pos += 4;
6072 }
6073
6074 closedir (dir);
6075 return written;
6076 }
6077
6078 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
6079 object type, using the /proc file system. */
6080 static int
6081 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
6082 unsigned const char *writebuf,
6083 CORE_ADDR offset, int len)
6084 {
6085 long pid = lwpid_of (current_thread);
6086 char buf[128];
6087 int fd = 0;
6088 int ret = 0;
6089
6090 if (!writebuf && !readbuf)
6091 return -1;
6092
6093 if (!*annex)
6094 {
6095 if (!readbuf)
6096 return -1;
6097 else
6098 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
6099 }
6100
6101 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
6102 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
6103 if (fd <= 0)
6104 return -1;
6105
6106 if (offset != 0
6107 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
6108 {
6109 close (fd);
6110 return 0;
6111 }
6112
6113 if (writebuf)
6114 ret = write (fd, writebuf, (size_t) len);
6115 else
6116 ret = read (fd, readbuf, (size_t) len);
6117
6118 close (fd);
6119 return ret;
6120 }
6121
6122 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
6123 struct target_loadseg
6124 {
6125 /* Core address to which the segment is mapped. */
6126 Elf32_Addr addr;
6127 /* VMA recorded in the program header. */
6128 Elf32_Addr p_vaddr;
6129 /* Size of this segment in memory. */
6130 Elf32_Word p_memsz;
6131 };
6132
6133 # if defined PT_GETDSBT
6134 struct target_loadmap
6135 {
6136 /* Protocol version number, must be zero. */
6137 Elf32_Word version;
6138 /* Pointer to the DSBT table, its size, and the DSBT index. */
6139 unsigned *dsbt_table;
6140 unsigned dsbt_size, dsbt_index;
6141 /* Number of segments in this map. */
6142 Elf32_Word nsegs;
6143 /* The actual memory map. */
6144 struct target_loadseg segs[/*nsegs*/];
6145 };
6146 # define LINUX_LOADMAP PT_GETDSBT
6147 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
6148 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
6149 # else
6150 struct target_loadmap
6151 {
6152 /* Protocol version number, must be zero. */
6153 Elf32_Half version;
6154 /* Number of segments in this map. */
6155 Elf32_Half nsegs;
6156 /* The actual memory map. */
6157 struct target_loadseg segs[/*nsegs*/];
6158 };
6159 # define LINUX_LOADMAP PTRACE_GETFDPIC
6160 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
6161 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
6162 # endif
6163
6164 static int
6165 linux_read_loadmap (const char *annex, CORE_ADDR offset,
6166 unsigned char *myaddr, unsigned int len)
6167 {
6168 int pid = lwpid_of (current_thread);
6169 int addr = -1;
6170 struct target_loadmap *data = NULL;
6171 unsigned int actual_length, copy_length;
6172
6173 if (strcmp (annex, "exec") == 0)
6174 addr = (int) LINUX_LOADMAP_EXEC;
6175 else if (strcmp (annex, "interp") == 0)
6176 addr = (int) LINUX_LOADMAP_INTERP;
6177 else
6178 return -1;
6179
6180 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
6181 return -1;
6182
6183 if (data == NULL)
6184 return -1;
6185
6186 actual_length = sizeof (struct target_loadmap)
6187 + sizeof (struct target_loadseg) * data->nsegs;
6188
6189 if (offset < 0 || offset > actual_length)
6190 return -1;
6191
6192 copy_length = actual_length - offset < len ? actual_length - offset : len;
6193 memcpy (myaddr, (char *) data + offset, copy_length);
6194 return copy_length;
6195 }
6196 #else
6197 # define linux_read_loadmap NULL
6198 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
6199
6200 static void
6201 linux_process_qsupported (char **features, int count)
6202 {
6203 if (the_low_target.process_qsupported != NULL)
6204 the_low_target.process_qsupported (features, count);
6205 }
6206
6207 static int
6208 linux_supports_tracepoints (void)
6209 {
6210 if (*the_low_target.supports_tracepoints == NULL)
6211 return 0;
6212
6213 return (*the_low_target.supports_tracepoints) ();
6214 }
6215
6216 static CORE_ADDR
6217 linux_read_pc (struct regcache *regcache)
6218 {
6219 if (the_low_target.get_pc == NULL)
6220 return 0;
6221
6222 return (*the_low_target.get_pc) (regcache);
6223 }
6224
6225 static void
6226 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
6227 {
6228 gdb_assert (the_low_target.set_pc != NULL);
6229
6230 (*the_low_target.set_pc) (regcache, pc);
6231 }
6232
6233 static int
6234 linux_thread_stopped (struct thread_info *thread)
6235 {
6236 return get_thread_lwp (thread)->stopped;
6237 }
6238
6239 /* This exposes stop-all-threads functionality to other modules. */
6240
6241 static void
6242 linux_pause_all (int freeze)
6243 {
6244 stop_all_lwps (freeze, NULL);
6245 }
6246
6247 /* This exposes unstop-all-threads functionality to other gdbserver
6248 modules. */
6249
6250 static void
6251 linux_unpause_all (int unfreeze)
6252 {
6253 unstop_all_lwps (unfreeze, NULL);
6254 }
6255
6256 static int
6257 linux_prepare_to_access_memory (void)
6258 {
6259 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6260 running LWP. */
6261 if (non_stop)
6262 linux_pause_all (1);
6263 return 0;
6264 }
6265
6266 static void
6267 linux_done_accessing_memory (void)
6268 {
6269 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
6270 running LWP. */
6271 if (non_stop)
6272 linux_unpause_all (1);
6273 }
6274
6275 static int
6276 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
6277 CORE_ADDR collector,
6278 CORE_ADDR lockaddr,
6279 ULONGEST orig_size,
6280 CORE_ADDR *jump_entry,
6281 CORE_ADDR *trampoline,
6282 ULONGEST *trampoline_size,
6283 unsigned char *jjump_pad_insn,
6284 ULONGEST *jjump_pad_insn_size,
6285 CORE_ADDR *adjusted_insn_addr,
6286 CORE_ADDR *adjusted_insn_addr_end,
6287 char *err)
6288 {
6289 return (*the_low_target.install_fast_tracepoint_jump_pad)
6290 (tpoint, tpaddr, collector, lockaddr, orig_size,
6291 jump_entry, trampoline, trampoline_size,
6292 jjump_pad_insn, jjump_pad_insn_size,
6293 adjusted_insn_addr, adjusted_insn_addr_end,
6294 err);
6295 }
6296
6297 static struct emit_ops *
6298 linux_emit_ops (void)
6299 {
6300 if (the_low_target.emit_ops != NULL)
6301 return (*the_low_target.emit_ops) ();
6302 else
6303 return NULL;
6304 }
6305
6306 static int
6307 linux_get_min_fast_tracepoint_insn_len (void)
6308 {
6309 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
6310 }
6311
6312 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
6313
6314 static int
6315 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
6316 CORE_ADDR *phdr_memaddr, int *num_phdr)
6317 {
6318 char filename[PATH_MAX];
6319 int fd;
6320 const int auxv_size = is_elf64
6321 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
6322 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
6323
6324 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
6325
6326 fd = open (filename, O_RDONLY);
6327 if (fd < 0)
6328 return 1;
6329
6330 *phdr_memaddr = 0;
6331 *num_phdr = 0;
6332 while (read (fd, buf, auxv_size) == auxv_size
6333 && (*phdr_memaddr == 0 || *num_phdr == 0))
6334 {
6335 if (is_elf64)
6336 {
6337 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
6338
6339 switch (aux->a_type)
6340 {
6341 case AT_PHDR:
6342 *phdr_memaddr = aux->a_un.a_val;
6343 break;
6344 case AT_PHNUM:
6345 *num_phdr = aux->a_un.a_val;
6346 break;
6347 }
6348 }
6349 else
6350 {
6351 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
6352
6353 switch (aux->a_type)
6354 {
6355 case AT_PHDR:
6356 *phdr_memaddr = aux->a_un.a_val;
6357 break;
6358 case AT_PHNUM:
6359 *num_phdr = aux->a_un.a_val;
6360 break;
6361 }
6362 }
6363 }
6364
6365 close (fd);
6366
6367 if (*phdr_memaddr == 0 || *num_phdr == 0)
6368 {
6369 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
6370 "phdr_memaddr = %ld, phdr_num = %d",
6371 (long) *phdr_memaddr, *num_phdr);
6372 return 2;
6373 }
6374
6375 return 0;
6376 }
6377
6378 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
6379
6380 static CORE_ADDR
6381 get_dynamic (const int pid, const int is_elf64)
6382 {
6383 CORE_ADDR phdr_memaddr, relocation;
6384 int num_phdr, i;
6385 unsigned char *phdr_buf;
6386 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
6387
6388 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
6389 return 0;
6390
6391 gdb_assert (num_phdr < 100); /* Basic sanity check. */
6392 phdr_buf = (unsigned char *) alloca (num_phdr * phdr_size);
6393
6394 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
6395 return 0;
6396
6397 /* Compute relocation: it is expected to be 0 for "regular" executables,
6398 non-zero for PIE ones. */
6399 relocation = -1;
6400 for (i = 0; relocation == -1 && i < num_phdr; i++)
6401 if (is_elf64)
6402 {
6403 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6404
6405 if (p->p_type == PT_PHDR)
6406 relocation = phdr_memaddr - p->p_vaddr;
6407 }
6408 else
6409 {
6410 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6411
6412 if (p->p_type == PT_PHDR)
6413 relocation = phdr_memaddr - p->p_vaddr;
6414 }
6415
6416 if (relocation == -1)
6417 {
6418 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately
6419 any real world executables, including PIE executables, have always
6420 PT_PHDR present. PT_PHDR is not present in some shared libraries or
6421 in fpc (Free Pascal 2.4) binaries but neither of those have a need for
6422 or present DT_DEBUG anyway (fpc binaries are statically linked).
6423
6424 Therefore if there exists DT_DEBUG there is always also PT_PHDR.
6425
6426 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
6427
6428 return 0;
6429 }
6430
6431 for (i = 0; i < num_phdr; i++)
6432 {
6433 if (is_elf64)
6434 {
6435 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
6436
6437 if (p->p_type == PT_DYNAMIC)
6438 return p->p_vaddr + relocation;
6439 }
6440 else
6441 {
6442 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
6443
6444 if (p->p_type == PT_DYNAMIC)
6445 return p->p_vaddr + relocation;
6446 }
6447 }
6448
6449 return 0;
6450 }
6451
6452 /* Return &_r_debug in the inferior, or -1 if not present. Return value
6453 can be 0 if the inferior does not yet have the library list initialized.
6454 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
6455 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
6456
6457 static CORE_ADDR
6458 get_r_debug (const int pid, const int is_elf64)
6459 {
6460 CORE_ADDR dynamic_memaddr;
6461 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
6462 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
6463 CORE_ADDR map = -1;
6464
6465 dynamic_memaddr = get_dynamic (pid, is_elf64);
6466 if (dynamic_memaddr == 0)
6467 return map;
6468
6469 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
6470 {
6471 if (is_elf64)
6472 {
6473 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
6474 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6475 union
6476 {
6477 Elf64_Xword map;
6478 unsigned char buf[sizeof (Elf64_Xword)];
6479 }
6480 rld_map;
6481 #endif
6482 #ifdef DT_MIPS_RLD_MAP
6483 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6484 {
6485 if (linux_read_memory (dyn->d_un.d_val,
6486 rld_map.buf, sizeof (rld_map.buf)) == 0)
6487 return rld_map.map;
6488 else
6489 break;
6490 }
6491 #endif /* DT_MIPS_RLD_MAP */
6492 #ifdef DT_MIPS_RLD_MAP_REL
6493 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6494 {
6495 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6496 rld_map.buf, sizeof (rld_map.buf)) == 0)
6497 return rld_map.map;
6498 else
6499 break;
6500 }
6501 #endif /* DT_MIPS_RLD_MAP_REL */
6502
6503 if (dyn->d_tag == DT_DEBUG && map == -1)
6504 map = dyn->d_un.d_val;
6505
6506 if (dyn->d_tag == DT_NULL)
6507 break;
6508 }
6509 else
6510 {
6511 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
6512 #if defined DT_MIPS_RLD_MAP || defined DT_MIPS_RLD_MAP_REL
6513 union
6514 {
6515 Elf32_Word map;
6516 unsigned char buf[sizeof (Elf32_Word)];
6517 }
6518 rld_map;
6519 #endif
6520 #ifdef DT_MIPS_RLD_MAP
6521 if (dyn->d_tag == DT_MIPS_RLD_MAP)
6522 {
6523 if (linux_read_memory (dyn->d_un.d_val,
6524 rld_map.buf, sizeof (rld_map.buf)) == 0)
6525 return rld_map.map;
6526 else
6527 break;
6528 }
6529 #endif /* DT_MIPS_RLD_MAP */
6530 #ifdef DT_MIPS_RLD_MAP_REL
6531 if (dyn->d_tag == DT_MIPS_RLD_MAP_REL)
6532 {
6533 if (linux_read_memory (dyn->d_un.d_val + dynamic_memaddr,
6534 rld_map.buf, sizeof (rld_map.buf)) == 0)
6535 return rld_map.map;
6536 else
6537 break;
6538 }
6539 #endif /* DT_MIPS_RLD_MAP_REL */
6540
6541 if (dyn->d_tag == DT_DEBUG && map == -1)
6542 map = dyn->d_un.d_val;
6543
6544 if (dyn->d_tag == DT_NULL)
6545 break;
6546 }
6547
6548 dynamic_memaddr += dyn_size;
6549 }
6550
6551 return map;
6552 }
6553
6554 /* Read one pointer from MEMADDR in the inferior. */
6555
6556 static int
6557 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
6558 {
6559 int ret;
6560
6561 /* Go through a union so this works on either big or little endian
6562 hosts, when the inferior's pointer size is smaller than the size
6563 of CORE_ADDR. It is assumed the inferior's endianness is the
6564 same of the superior's. */
6565 union
6566 {
6567 CORE_ADDR core_addr;
6568 unsigned int ui;
6569 unsigned char uc;
6570 } addr;
6571
6572 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
6573 if (ret == 0)
6574 {
6575 if (ptr_size == sizeof (CORE_ADDR))
6576 *ptr = addr.core_addr;
6577 else if (ptr_size == sizeof (unsigned int))
6578 *ptr = addr.ui;
6579 else
6580 gdb_assert_not_reached ("unhandled pointer size");
6581 }
6582 return ret;
6583 }
6584
6585 struct link_map_offsets
6586 {
6587 /* Offset and size of r_debug.r_version. */
6588 int r_version_offset;
6589
6590 /* Offset and size of r_debug.r_map. */
6591 int r_map_offset;
6592
6593 /* Offset to l_addr field in struct link_map. */
6594 int l_addr_offset;
6595
6596 /* Offset to l_name field in struct link_map. */
6597 int l_name_offset;
6598
6599 /* Offset to l_ld field in struct link_map. */
6600 int l_ld_offset;
6601
6602 /* Offset to l_next field in struct link_map. */
6603 int l_next_offset;
6604
6605 /* Offset to l_prev field in struct link_map. */
6606 int l_prev_offset;
6607 };
6608
6609 /* Construct qXfer:libraries-svr4:read reply. */
6610
6611 static int
6612 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
6613 unsigned const char *writebuf,
6614 CORE_ADDR offset, int len)
6615 {
6616 char *document;
6617 unsigned document_len;
6618 struct process_info_private *const priv = current_process ()->priv;
6619 char filename[PATH_MAX];
6620 int pid, is_elf64;
6621
6622 static const struct link_map_offsets lmo_32bit_offsets =
6623 {
6624 0, /* r_version offset. */
6625 4, /* r_debug.r_map offset. */
6626 0, /* l_addr offset in link_map. */
6627 4, /* l_name offset in link_map. */
6628 8, /* l_ld offset in link_map. */
6629 12, /* l_next offset in link_map. */
6630 16 /* l_prev offset in link_map. */
6631 };
6632
6633 static const struct link_map_offsets lmo_64bit_offsets =
6634 {
6635 0, /* r_version offset. */
6636 8, /* r_debug.r_map offset. */
6637 0, /* l_addr offset in link_map. */
6638 8, /* l_name offset in link_map. */
6639 16, /* l_ld offset in link_map. */
6640 24, /* l_next offset in link_map. */
6641 32 /* l_prev offset in link_map. */
6642 };
6643 const struct link_map_offsets *lmo;
6644 unsigned int machine;
6645 int ptr_size;
6646 CORE_ADDR lm_addr = 0, lm_prev = 0;
6647 int allocated = 1024;
6648 char *p;
6649 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
6650 int header_done = 0;
6651
6652 if (writebuf != NULL)
6653 return -2;
6654 if (readbuf == NULL)
6655 return -1;
6656
6657 pid = lwpid_of (current_thread);
6658 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
6659 is_elf64 = elf_64_file_p (filename, &machine);
6660 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
6661 ptr_size = is_elf64 ? 8 : 4;
6662
6663 while (annex[0] != '\0')
6664 {
6665 const char *sep;
6666 CORE_ADDR *addrp;
6667 int len;
6668
6669 sep = strchr (annex, '=');
6670 if (sep == NULL)
6671 break;
6672
6673 len = sep - annex;
6674 if (len == 5 && startswith (annex, "start"))
6675 addrp = &lm_addr;
6676 else if (len == 4 && startswith (annex, "prev"))
6677 addrp = &lm_prev;
6678 else
6679 {
6680 annex = strchr (sep, ';');
6681 if (annex == NULL)
6682 break;
6683 annex++;
6684 continue;
6685 }
6686
6687 annex = decode_address_to_semicolon (addrp, sep + 1);
6688 }
6689
6690 if (lm_addr == 0)
6691 {
6692 int r_version = 0;
6693
6694 if (priv->r_debug == 0)
6695 priv->r_debug = get_r_debug (pid, is_elf64);
6696
6697 /* We failed to find DT_DEBUG. Such situation will not change
6698 for this inferior - do not retry it. Report it to GDB as
6699 E01, see for the reasons at the GDB solib-svr4.c side. */
6700 if (priv->r_debug == (CORE_ADDR) -1)
6701 return -1;
6702
6703 if (priv->r_debug != 0)
6704 {
6705 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
6706 (unsigned char *) &r_version,
6707 sizeof (r_version)) != 0
6708 || r_version != 1)
6709 {
6710 warning ("unexpected r_debug version %d", r_version);
6711 }
6712 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
6713 &lm_addr, ptr_size) != 0)
6714 {
6715 warning ("unable to read r_map from 0x%lx",
6716 (long) priv->r_debug + lmo->r_map_offset);
6717 }
6718 }
6719 }
6720
6721 document = (char *) xmalloc (allocated);
6722 strcpy (document, "<library-list-svr4 version=\"1.0\"");
6723 p = document + strlen (document);
6724
6725 while (lm_addr
6726 && read_one_ptr (lm_addr + lmo->l_name_offset,
6727 &l_name, ptr_size) == 0
6728 && read_one_ptr (lm_addr + lmo->l_addr_offset,
6729 &l_addr, ptr_size) == 0
6730 && read_one_ptr (lm_addr + lmo->l_ld_offset,
6731 &l_ld, ptr_size) == 0
6732 && read_one_ptr (lm_addr + lmo->l_prev_offset,
6733 &l_prev, ptr_size) == 0
6734 && read_one_ptr (lm_addr + lmo->l_next_offset,
6735 &l_next, ptr_size) == 0)
6736 {
6737 unsigned char libname[PATH_MAX];
6738
6739 if (lm_prev != l_prev)
6740 {
6741 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
6742 (long) lm_prev, (long) l_prev);
6743 break;
6744 }
6745
6746 /* Ignore the first entry even if it has valid name as the first entry
6747 corresponds to the main executable. The first entry should not be
6748 skipped if the dynamic loader was loaded late by a static executable
6749 (see solib-svr4.c parameter ignore_first). But in such case the main
6750 executable does not have PT_DYNAMIC present and this function already
6751 exited above due to failed get_r_debug. */
6752 if (lm_prev == 0)
6753 {
6754 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
6755 p = p + strlen (p);
6756 }
6757 else
6758 {
6759 /* Not checking for error because reading may stop before
6760 we've got PATH_MAX worth of characters. */
6761 libname[0] = '\0';
6762 linux_read_memory (l_name, libname, sizeof (libname) - 1);
6763 libname[sizeof (libname) - 1] = '\0';
6764 if (libname[0] != '\0')
6765 {
6766 /* 6x the size for xml_escape_text below. */
6767 size_t len = 6 * strlen ((char *) libname);
6768 char *name;
6769
6770 if (!header_done)
6771 {
6772 /* Terminate `<library-list-svr4'. */
6773 *p++ = '>';
6774 header_done = 1;
6775 }
6776
6777 while (allocated < p - document + len + 200)
6778 {
6779 /* Expand to guarantee sufficient storage. */
6780 uintptr_t document_len = p - document;
6781
6782 document = (char *) xrealloc (document, 2 * allocated);
6783 allocated *= 2;
6784 p = document + document_len;
6785 }
6786
6787 name = xml_escape_text ((char *) libname);
6788 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6789 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6790 name, (unsigned long) lm_addr,
6791 (unsigned long) l_addr, (unsigned long) l_ld);
6792 free (name);
6793 }
6794 }
6795
6796 lm_prev = lm_addr;
6797 lm_addr = l_next;
6798 }
6799
6800 if (!header_done)
6801 {
6802 /* Empty list; terminate `<library-list-svr4'. */
6803 strcpy (p, "/>");
6804 }
6805 else
6806 strcpy (p, "</library-list-svr4>");
6807
6808 document_len = strlen (document);
6809 if (offset < document_len)
6810 document_len -= offset;
6811 else
6812 document_len = 0;
6813 if (len > document_len)
6814 len = document_len;
6815
6816 memcpy (readbuf, document + offset, len);
6817 xfree (document);
6818
6819 return len;
6820 }
6821
6822 #ifdef HAVE_LINUX_BTRACE
6823
6824 /* See to_disable_btrace target method. */
6825
6826 static int
6827 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6828 {
6829 enum btrace_error err;
6830
6831 err = linux_disable_btrace (tinfo);
6832 return (err == BTRACE_ERR_NONE ? 0 : -1);
6833 }
6834
6835 /* Encode an Intel(R) Processor Trace configuration. */
6836
6837 static void
6838 linux_low_encode_pt_config (struct buffer *buffer,
6839 const struct btrace_data_pt_config *config)
6840 {
6841 buffer_grow_str (buffer, "<pt-config>\n");
6842
6843 switch (config->cpu.vendor)
6844 {
6845 case CV_INTEL:
6846 buffer_xml_printf (buffer, "<cpu vendor=\"GenuineIntel\" family=\"%u\" "
6847 "model=\"%u\" stepping=\"%u\"/>\n",
6848 config->cpu.family, config->cpu.model,
6849 config->cpu.stepping);
6850 break;
6851
6852 default:
6853 break;
6854 }
6855
6856 buffer_grow_str (buffer, "</pt-config>\n");
6857 }
6858
6859 /* Encode a raw buffer. */
6860
6861 static void
6862 linux_low_encode_raw (struct buffer *buffer, const gdb_byte *data,
6863 unsigned int size)
6864 {
6865 if (size == 0)
6866 return;
6867
6868 /* We use hex encoding - see common/rsp-low.h. */
6869 buffer_grow_str (buffer, "<raw>\n");
6870
6871 while (size-- > 0)
6872 {
6873 char elem[2];
6874
6875 elem[0] = tohex ((*data >> 4) & 0xf);
6876 elem[1] = tohex (*data++ & 0xf);
6877
6878 buffer_grow (buffer, elem, 2);
6879 }
6880
6881 buffer_grow_str (buffer, "</raw>\n");
6882 }
6883
6884 /* See to_read_btrace target method. */
6885
6886 static int
6887 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6888 enum btrace_read_type type)
6889 {
6890 struct btrace_data btrace;
6891 struct btrace_block *block;
6892 enum btrace_error err;
6893 int i;
6894
6895 btrace_data_init (&btrace);
6896
6897 err = linux_read_btrace (&btrace, tinfo, type);
6898 if (err != BTRACE_ERR_NONE)
6899 {
6900 if (err == BTRACE_ERR_OVERFLOW)
6901 buffer_grow_str0 (buffer, "E.Overflow.");
6902 else
6903 buffer_grow_str0 (buffer, "E.Generic Error.");
6904
6905 goto err;
6906 }
6907
6908 switch (btrace.format)
6909 {
6910 case BTRACE_FORMAT_NONE:
6911 buffer_grow_str0 (buffer, "E.No Trace.");
6912 goto err;
6913
6914 case BTRACE_FORMAT_BTS:
6915 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6916 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6917
6918 for (i = 0;
6919 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6920 i++)
6921 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6922 paddress (block->begin), paddress (block->end));
6923
6924 buffer_grow_str0 (buffer, "</btrace>\n");
6925 break;
6926
6927 case BTRACE_FORMAT_PT:
6928 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6929 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6930 buffer_grow_str (buffer, "<pt>\n");
6931
6932 linux_low_encode_pt_config (buffer, &btrace.variant.pt.config);
6933
6934 linux_low_encode_raw (buffer, btrace.variant.pt.data,
6935 btrace.variant.pt.size);
6936
6937 buffer_grow_str (buffer, "</pt>\n");
6938 buffer_grow_str0 (buffer, "</btrace>\n");
6939 break;
6940
6941 default:
6942 buffer_grow_str0 (buffer, "E.Unsupported Trace Format.");
6943 goto err;
6944 }
6945
6946 btrace_data_fini (&btrace);
6947 return 0;
6948
6949 err:
6950 btrace_data_fini (&btrace);
6951 return -1;
6952 }
6953
6954 /* See to_btrace_conf target method. */
6955
6956 static int
6957 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6958 struct buffer *buffer)
6959 {
6960 const struct btrace_config *conf;
6961
6962 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6963 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6964
6965 conf = linux_btrace_conf (tinfo);
6966 if (conf != NULL)
6967 {
6968 switch (conf->format)
6969 {
6970 case BTRACE_FORMAT_NONE:
6971 break;
6972
6973 case BTRACE_FORMAT_BTS:
6974 buffer_xml_printf (buffer, "<bts");
6975 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6976 buffer_xml_printf (buffer, " />\n");
6977 break;
6978
6979 case BTRACE_FORMAT_PT:
6980 buffer_xml_printf (buffer, "<pt");
6981 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->pt.size);
6982 buffer_xml_printf (buffer, "/>\n");
6983 break;
6984 }
6985 }
6986
6987 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6988 return 0;
6989 }
6990 #endif /* HAVE_LINUX_BTRACE */
6991
6992 /* See nat/linux-nat.h. */
6993
6994 ptid_t
6995 current_lwp_ptid (void)
6996 {
6997 return ptid_of (current_thread);
6998 }
6999
7000 /* Implementation of the target_ops method "breakpoint_kind_from_pc". */
7001
7002 static int
7003 linux_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
7004 {
7005 if (the_low_target.breakpoint_kind_from_pc != NULL)
7006 return (*the_low_target.breakpoint_kind_from_pc) (pcptr);
7007 else
7008 return default_breakpoint_kind_from_pc (pcptr);
7009 }
7010
7011 /* Implementation of the target_ops method "sw_breakpoint_from_kind". */
7012
7013 static const gdb_byte *
7014 linux_sw_breakpoint_from_kind (int kind, int *size)
7015 {
7016 gdb_assert (the_low_target.sw_breakpoint_from_kind != NULL);
7017
7018 return (*the_low_target.sw_breakpoint_from_kind) (kind, size);
7019 }
7020
7021 static struct target_ops linux_target_ops = {
7022 linux_create_inferior,
7023 linux_arch_setup,
7024 linux_attach,
7025 linux_kill,
7026 linux_detach,
7027 linux_mourn,
7028 linux_join,
7029 linux_thread_alive,
7030 linux_resume,
7031 linux_wait,
7032 linux_fetch_registers,
7033 linux_store_registers,
7034 linux_prepare_to_access_memory,
7035 linux_done_accessing_memory,
7036 linux_read_memory,
7037 linux_write_memory,
7038 linux_look_up_symbols,
7039 linux_request_interrupt,
7040 linux_read_auxv,
7041 linux_supports_z_point_type,
7042 linux_insert_point,
7043 linux_remove_point,
7044 linux_stopped_by_sw_breakpoint,
7045 linux_supports_stopped_by_sw_breakpoint,
7046 linux_stopped_by_hw_breakpoint,
7047 linux_supports_stopped_by_hw_breakpoint,
7048 linux_supports_hardware_single_step,
7049 linux_stopped_by_watchpoint,
7050 linux_stopped_data_address,
7051 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
7052 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
7053 && defined(PT_TEXT_END_ADDR)
7054 linux_read_offsets,
7055 #else
7056 NULL,
7057 #endif
7058 #ifdef USE_THREAD_DB
7059 thread_db_get_tls_address,
7060 #else
7061 NULL,
7062 #endif
7063 linux_qxfer_spu,
7064 hostio_last_error_from_errno,
7065 linux_qxfer_osdata,
7066 linux_xfer_siginfo,
7067 linux_supports_non_stop,
7068 linux_async,
7069 linux_start_non_stop,
7070 linux_supports_multi_process,
7071 linux_supports_fork_events,
7072 linux_supports_vfork_events,
7073 linux_supports_exec_events,
7074 linux_handle_new_gdb_connection,
7075 #ifdef USE_THREAD_DB
7076 thread_db_handle_monitor_command,
7077 #else
7078 NULL,
7079 #endif
7080 linux_common_core_of_thread,
7081 linux_read_loadmap,
7082 linux_process_qsupported,
7083 linux_supports_tracepoints,
7084 linux_read_pc,
7085 linux_write_pc,
7086 linux_thread_stopped,
7087 NULL,
7088 linux_pause_all,
7089 linux_unpause_all,
7090 linux_stabilize_threads,
7091 linux_install_fast_tracepoint_jump_pad,
7092 linux_emit_ops,
7093 linux_supports_disable_randomization,
7094 linux_get_min_fast_tracepoint_insn_len,
7095 linux_qxfer_libraries_svr4,
7096 linux_supports_agent,
7097 #ifdef HAVE_LINUX_BTRACE
7098 linux_supports_btrace,
7099 linux_enable_btrace,
7100 linux_low_disable_btrace,
7101 linux_low_read_btrace,
7102 linux_low_btrace_conf,
7103 #else
7104 NULL,
7105 NULL,
7106 NULL,
7107 NULL,
7108 NULL,
7109 #endif
7110 linux_supports_range_stepping,
7111 linux_proc_pid_to_exec_file,
7112 linux_mntns_open_cloexec,
7113 linux_mntns_unlink,
7114 linux_mntns_readlink,
7115 linux_breakpoint_kind_from_pc,
7116 linux_sw_breakpoint_from_kind,
7117 linux_proc_tid_get_name,
7118 };
7119
7120 static void
7121 linux_init_signals ()
7122 {
7123 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
7124 to find what the cancel signal actually is. */
7125 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
7126 signal (__SIGRTMIN+1, SIG_IGN);
7127 #endif
7128 }
7129
7130 #ifdef HAVE_LINUX_REGSETS
7131 void
7132 initialize_regsets_info (struct regsets_info *info)
7133 {
7134 for (info->num_regsets = 0;
7135 info->regsets[info->num_regsets].size >= 0;
7136 info->num_regsets++)
7137 ;
7138 }
7139 #endif
7140
7141 void
7142 initialize_low (void)
7143 {
7144 struct sigaction sigchld_action;
7145
7146 memset (&sigchld_action, 0, sizeof (sigchld_action));
7147 set_target_ops (&linux_target_ops);
7148
7149 linux_init_signals ();
7150 linux_ptrace_init_warnings ();
7151
7152 sigchld_action.sa_handler = sigchld_handler;
7153 sigemptyset (&sigchld_action.sa_mask);
7154 sigchld_action.sa_flags = SA_RESTART;
7155 sigaction (SIGCHLD, &sigchld_action, NULL);
7156
7157 initialize_low_arch ();
7158
7159 linux_check_ptrace_features ();
7160 }
This page took 0.199917 seconds and 4 git commands to generate.