Fix non-executable stack handling when calling functions in the inferior.
gdb/gdbserver/linux-low.c
/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
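
/* Worked example (illustrative only): W_STOPCODE composes the raw
   wait status that waitpid reports for a signal stop, so the result
   round-trips through the standard status macros.  With Linux's
   SIGSTOP == 19:

       int ws = W_STOPCODE (SIGSTOP);   -- (19 << 8) | 0x7f == 0x137f

   and then WIFSTOPPED (ws) is true and WSTOPSIG (ws) == SIGSTOP.
   dequeue_one_deferred_signal below relies on exactly this to
   re-synthesize a stop status for a deferred signal.  */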

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif
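
/* Sketch (illustrative; assumes a no-MMU target where these offsets
   are defined, and omits error handling): the constants above index
   the tracee's USER area via PTRACE_PEEKUSER, which is how the text
   and data load addresses are fetched for relocatable uClinux-style
   executables:

       errno = 0;
       CORE_ADDR text = ptrace (PTRACE_PEEKUSER, pid,
                                (PTRACE_TYPE_ARG3) PT_TEXT_ADDR, 0);
       CORE_ADDR data = ptrace (PTRACE_PEEKUSER, pid,
                                (PTRACE_TYPE_ARG3) PT_DATA_ADDR, 0);
       if (errno == 0)
         ... report the text/data offsets, e.g. for qOffsets ...

   This is the pattern used by the qOffsets support further down in
   this file.  */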

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif

#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
         though, since it does not work when using 32-bit definitions
         on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif
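
/* Illustrative sketch (hypothetical reader, not used by this file
   directly): auxv data read from /proc/PID/auxv is an array of
   (a_type, a_val) pairs terminated by an AT_NULL entry, so a 64-bit
   consumer walks it like this:

       const Elf64_auxv_t *av = (const Elf64_auxv_t *) buf;

       for (; av->a_type != AT_NULL; av++)
         if (av->a_type == AT_ENTRY)
           entry_point = av->a_un.a_val;

   The union above deliberately holds only an integer member; see its
   comment for why a pointer member would break 32-bit-on-64-bit
   debugging.  */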

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}
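
/* Usage sketch (illustrative, hypothetical PID): the two routines
   above form a small stash for stops that arrive before the
   corresponding LWP is known, as happens with clone events:

       int status;

       add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
       ...
       if (pull_pid_from_list (&stopped_pids, 1234, &status))
         gdb_assert (WIFSTOPPED (status));

   handle_extended_wait below is the real consumer: the new child's
   initial stop may be reported to waitpid before the parent's
   PTRACE_EVENT_CLONE, and this list bridges that window.  */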

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
                                          int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);

/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accepts an integer PID; returns true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
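
/* Example call (illustrative; the EM_X86_64 check is hypothetical):
   callers typically use this to pick the right register layout
   before the first stop:

       unsigned int machine;
       int res = linux_pid_exe_is_elf_64_file (pid, &machine);

       if (res > 0 && machine == EM_X86_64)
         ... select a 64-bit target description ...

   Note the tri-state result inherited from elf_64_file_p: positive
   for a 64-bit ELF, zero for a 32-bit ELF (or a short read), and -1
   if /proc/PID/exe is unreadable or not ELF at all.  */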

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  /* Set the arch when the first LWP stops.  */
  proc->private->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
              &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      if (debug_threads)
        debug_printf ("HEW: Got clone event "
                      "from LWP %ld, new child is LWP %ld\n",
                      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
         too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
        new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
        {
          new_lwp->stop_expected = 1;
          new_lwp->status_pending_p = 1;
          new_lwp->status_pending = status;
        }
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case, we need to report the breakpoint PC.  But, when we're
     trying to step past one of our own breakpoints, that happens to
     have been placed on top of a permanent breakpoint instruction, we
     shouldn't adjust the PC, otherwise the program would keep
     trapping the permanent breakpoint forever.  */
  if ((!lwp->stepping
       || (!ptid_equal (ptid_of (current_thread), step_over_bkpt)
           && lwp->stop_pc == sw_breakpoint_pc))
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by software breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
        {
          struct regcache *regcache
            = get_thread_regcache (current_thread, 1);
          (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
        }

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
        {
          struct thread_info *thr = get_lwp_thread (lwp);

          debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
                        target_pid_to_str (ptid_of (thr)));
        }

      lwp->stop_pc = pc;
      lwp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  current_thread = saved_thread;
  return 0;
}
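
/* Worked example (illustrative) of the adjustment above, for an
   x86-style target where the breakpoint instruction is the one-byte
   0xcc and decr_pc_after_break is 1:

     - A software breakpoint planted at 0x1000 executes; the kernel
       stops the thread with SIGTRAP and the PC reads 0x1001.
     - pc = 0x1001, so sw_breakpoint_pc = 0x1001 - 1 = 0x1000.
     - breakpoint_at (0x1000) is true, so the PC is rewound to 0x1000
       and stop_pc/stop_reason are recorded at the breakpoint address.

   On a hardware-single-step target with decr_pc_after_break == 0,
   the two addresses coincide and no rewind is needed.  */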

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
         stdout to stderr so that inferior i/o doesn't corrupt the connection.
         Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
        {
          close (0);
          open ("/dev/null", O_RDONLY);
          dup2 (2, 1);
          if (write (2, "stdin/stdout redirected\n",
                     sizeof ("stdin/stdout redirected\n") - 1) < 0)
            {
              /* Errors ignored.  */;
            }
        }

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
        debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.

        If the pid we are attaching to is also the tgid, we attach to and
        stop all the existing threads.  Otherwise, we attach to pid and
        ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
        debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
         is returned if the thread's task still exists, and is marked
         as exited or zombie, as well as other conditions, so in that
         case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
          || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
        {
          if (debug_threads)
            {
              debug_printf ("Cannot attach to lwp %d: "
                            "thread is gone (%d: %s)\n",
                            lwpid, err, strerror (err));
            }
        }
      else if (err != 0)
        {
          warning (_("Cannot attach to lwp %d: %s"),
                   lwpid,
                   linux_ptrace_attach_fail_reason_string (ptid, err));
        }

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
           pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
         process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
                    target_pid_to_str (ptid_of (thr)),
                    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

         - The loop is most likely unnecessary.

         - We don't use linux_wait_for_event as that could delete lwps
           while we're iterating over them.  We're not interested in
           any pending status at this point, only in making sure all
           wait status on the kernel side are collected until the
           process is reaped.

         - We don't use __WALL here as the __WALL emulation relies on
           SIGCHLD, and killing a stopped process doesn't generate
           one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
        res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
        debug_printf ("lkop: is last of process %s\n",
                      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
        debug_printf ("lk_1: cannot find lwp for pid: %d\n",
                      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get pending signal of THREAD, for detaching purposes.  This is the
   signal the thread last stopped for, which we need to deliver to the
   thread when detaching, otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
         cleanly, then it'll have stopped with SIGSTOP.  But we don't
         want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
          || thread->last_status.value.sig == GDB_SIGNAL_0)
        return 0;

      /* Otherwise, we may need to deliver the signal we
         intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had stopped with extended "
                      "status: no pending signal\n",
                      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
           /* If we have no way to know which signals GDB does not
              want to have passed to the program, assume
              SIGTRAP/SIGINT, which is GDB's default.  */
           && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s had signal %s, "
                      "but we don't know if we should pass it.  "
                      "Default to not.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
        debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
                      target_pid_to_str (ptid_of (thread)),
                      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        debug_printf ("Sending SIGCONT to %s\n",
                      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
              (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
           target_pid_to_str (ptid_of (thread)),
           strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   false.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
          || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
        {
          if (debug_threads)
            debug_printf ("PC of %ld changed\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
               && !(*the_low_target.breakpoint_at) (pc))
        {
          if (debug_threads)
            debug_printf ("previous SW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }
      else if (lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT
               && !hardware_breakpoint_inserted_here (pc))
        {
          if (debug_threads)
            debug_printf ("previous HW breakpoint of %ld gone\n",
                          lwpid_of (thread));
          discard = 1;
        }

      current_thread = saved_thread;

      if (discard)
        {
          if (debug_threads)
            debug_printf ("discarding pending breakpoint status\n");
          lp->status_pending_p = 0;
          return 0;
        }
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (thread->entry.id))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
        count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
        debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
                      "num_lwps=%d, zombie=%d\n",
                      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
                      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
          /* Check if there are other threads in the group, as we may
             have raced with the inferior simply exiting.  */
          && !last_thread_of_process_p (leader_pid)
          && linux_proc_pid_is_zombie (leader_pid))
        {
          /* A leader zombie can mean one of two things:

             - It exited, and there's an exit status pending
               available, or only the leader exited (not the whole
               program).  In the latter case, we can't waitpid the
               leader's exit status until all other threads are gone.

             - There are 3 or more threads in the group, and a thread
               other than the leader exec'd.  On an exec, the Linux
               kernel destroys all other threads (except the execing
               one) in the thread group, and resets the execing thread's
               tid to the tgid.  No exit notification is sent for the
               execing thread -- from the ptracer's perspective, it
               appears as though the execing thread just vanishes.
               Until we reap all other threads except the leader and the
               execing thread, the leader will be zombie, and the
               execing thread will be in `D (disc sleep)'.  As soon as
               all other threads are reaped, the execing thread changes
               its tid to the tgid, and the previous (zombie) leader
               vanishes, giving place to the "new" leader.  We could try
               distinguishing the exit and exec cases, by waiting once
               more, and seeing if something comes out, but it doesn't
               sound useful.  The previous leader _does_ go away, and
               we'll re-add the new one once we see the exec event
               (which is just the same as what would happen if the
               previous leader did exit voluntarily before some other
               thread execs).  */

          if (debug_threads)
            fprintf (stderr,
                     "CZL: Thread group leader %d zombie "
                     "(it exited, or another thread execd).\n",
                     leader_pid);

          delete_lwp (leader_lp);
        }
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return true if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller, is because we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        debug_printf ("Checking whether LWP %ld needs to move out of the "
                      "jump pad.\n",
                      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                debug_printf ("Checking whether LWP %ld needs to move out of "
                              "the jump pad...it does\n",
                              lwpid_of (current_thread));
              current_thread = saved_thread;

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
                             (PTRACE_TYPE_ARG3) 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
                          (PTRACE_TYPE_ARG3) 0, &info);
                }

              regcache = get_thread_regcache (current_thread, 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
                              "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
                  "jump pad...no\n",
                  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
                  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        debug_printf ("   Already queued %d\n",
                      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        {
          if (sig->signal == WSTOPSIG (*wstat))
            {
              if (debug_threads)
                debug_printf ("Not requeuing already queued non-RT signal %d"
                              " for LWP %ld\n",
                              sig->signal,
                              lwpid_of (thread));
              return;
            }
        }
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
          &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
                &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
                      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            debug_printf ("   Still queued %d\n",
                          sig->signal);

          debug_printf ("   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}
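
/* Usage sketch (illustrative): the enqueue/dequeue pair above is
   driven by the jump-pad logic.  While a thread is still collecting
   a fast tracepoint, an arriving signal is parked instead of being
   reported:

       if (WIFSTOPPED (wstat))
         enqueue_one_deferred_signal (lwp, &wstat);

   and once the thread has left the jump pad, signals are re-reported
   one at a time:

       int wstat;

       if (dequeue_one_deferred_signal (lwp, &wstat))
         ... report WSTOPSIG (wstat) to GDB ...

   dequeue_one_deferred_signal walks to the tail of the prev chain,
   so signals are delivered in the order they originally arrived.  */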
1740
1741 /* Fetch the possibly triggered data watchpoint info and store it in
1742 CHILD.
1743
1744 On some archs, like x86, that use debug registers to set
1745 watchpoints, it's possible that the way to know which watched
1746 address trapped, is to check the register that is used to select
1747 which address to watch. Problem is, between setting the watchpoint
1748 and reading back which data address trapped, the user may change
1749 the set of watchpoints, and, as a consequence, GDB changes the
1750 debug registers in the inferior. To avoid reading back a stale
1751 stopped-data-address when that happens, we cache in LP the fact
1752 that a watchpoint trapped, and the corresponding data address, as
1753 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1754 registers meanwhile, we have the cached data we can rely on. */
1755
1756 static int
1757 check_stopped_by_watchpoint (struct lwp_info *child)
1758 {
1759 if (the_low_target.stopped_by_watchpoint != NULL)
1760 {
1761 struct thread_info *saved_thread;
1762
1763 saved_thread = current_thread;
1764 current_thread = get_lwp_thread (child);
1765
1766 if (the_low_target.stopped_by_watchpoint ())
1767 {
1768 child->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
1769
1770 if (the_low_target.stopped_data_address != NULL)
1771 child->stopped_data_address
1772 = the_low_target.stopped_data_address ();
1773 else
1774 child->stopped_data_address = 0;
1775 }
1776
1777 current_thread = saved_thread;
1778 }
1779
1780 return child->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
1781 }
1782
1783 /* Do low-level handling of the event, and check if we should go on
1784 and pass it to caller code. Return the affected lwp if we are, or
1785 NULL otherwise. */
1786
1787 static struct lwp_info *
1788 linux_low_filter_event (int lwpid, int wstat)
1789 {
1790 struct lwp_info *child;
1791 struct thread_info *thread;
1792 int have_stop_pc = 0;
1793
1794 child = find_lwp_pid (pid_to_ptid (lwpid));
1795
1796 /* If we didn't find a process, one of two things presumably happened:
1797 - A process we started and then detached from has exited. Ignore it.
1798 - A process we are controlling has forked and the new child's stop
1799 was reported to us by the kernel. Save its PID. */
1800 if (child == NULL && WIFSTOPPED (wstat))
1801 {
1802 add_to_pid_list (&stopped_pids, lwpid, wstat);
1803 return NULL;
1804 }
1805 else if (child == NULL)
1806 return NULL;
1807
1808 thread = get_lwp_thread (child);
1809
1810 child->stopped = 1;
1811
1812 child->last_status = wstat;
1813
1814 /* Check if the thread has exited. */
1815 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1816 {
1817 if (debug_threads)
1818 debug_printf ("LLFE: %d exited.\n", lwpid);
1819 if (num_lwps (pid_of (thread)) > 1)
1820 {
1821
1822 /* If there is at least one more LWP, then the exit signal was
1823 not the end of the debugged application and should be
1824 ignored. */
1825 delete_lwp (child);
1826 return NULL;
1827 }
1828 else
1829 {
1830 /* This was the last lwp in the process. Since events are
1831 serialized to GDB core, and we can't report this one
1832 right now, but GDB core and the other target layers will
1833 want to be notified about the exit code/signal, leave the
1834 status pending for the next time we're able to report
1835 it. */
1836 mark_lwp_dead (child, wstat);
1837 return child;
1838 }
1839 }
1840
1841 gdb_assert (WIFSTOPPED (wstat));
1842
1843 if (WIFSTOPPED (wstat))
1844 {
1845 struct process_info *proc;
1846
1847 /* Architecture-specific setup after inferior is running. This
1848 needs to happen after we have attached to the inferior and it
1849 is stopped for the first time, but before we access any
1850 inferior registers. */
1851 proc = find_process_pid (pid_of (thread));
1852 if (proc->private->new_inferior)
1853 {
1854 struct thread_info *saved_thread;
1855
1856 saved_thread = current_thread;
1857 current_thread = thread;
1858
1859 the_low_target.arch_setup ();
1860
1861 current_thread = saved_thread;
1862
1863 proc->private->new_inferior = 0;
1864 }
1865 }
1866
1867 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1868 {
1869 struct process_info *proc = find_process_pid (pid_of (thread));
1870
1871 linux_enable_event_reporting (lwpid, proc->attached);
1872 child->must_set_ptrace_flags = 0;
1873 }
1874
1875 /* Be careful to not overwrite stop_pc until
1876 check_stopped_by_breakpoint is called. */
1877 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1878 && linux_is_extended_waitstatus (wstat))
1879 {
1880 child->stop_pc = get_pc (child);
1881 handle_extended_wait (child, wstat);
1882 return NULL;
1883 }
1884
1885 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1886 && check_stopped_by_watchpoint (child))
1887 ;
1888 else if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
1889 {
1890 if (check_stopped_by_breakpoint (child))
1891 have_stop_pc = 1;
1892 }
1893
1894 if (!have_stop_pc)
1895 child->stop_pc = get_pc (child);
1896
1897 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1898 && child->stop_expected)
1899 {
1900 if (debug_threads)
1901 debug_printf ("Expected stop.\n");
1902 child->stop_expected = 0;
1903
1904 if (thread->last_resume_kind == resume_stop)
1905 {
1906 /* We want to report the stop to the core. Treat the
1907 SIGSTOP as a normal event. */
1908 }
1909 else if (stopping_threads != NOT_STOPPING_THREADS)
1910 {
1911 /* Stopping threads. We don't want this SIGSTOP to end up
1912 pending. */
1913 return NULL;
1914 }
1915 else
1916 {
1917 /* Filter out the event. */
1918 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1919 return NULL;
1920 }
1921 }
1922
1923 child->status_pending_p = 1;
1924 child->status_pending = wstat;
1925 return child;
1926 }
1927
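/* The classification above leans entirely on the POSIX wait-status
   macros. Below is a minimal stand-alone sketch of the same decoding,
   outside gdbserver; the forked child is hypothetical, chosen only to
   produce one stop event and one exit event.  */
#if 0
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/wait.h>

int
main (void)
{
  pid_t pid = fork ();

  if (pid == 0)
    {
      raise (SIGSTOP);	/* First event: a stop.  */
      _exit (42);	/* Second event: an exit.  */
    }

  for (;;)
    {
      int wstat;

      if (waitpid (pid, &wstat, WUNTRACED) == -1)
	break;

      if (WIFSTOPPED (wstat))
	{
	  printf ("stopped by signal %d\n", WSTOPSIG (wstat));
	  kill (pid, SIGCONT);
	}
      else if (WIFEXITED (wstat))
	{
	  printf ("exited with code %d\n", WEXITSTATUS (wstat));
	  break;
	}
      else if (WIFSIGNALED (wstat))
	{
	  printf ("killed by signal %d\n", WTERMSIG (wstat));
	  break;
	}
    }
  return 0;
}
#endif
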
1928 /* Resume LWPs that are currently stopped without any pending status
1929 to report, but are resumed from the core's perspective. */
1930
1931 static void
1932 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
1933 {
1934 struct thread_info *thread = (struct thread_info *) entry;
1935 struct lwp_info *lp = get_thread_lwp (thread);
1936
1937 if (lp->stopped
1938 && !lp->status_pending_p
1939 && thread->last_resume_kind != resume_stop
1940 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1941 {
1942 int step = thread->last_resume_kind == resume_step;
1943
1944 if (debug_threads)
1945 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
1946 target_pid_to_str (ptid_of (thread)),
1947 paddress (lp->stop_pc),
1948 step);
1949
1950 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
1951 }
1952 }
1953
1954 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1955 match FILTER_PTID (leaving others pending). The PTIDs can be:
1956 minus_one_ptid, to specify any child; a pid PTID, specifying all
1957 lwps of a thread group; or a PTID representing a single lwp. Store
1958 the stop status through the status pointer WSTAT. OPTIONS is
1959 passed to the waitpid call. Return 0 if no event was found and
1960 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1961    were found. Return the PID of the stopped child otherwise. */
1962
1963 static int
1964 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1965 int *wstatp, int options)
1966 {
1967 struct thread_info *event_thread;
1968 struct lwp_info *event_child, *requested_child;
1969 sigset_t block_mask, prev_mask;
1970
1971 retry:
1972 /* N.B. event_thread points to the thread_info struct that contains
1973 event_child. Keep them in sync. */
1974 event_thread = NULL;
1975 event_child = NULL;
1976 requested_child = NULL;
1977
1978 /* Check for a lwp with a pending status. */
1979
1980 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
1981 {
1982 event_thread = (struct thread_info *)
1983 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
1984 if (event_thread != NULL)
1985 event_child = get_thread_lwp (event_thread);
1986 if (debug_threads && event_thread)
1987 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
1988 }
1989 else if (!ptid_equal (filter_ptid, null_ptid))
1990 {
1991 requested_child = find_lwp_pid (filter_ptid);
1992
1993 if (stopping_threads == NOT_STOPPING_THREADS
1994 && requested_child->status_pending_p
1995 && requested_child->collecting_fast_tracepoint)
1996 {
1997 enqueue_one_deferred_signal (requested_child,
1998 &requested_child->status_pending);
1999 requested_child->status_pending_p = 0;
2000 requested_child->status_pending = 0;
2001 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2002 }
2003
2004 if (requested_child->suspended
2005 && requested_child->status_pending_p)
2006 {
2007 internal_error (__FILE__, __LINE__,
2008 "requesting an event out of a"
2009 " suspended child?");
2010 }
2011
2012 if (requested_child->status_pending_p)
2013 {
2014 event_child = requested_child;
2015 event_thread = get_lwp_thread (event_child);
2016 }
2017 }
2018
2019 if (event_child != NULL)
2020 {
2021 if (debug_threads)
2022 debug_printf ("Got an event from pending child %ld (%04x)\n",
2023 lwpid_of (event_thread), event_child->status_pending);
2024 *wstatp = event_child->status_pending;
2025 event_child->status_pending_p = 0;
2026 event_child->status_pending = 0;
2027 current_thread = event_thread;
2028 return lwpid_of (event_thread);
2029 }
2030
2031 /* But if we don't find a pending event, we'll have to wait.
2032
2033 We only enter this loop if no process has a pending wait status.
2034 Thus any action taken in response to a wait status inside this
2035 loop is responding as soon as we detect the status, not after any
2036 pending events. */
2037
2038 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2039 all signals while here. */
2040 sigfillset (&block_mask);
2041 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2042
2043 /* Always pull all events out of the kernel. We'll randomly select
2044 an event LWP out of all that have events, to prevent
2045 starvation. */
2046 while (event_child == NULL)
2047 {
2048 pid_t ret = 0;
2049
2050       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2051 quirks:
2052
2053 - If the thread group leader exits while other threads in the
2054 thread group still exist, waitpid(TGID, ...) hangs. That
2055 waitpid won't return an exit status until the other threads
2056 in the group are reaped.
2057
2058 - When a non-leader thread execs, that thread just vanishes
2059 without reporting an exit (so we'd hang if we waited for it
2060 explicitly in that case). The exec event is reported to
2061 the TGID pid (although we don't currently enable exec
2062 events). */
2063 errno = 0;
2064 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2065
2066 if (debug_threads)
2067 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2068 ret, errno ? strerror (errno) : "ERRNO-OK");
2069
2070 if (ret > 0)
2071 {
2072 if (debug_threads)
2073 {
2074 debug_printf ("LLW: waitpid %ld received %s\n",
2075 (long) ret, status_to_str (*wstatp));
2076 }
2077
2078 /* Filter all events. IOW, leave all events pending. We'll
2079 randomly select an event LWP out of all that have events
2080 below. */
2081 linux_low_filter_event (ret, *wstatp);
2082 /* Retry until nothing comes out of waitpid. A single
2083 SIGCHLD can indicate more than one child stopped. */
2084 continue;
2085 }
2086
2087 /* Now that we've pulled all events out of the kernel, resume
2088 LWPs that don't have an interesting event to report. */
2089 if (stopping_threads == NOT_STOPPING_THREADS)
2090 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2091
2092 /* ... and find an LWP with a status to report to the core, if
2093 any. */
2094 event_thread = (struct thread_info *)
2095 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2096 if (event_thread != NULL)
2097 {
2098 event_child = get_thread_lwp (event_thread);
2099 *wstatp = event_child->status_pending;
2100 event_child->status_pending_p = 0;
2101 event_child->status_pending = 0;
2102 break;
2103 }
2104
2105 /* Check for zombie thread group leaders. Those can't be reaped
2106 until all other threads in the thread group are. */
2107 check_zombie_leaders ();
2108
2109 /* If there are no resumed children left in the set of LWPs we
2110 want to wait for, bail. We can't just block in
2111 waitpid/sigsuspend, because lwps might have been left stopped
2112 in trace-stop state, and we'd be stuck forever waiting for
2113 their status to change (which would only happen if we resumed
2114 them). Even if WNOHANG is set, this return code is preferred
2115 over 0 (below), as it is more detailed. */
2116 if ((find_inferior (&all_threads,
2117 not_stopped_callback,
2118 &wait_ptid) == NULL))
2119 {
2120 if (debug_threads)
2121 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2122 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2123 return -1;
2124 }
2125
2126 /* No interesting event to report to the caller. */
2127 if ((options & WNOHANG))
2128 {
2129 if (debug_threads)
2130 debug_printf ("WNOHANG set, no event found\n");
2131
2132 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2133 return 0;
2134 }
2135
2136 /* Block until we get an event reported with SIGCHLD. */
2137 if (debug_threads)
2138 debug_printf ("sigsuspend'ing\n");
2139
2140 sigsuspend (&prev_mask);
2141 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2142 goto retry;
2143 }
2144
2145 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2146
2147 current_thread = event_thread;
2148
2149 /* Check for thread exit. */
2150 if (! WIFSTOPPED (*wstatp))
2151 {
2152 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2153
2154 if (debug_threads)
2155 	debug_printf ("LWP %ld is the last lwp of process. "
2156 		      "Process %d exiting.\n",
2157 		      lwpid_of (event_thread), pid_of (event_thread));
2158 return lwpid_of (event_thread);
2159 }
2160
2161 return lwpid_of (event_thread);
2162 }
2163
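/* The blocking strategy above -- drain everything with WNOHANG, then
   sleep in sigsuspend with SIGCHLD blocked -- is a general pattern for
   waiting on many children without losing wakeups. A stand-alone
   sketch of just that pattern follows (no ptrace; the children here
   are hypothetical stand-ins for LWPs):  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static void
sigchld_handler (int sig)
{
  /* Nothing to do; a handler is needed only so sigsuspend returns.  */
}

int
main (void)
{
  sigset_t block_mask, prev_mask;
  int live = 3, i;

  signal (SIGCHLD, sigchld_handler);

  for (i = 0; i < live; i++)
    if (fork () == 0)
      {
	sleep (1 + i);
	_exit (0);
      }

  /* Block SIGCHLD so it can't fire between the last failed waitpid
     and the sigsuspend below (that window would lose the wakeup).  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  while (live > 0)
    {
      pid_t ret;
      int wstat;

      /* Drain every event already queued in the kernel.  */
      while ((ret = waitpid (-1, &wstat, WNOHANG)) > 0)
	{
	  printf ("child %ld exited\n", (long) ret);
	  live--;
	}

      if (live == 0)
	break;

      /* Atomically unblock SIGCHLD and sleep until it arrives.  */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return 0;
}
#endif
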
2164 /* Wait for an event from child(ren) PTID. PTIDs can be:
2165 minus_one_ptid, to specify any child; a pid PTID, specifying all
2166 lwps of a thread group; or a PTID representing a single lwp. Store
2167 the stop status through the status pointer WSTAT. OPTIONS is
2168 passed to the waitpid call. Return 0 if no event was found and
2169 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2170    were found. Return the PID of the stopped child otherwise. */
2171
2172 static int
2173 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2174 {
2175 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2176 }
2177
2178 /* Count the LWPs that have had events. */
2179
2180 static int
2181 count_events_callback (struct inferior_list_entry *entry, void *data)
2182 {
2183 struct thread_info *thread = (struct thread_info *) entry;
2184 int *count = data;
2185
2186 gdb_assert (count != NULL);
2187
2188 /* Count only resumed LWPs that have an event pending. */
2189 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2190 && thread->last_resume_kind != resume_stop
2191       && get_thread_lwp (thread)->status_pending_p)
2192 (*count)++;
2193
2194 return 0;
2195 }
2196
2197 /* Select the LWP (if any) that is currently being single-stepped. */
2198
2199 static int
2200 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2201 {
2202 struct thread_info *thread = (struct thread_info *) entry;
2203 struct lwp_info *lp = get_thread_lwp (thread);
2204
2205 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2206 && thread->last_resume_kind == resume_step
2207 && lp->status_pending_p)
2208 return 1;
2209 else
2210 return 0;
2211 }
2212
2213 /* Select the Nth LWP that has had a SIGTRAP event that should be
2214 reported to GDB. */
2215
2216 static int
2217 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2218 {
2219 struct thread_info *thread = (struct thread_info *) entry;
2220 int *selector = data;
2221
2222 gdb_assert (selector != NULL);
2223
2224 /* Select only resumed LWPs that have an event pending. */
2225 if (thread->last_resume_kind != resume_stop
2226 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2227       && get_thread_lwp (thread)->status_pending_p)
2228 if ((*selector)-- == 0)
2229 return 1;
2230
2231 return 0;
2232 }
2233
2234 /* Select one LWP out of those that have events pending. */
2235
2236 static void
2237 select_event_lwp (struct lwp_info **orig_lp)
2238 {
2239 int num_events = 0;
2240 int random_selector;
2241 struct thread_info *event_thread = NULL;
2242
2243 /* In all-stop, give preference to the LWP that is being
2244 single-stepped. There will be at most one, and it's the LWP that
2245 the core is most interested in. If we didn't do this, then we'd
2246 have to handle pending step SIGTRAPs somehow in case the core
2247 later continues the previously-stepped thread, otherwise we'd
2248 report the pending SIGTRAP, and the core, not having stepped the
2249 thread, wouldn't understand what the trap was for, and therefore
2250 would report it to the user as a random signal. */
2251 if (!non_stop)
2252 {
2253 event_thread
2254 = (struct thread_info *) find_inferior (&all_threads,
2255 select_singlestep_lwp_callback,
2256 NULL);
2257 if (event_thread != NULL)
2258 {
2259 if (debug_threads)
2260 debug_printf ("SEL: Select single-step %s\n",
2261 target_pid_to_str (ptid_of (event_thread)));
2262 }
2263 }
2264 if (event_thread == NULL)
2265 {
2266 /* No single-stepping LWP. Select one at random, out of those
2267 which have had SIGTRAP events. */
2268
2269 /* First see how many SIGTRAP events we have. */
2270 find_inferior (&all_threads, count_events_callback, &num_events);
2271
2272 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2273 random_selector = (int)
2274 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2275
2276 if (debug_threads && num_events > 1)
2277 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2278 num_events, random_selector);
2279
2280 event_thread
2281 = (struct thread_info *) find_inferior (&all_threads,
2282 select_event_lwp_callback,
2283 &random_selector);
2284 }
2285
2286 if (event_thread != NULL)
2287 {
2288 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2289
2290 /* Switch the event LWP. */
2291 *orig_lp = event_lp;
2292 }
2293 }
2294
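/* The two-pass pick above (count the candidates, then select the Nth)
   uses the classic unbiased scaling idiom for rand (). A stand-alone
   sketch of the same selection over a plain array; the "pending"
   flags are hypothetical stand-ins for LWPs with events:  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int
main (void)
{
  int pending[] = { 0, 1, 0, 1, 1, 0, 1 };
  int n = sizeof (pending) / sizeof (pending[0]);
  int num_events = 0, selector, i;

  srand (time (NULL));

  /* First pass: count the candidates.  */
  for (i = 0; i < n; i++)
    if (pending[i])
      num_events++;

  /* Scale rand () into [0, num_events) without modulo bias.  */
  selector = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

  /* Second pass: pick the SELECTOR'th candidate.  */
  for (i = 0; i < n; i++)
    if (pending[i] && selector-- == 0)
      {
	printf ("selected slot %d\n", i);
	break;
      }

  return 0;
}
#endif
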
2295 /* Decrement the suspend count of an LWP. */
2296
2297 static int
2298 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2299 {
2300 struct thread_info *thread = (struct thread_info *) entry;
2301 struct lwp_info *lwp = get_thread_lwp (thread);
2302
2303 /* Ignore EXCEPT. */
2304 if (lwp == except)
2305 return 0;
2306
2307 lwp->suspended--;
2308
2309 gdb_assert (lwp->suspended >= 0);
2310 return 0;
2311 }
2312
2313 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2314    non-NULL. */
2315
2316 static void
2317 unsuspend_all_lwps (struct lwp_info *except)
2318 {
2319 find_inferior (&all_threads, unsuspend_one_lwp, except);
2320 }
2321
2322 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2323 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2324 void *data);
2325 static int lwp_running (struct inferior_list_entry *entry, void *data);
2326 static ptid_t linux_wait_1 (ptid_t ptid,
2327 struct target_waitstatus *ourstatus,
2328 int target_options);
2329
2330 /* Stabilize threads (move out of jump pads).
2331
2332 If a thread is midway collecting a fast tracepoint, we need to
2333 finish the collection and move it out of the jump pad before
2334 reporting the signal.
2335
2336 This avoids recursion while collecting (when a signal arrives
2337 midway, and the signal handler itself collects), which would trash
2338 the trace buffer. In case the user set a breakpoint in a signal
2339    handler, this avoids the backtrace showing the jump pad, etc.
2340 Most importantly, there are certain things we can't do safely if
2341    threads are stopped in a jump pad (or in its callees). For
2342 example:
2343
2344    - starting a new trace run. A thread still collecting the
2345    previous run could trash the trace buffer when resumed. The trace
2346    buffer control structures would have been reset but the thread had
2347    no way to tell. The thread could even be midway through memcpy'ing
2348    to the buffer, which would mean that when resumed, it would clobber
2349    the trace buffer that had been set up for a new run.
2350
2351 - we can't rewrite/reuse the jump pads for new tracepoints
2352 safely. Say you do tstart while a thread is stopped midway while
2353 collecting. When the thread is later resumed, it finishes the
2354 collection, and returns to the jump pad, to execute the original
2355 instruction that was under the tracepoint jump at the time the
2356 older run had been started. If the jump pad had been rewritten
2357 since for something else in the new run, the thread would now
2358 execute the wrong / random instructions. */
2359
2360 static void
2361 linux_stabilize_threads (void)
2362 {
2363 struct thread_info *saved_thread;
2364 struct thread_info *thread_stuck;
2365
2366 thread_stuck
2367 = (struct thread_info *) find_inferior (&all_threads,
2368 stuck_in_jump_pad_callback,
2369 NULL);
2370 if (thread_stuck != NULL)
2371 {
2372 if (debug_threads)
2373 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2374 lwpid_of (thread_stuck));
2375 return;
2376 }
2377
2378 saved_thread = current_thread;
2379
2380 stabilizing_threads = 1;
2381
2382 /* Kick 'em all. */
2383 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2384
2385 /* Loop until all are stopped out of the jump pads. */
2386 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2387 {
2388 struct target_waitstatus ourstatus;
2389 struct lwp_info *lwp;
2390 int wstat;
2391
2392       /* Note that we go through the full wait event loop. While
2393 	 moving threads out of the jump pad, we need to be able to step
2394 over internal breakpoints and such. */
2395 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2396
2397 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2398 {
2399 lwp = get_thread_lwp (current_thread);
2400
2401 /* Lock it. */
2402 lwp->suspended++;
2403
2404 if (ourstatus.value.sig != GDB_SIGNAL_0
2405 || current_thread->last_resume_kind == resume_stop)
2406 {
2407 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2408 enqueue_one_deferred_signal (lwp, &wstat);
2409 }
2410 }
2411 }
2412
2413 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2414
2415 stabilizing_threads = 0;
2416
2417 current_thread = saved_thread;
2418
2419 if (debug_threads)
2420 {
2421 thread_stuck
2422 = (struct thread_info *) find_inferior (&all_threads,
2423 stuck_in_jump_pad_callback,
2424 NULL);
2425 if (thread_stuck != NULL)
2426 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2427 lwpid_of (thread_stuck));
2428 }
2429 }
2430
2431 static void async_file_mark (void);
2432
2433 /* Convenience function that is called when the kernel reports an
2434 event that is not passed out to GDB. */
2435
2436 static ptid_t
2437 ignore_event (struct target_waitstatus *ourstatus)
2438 {
2439 /* If we got an event, there may still be others, as a single
2440 SIGCHLD can indicate more than one child stopped. This forces
2441 another target_wait call. */
2442 async_file_mark ();
2443
2444 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2445 return null_ptid;
2446 }
2447
2448 /* Wait for an event from the inferior, and store its status in OURSTATUS. */
2449
2450 static ptid_t
2451 linux_wait_1 (ptid_t ptid,
2452 struct target_waitstatus *ourstatus, int target_options)
2453 {
2454 int w;
2455 struct lwp_info *event_child;
2456 int options;
2457 int pid;
2458 int step_over_finished;
2459 int bp_explains_trap;
2460 int maybe_internal_trap;
2461 int report_to_gdb;
2462 int trace_event;
2463 int in_step_range;
2464
2465 if (debug_threads)
2466 {
2467 debug_enter ();
2468 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2469 }
2470
2471 /* Translate generic target options into linux options. */
2472 options = __WALL;
2473 if (target_options & TARGET_WNOHANG)
2474 options |= WNOHANG;
2475
2476 bp_explains_trap = 0;
2477 trace_event = 0;
2478 in_step_range = 0;
2479 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2480
2481 if (ptid_equal (step_over_bkpt, null_ptid))
2482 pid = linux_wait_for_event (ptid, &w, options);
2483 else
2484 {
2485 if (debug_threads)
2486 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2487 target_pid_to_str (step_over_bkpt));
2488 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2489 }
2490
2491 if (pid == 0)
2492 {
2493 gdb_assert (target_options & TARGET_WNOHANG);
2494
2495 if (debug_threads)
2496 {
2497 debug_printf ("linux_wait_1 ret = null_ptid, "
2498 "TARGET_WAITKIND_IGNORE\n");
2499 debug_exit ();
2500 }
2501
2502 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2503 return null_ptid;
2504 }
2505 else if (pid == -1)
2506 {
2507 if (debug_threads)
2508 {
2509 debug_printf ("linux_wait_1 ret = null_ptid, "
2510 "TARGET_WAITKIND_NO_RESUMED\n");
2511 debug_exit ();
2512 }
2513
2514 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2515 return null_ptid;
2516 }
2517
2518 event_child = get_thread_lwp (current_thread);
2519
2520 /* linux_wait_for_event only returns an exit status for the last
2521 child of a process. Report it. */
2522 if (WIFEXITED (w) || WIFSIGNALED (w))
2523 {
2524 if (WIFEXITED (w))
2525 {
2526 ourstatus->kind = TARGET_WAITKIND_EXITED;
2527 ourstatus->value.integer = WEXITSTATUS (w);
2528
2529 if (debug_threads)
2530 {
2531 debug_printf ("linux_wait_1 ret = %s, exited with "
2532 "retcode %d\n",
2533 target_pid_to_str (ptid_of (current_thread)),
2534 WEXITSTATUS (w));
2535 debug_exit ();
2536 }
2537 }
2538 else
2539 {
2540 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2541 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2542
2543 if (debug_threads)
2544 {
2545 debug_printf ("linux_wait_1 ret = %s, terminated with "
2546 "signal %d\n",
2547 target_pid_to_str (ptid_of (current_thread)),
2548 WTERMSIG (w));
2549 debug_exit ();
2550 }
2551 }
2552
2553 return ptid_of (current_thread);
2554 }
2555
2556 /* If this event was not handled before, and is not a SIGTRAP, we
2557 report it. SIGILL and SIGSEGV are also treated as traps in case
2558 a breakpoint is inserted at the current PC. If this target does
2559 not support internal breakpoints at all, we also report the
2560 SIGTRAP without further processing; it's of no concern to us. */
2561 maybe_internal_trap
2562 = (supports_breakpoints ()
2563 && (WSTOPSIG (w) == SIGTRAP
2564 || ((WSTOPSIG (w) == SIGILL
2565 || WSTOPSIG (w) == SIGSEGV)
2566 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2567
2568 if (maybe_internal_trap)
2569 {
2570 /* Handle anything that requires bookkeeping before deciding to
2571 report the event or continue waiting. */
2572
2573 /* First check if we can explain the SIGTRAP with an internal
2574 breakpoint, or if we should possibly report the event to GDB.
2575 Do this before anything that may remove or insert a
2576 breakpoint. */
2577 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2578
2579 /* We have a SIGTRAP, possibly a step-over dance has just
2580 finished. If so, tweak the state machine accordingly,
2581 reinsert breakpoints and delete any reinsert (software
2582 single-step) breakpoints. */
2583 step_over_finished = finish_step_over (event_child);
2584
2585 /* Now invoke the callbacks of any internal breakpoints there. */
2586 check_breakpoints (event_child->stop_pc);
2587
2588 /* Handle tracepoint data collecting. This may overflow the
2589 trace buffer, and cause a tracing stop, removing
2590 breakpoints. */
2591 trace_event = handle_tracepoints (event_child);
2592
2593 if (bp_explains_trap)
2594 {
2595 /* If we stepped or ran into an internal breakpoint, we've
2596 already handled it. So next time we resume (from this
2597 PC), we should step over it. */
2598 if (debug_threads)
2599 debug_printf ("Hit a gdbserver breakpoint.\n");
2600
2601 if (breakpoint_here (event_child->stop_pc))
2602 event_child->need_step_over = 1;
2603 }
2604 }
2605 else
2606 {
2607 /* We have some other signal, possibly a step-over dance was in
2608 progress, and it should be cancelled too. */
2609 step_over_finished = finish_step_over (event_child);
2610 }
2611
2612 /* We have all the data we need. Either report the event to GDB, or
2613 resume threads and keep waiting for more. */
2614
2615 /* If we're collecting a fast tracepoint, finish the collection and
2616 move out of the jump pad before delivering a signal. See
2617 linux_stabilize_threads. */
2618
2619 if (WIFSTOPPED (w)
2620 && WSTOPSIG (w) != SIGTRAP
2621 && supports_fast_tracepoints ()
2622 && agent_loaded_p ())
2623 {
2624 if (debug_threads)
2625 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2626 "to defer or adjust it.\n",
2627 WSTOPSIG (w), lwpid_of (current_thread));
2628
2629 /* Allow debugging the jump pad itself. */
2630 if (current_thread->last_resume_kind != resume_step
2631 && maybe_move_out_of_jump_pad (event_child, &w))
2632 {
2633 enqueue_one_deferred_signal (event_child, &w);
2634
2635 if (debug_threads)
2636 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2637 WSTOPSIG (w), lwpid_of (current_thread));
2638
2639 linux_resume_one_lwp (event_child, 0, 0, NULL);
2640
2641 return ignore_event (ourstatus);
2642 }
2643 }
2644
2645 if (event_child->collecting_fast_tracepoint)
2646 {
2647 if (debug_threads)
2648 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2649 "Check if we're already there.\n",
2650 lwpid_of (current_thread),
2651 event_child->collecting_fast_tracepoint);
2652
2653 trace_event = 1;
2654
2655 event_child->collecting_fast_tracepoint
2656 = linux_fast_tracepoint_collecting (event_child, NULL);
2657
2658 if (event_child->collecting_fast_tracepoint != 1)
2659 {
2660 /* No longer need this breakpoint. */
2661 if (event_child->exit_jump_pad_bkpt != NULL)
2662 {
2663 if (debug_threads)
2664 		debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2665 "stopping all threads momentarily.\n");
2666
2667 /* Other running threads could hit this breakpoint.
2668 We don't handle moribund locations like GDB does,
2669 instead we always pause all threads when removing
2670 breakpoints, so that any step-over or
2671 decr_pc_after_break adjustment is always taken
2672 care of while the breakpoint is still
2673 inserted. */
2674 stop_all_lwps (1, event_child);
2675
2676 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2677 event_child->exit_jump_pad_bkpt = NULL;
2678
2679 unstop_all_lwps (1, event_child);
2680
2681 gdb_assert (event_child->suspended >= 0);
2682 }
2683 }
2684
2685 if (event_child->collecting_fast_tracepoint == 0)
2686 {
2687 if (debug_threads)
2688 debug_printf ("fast tracepoint finished "
2689 "collecting successfully.\n");
2690
2691 /* We may have a deferred signal to report. */
2692 if (dequeue_one_deferred_signal (event_child, &w))
2693 {
2694 if (debug_threads)
2695 debug_printf ("dequeued one signal.\n");
2696 }
2697 else
2698 {
2699 if (debug_threads)
2700 debug_printf ("no deferred signals.\n");
2701
2702 if (stabilizing_threads)
2703 {
2704 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2705 ourstatus->value.sig = GDB_SIGNAL_0;
2706
2707 if (debug_threads)
2708 {
2709 debug_printf ("linux_wait_1 ret = %s, stopped "
2710 "while stabilizing threads\n",
2711 target_pid_to_str (ptid_of (current_thread)));
2712 debug_exit ();
2713 }
2714
2715 return ptid_of (current_thread);
2716 }
2717 }
2718 }
2719 }
2720
2721 /* Check whether GDB would be interested in this event. */
2722
2723 /* If GDB is not interested in this signal, don't stop other
2724 threads, and don't report it to GDB. Just resume the inferior
2725 right away. We do this for threading-related signals as well as
2726 any that GDB specifically requested we ignore. But never ignore
2727 SIGSTOP if we sent it ourselves, and do not ignore signals when
2728 stepping - they may require special handling to skip the signal
2729 handler. Also never ignore signals that could be caused by a
2730 breakpoint. */
2731 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2732 thread library? */
2733 if (WIFSTOPPED (w)
2734 && current_thread->last_resume_kind != resume_step
2735 && (
2736 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2737 (current_process ()->private->thread_db != NULL
2738 && (WSTOPSIG (w) == __SIGRTMIN
2739 || WSTOPSIG (w) == __SIGRTMIN + 1))
2740 ||
2741 #endif
2742 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2743 && !(WSTOPSIG (w) == SIGSTOP
2744 && current_thread->last_resume_kind == resume_stop)
2745 && !linux_wstatus_maybe_breakpoint (w))))
2746 {
2747 siginfo_t info, *info_p;
2748
2749 if (debug_threads)
2750 debug_printf ("Ignored signal %d for LWP %ld.\n",
2751 WSTOPSIG (w), lwpid_of (current_thread));
2752
2753 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2754 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2755 info_p = &info;
2756 else
2757 info_p = NULL;
2758 linux_resume_one_lwp (event_child, event_child->stepping,
2759 WSTOPSIG (w), info_p);
2760 return ignore_event (ourstatus);
2761 }
2762
2763 /* Note that all addresses are always "out of the step range" when
2764 there's no range to begin with. */
2765 in_step_range = lwp_in_step_range (event_child);
2766
2767 /* If GDB wanted this thread to single step, and the thread is out
2768 of the step range, we always want to report the SIGTRAP, and let
2769 GDB handle it. Watchpoints should always be reported. So should
2770 signals we can't explain. A SIGTRAP we can't explain could be a
2771    GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2772    do, we'll be able to handle GDB breakpoints on top of internal
2773 breakpoints, by handling the internal breakpoint and still
2774 reporting the event to GDB. If we don't, we're out of luck, GDB
2775 won't see the breakpoint hit. */
2776 report_to_gdb = (!maybe_internal_trap
2777 || (current_thread->last_resume_kind == resume_step
2778 && !in_step_range)
2779 || event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT
2780 || (!step_over_finished && !in_step_range
2781 && !bp_explains_trap && !trace_event)
2782 || (gdb_breakpoint_here (event_child->stop_pc)
2783 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2784 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2785
2786 run_breakpoint_commands (event_child->stop_pc);
2787
2788 /* We found no reason GDB would want us to stop. We either hit one
2789 of our own breakpoints, or finished an internal step GDB
2790 shouldn't know about. */
2791 if (!report_to_gdb)
2792 {
2793 if (debug_threads)
2794 {
2795 if (bp_explains_trap)
2796 debug_printf ("Hit a gdbserver breakpoint.\n");
2797 if (step_over_finished)
2798 debug_printf ("Step-over finished.\n");
2799 if (trace_event)
2800 debug_printf ("Tracepoint event.\n");
2801 if (lwp_in_step_range (event_child))
2802 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2803 paddress (event_child->stop_pc),
2804 paddress (event_child->step_range_start),
2805 paddress (event_child->step_range_end));
2806 }
2807
2808 /* We're not reporting this breakpoint to GDB, so apply the
2809 decr_pc_after_break adjustment to the inferior's regcache
2810 ourselves. */
2811
2812 if (the_low_target.set_pc != NULL)
2813 {
2814 struct regcache *regcache
2815 = get_thread_regcache (current_thread, 1);
2816 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2817 }
2818
2819 /* We may have finished stepping over a breakpoint. If so,
2820 we've stopped and suspended all LWPs momentarily except the
2821 stepping one. This is where we resume them all again. We're
2822 going to keep waiting, so use proceed, which handles stepping
2823 over the next breakpoint. */
2824 if (debug_threads)
2825 debug_printf ("proceeding all threads.\n");
2826
2827 if (step_over_finished)
2828 unsuspend_all_lwps (event_child);
2829
2830 proceed_all_lwps ();
2831 return ignore_event (ourstatus);
2832 }
2833
2834 if (debug_threads)
2835 {
2836 if (current_thread->last_resume_kind == resume_step)
2837 {
2838 if (event_child->step_range_start == event_child->step_range_end)
2839 debug_printf ("GDB wanted to single-step, reporting event.\n");
2840 else if (!lwp_in_step_range (event_child))
2841 debug_printf ("Out of step range, reporting event.\n");
2842 }
2843 if (event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
2844 debug_printf ("Stopped by watchpoint.\n");
2845 else if (gdb_breakpoint_here (event_child->stop_pc))
2846 debug_printf ("Stopped by GDB breakpoint.\n");
2847       debug_printf ("Hit a non-gdbserver trap event.\n");
2849 }
2850
2851 /* Alright, we're going to report a stop. */
2852
2853 if (!stabilizing_threads)
2854 {
2855 /* In all-stop, stop all threads. */
2856 if (!non_stop)
2857 stop_all_lwps (0, NULL);
2858
2859 /* If we're not waiting for a specific LWP, choose an event LWP
2860 from among those that have had events. Giving equal priority
2861 to all LWPs that have had events helps prevent
2862 starvation. */
2863 if (ptid_equal (ptid, minus_one_ptid))
2864 {
2865 event_child->status_pending_p = 1;
2866 event_child->status_pending = w;
2867
2868 select_event_lwp (&event_child);
2869
2870 /* current_thread and event_child must stay in sync. */
2871 current_thread = get_lwp_thread (event_child);
2872
2873 event_child->status_pending_p = 0;
2874 w = event_child->status_pending;
2875 }
2876
2877 if (step_over_finished)
2878 {
2879 if (!non_stop)
2880 {
2881 /* If we were doing a step-over, all other threads but
2882 the stepping one had been paused in start_step_over,
2883 with their suspend counts incremented. We don't want
2884 to do a full unstop/unpause, because we're in
2885 all-stop mode (so we want threads stopped), but we
2886 still need to unsuspend the other threads, to
2887 decrement their `suspended' count back. */
2888 unsuspend_all_lwps (event_child);
2889 }
2890 else
2891 {
2892 /* If we just finished a step-over, then all threads had
2893 been momentarily paused. In all-stop, that's fine,
2894 we want threads stopped by now anyway. In non-stop,
2895 we need to re-resume threads that GDB wanted to be
2896 running. */
2897 unstop_all_lwps (1, event_child);
2898 }
2899 }
2900
2901 /* Stabilize threads (move out of jump pads). */
2902 if (!non_stop)
2903 stabilize_threads ();
2904 }
2905 else
2906 {
2907 /* If we just finished a step-over, then all threads had been
2908 momentarily paused. In all-stop, that's fine, we want
2909 threads stopped by now anyway. In non-stop, we need to
2910 re-resume threads that GDB wanted to be running. */
2911 if (step_over_finished)
2912 unstop_all_lwps (1, event_child);
2913 }
2914
2915 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2916
2917 /* Now that we've selected our final event LWP, un-adjust its PC if
2918 it was a software breakpoint. */
2919 if (event_child->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
2920 {
2921 int decr_pc = the_low_target.decr_pc_after_break;
2922
2923 if (decr_pc != 0)
2924 {
2925 struct regcache *regcache
2926 = get_thread_regcache (current_thread, 1);
2927 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
2928 }
2929 }
2930
2931 if (current_thread->last_resume_kind == resume_stop
2932 && WSTOPSIG (w) == SIGSTOP)
2933 {
2934       /* A thread that GDB requested to stop with vCont;t stopped
2935 	 cleanly, so report it as SIG0. The use of
2936 SIGSTOP is an implementation detail. */
2937 ourstatus->value.sig = GDB_SIGNAL_0;
2938 }
2939 else if (current_thread->last_resume_kind == resume_stop
2940 && WSTOPSIG (w) != SIGSTOP)
2941 {
2942       /* A thread that GDB requested to stop with vCont;t, but which
2943 	 stopped for some other reason. */
2944 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2945 }
2946 else
2947 {
2948 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2949 }
2950
2951 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2952
2953 if (debug_threads)
2954 {
2955 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2956 target_pid_to_str (ptid_of (current_thread)),
2957 ourstatus->kind, ourstatus->value.sig);
2958 debug_exit ();
2959 }
2960
2961 return ptid_of (current_thread);
2962 }
2963
2964 /* Get rid of any pending event in the pipe. */
2965 static void
2966 async_file_flush (void)
2967 {
2968 int ret;
2969 char buf;
2970
2971 do
2972 ret = read (linux_event_pipe[0], &buf, 1);
2973 while (ret >= 0 || (ret == -1 && errno == EINTR));
2974 }
2975
2976 /* Put something in the pipe, so the event loop wakes up. */
2977 static void
2978 async_file_mark (void)
2979 {
2980 int ret;
2981
2982 async_file_flush ();
2983
2984 do
2985 ret = write (linux_event_pipe[1], "+", 1);
2986 while (ret == 0 || (ret == -1 && errno == EINTR));
2987
2988 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2989 be awakened anyway. */
2990 }
2991
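/* async_file_mark and async_file_flush implement the classic
   self-pipe trick: the read end sits in the event loop's select/poll
   set, and writing one byte wakes it. A stand-alone sketch, assuming
   a non-blocking pipe (O_NONBLOCK is what lets the flush loop end
   with EAGAIN instead of blocking):  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int event_pipe[2];

static void
mark (void)
{
  int ret;

  do
    ret = write (event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));
  /* EAGAIN means the pipe is already full -- already marked.  */
}

static void
flush (void)
{
  int ret;
  char buf;

  do
    ret = read (event_pipe[0], &buf, 1);
  while (ret >= 0 || (ret == -1 && errno == EINTR));
  /* Ends with EAGAIN once the pipe is empty.  */
}

int
main (void)
{
  if (pipe (event_pipe) != 0)
    return 1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  mark ();	/* A select () on event_pipe[0] would now wake.  */
  flush ();	/* Consume the wakeup before waiting again.  */
  printf ("marked and flushed\n");
  return 0;
}
#endif
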
2992 static ptid_t
2993 linux_wait (ptid_t ptid,
2994 struct target_waitstatus *ourstatus, int target_options)
2995 {
2996 ptid_t event_ptid;
2997
2998 /* Flush the async file first. */
2999 if (target_is_async_p ())
3000 async_file_flush ();
3001
3002 do
3003 {
3004 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3005 }
3006 while ((target_options & TARGET_WNOHANG) == 0
3007 && ptid_equal (event_ptid, null_ptid)
3008 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3009
3010 /* If at least one stop was reported, there may be more. A single
3011 SIGCHLD can signal more than one child stop. */
3012 if (target_is_async_p ()
3013 && (target_options & TARGET_WNOHANG) != 0
3014 && !ptid_equal (event_ptid, null_ptid))
3015 async_file_mark ();
3016
3017 return event_ptid;
3018 }
3019
3020 /* Send a signal to an LWP. */
3021
3022 static int
3023 kill_lwp (unsigned long lwpid, int signo)
3024 {
3025 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3026 fails, then we are not using nptl threads and we should be using kill. */
3027
3028 #ifdef __NR_tkill
3029 {
3030 static int tkill_failed;
3031
3032 if (!tkill_failed)
3033 {
3034 int ret;
3035
3036 errno = 0;
3037 ret = syscall (__NR_tkill, lwpid, signo);
3038 if (errno != ENOSYS)
3039 return ret;
3040 tkill_failed = 1;
3041 }
3042 }
3043 #endif
3044
3045 return kill (lwpid, signo);
3046 }
3047
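/* tkill directs a signal at one specific kernel task, while kill aims
   at the whole thread group; that is why kill_lwp above prefers it.
   A stand-alone sketch of the difference, using gettid on the current
   thread (single-threaded here just to keep it short):  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int
main (void)
{
  pid_t tid = syscall (SYS_gettid);

  /* In a single-threaded process the tid equals the pid, so these
     two calls reach the same task; with multiple threads only the
     tkill is guaranteed to hit this particular thread.  */
  syscall (SYS_tkill, tid, 0);	/* Signal 0: existence check only.  */
  kill (getpid (), 0);

  printf ("tid %ld, pid %ld\n", (long) tid, (long) getpid ());
  return 0;
}
#endif
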
3048 void
3049 linux_stop_lwp (struct lwp_info *lwp)
3050 {
3051 send_sigstop (lwp);
3052 }
3053
3054 static void
3055 send_sigstop (struct lwp_info *lwp)
3056 {
3057 int pid;
3058
3059 pid = lwpid_of (get_lwp_thread (lwp));
3060
3061   /* If we already have a pending stop signal for this LWP, don't
3062      send another. */
3063 if (lwp->stop_expected)
3064 {
3065 if (debug_threads)
3066 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3067
3068 return;
3069 }
3070
3071 if (debug_threads)
3072 debug_printf ("Sending sigstop to lwp %d\n", pid);
3073
3074 lwp->stop_expected = 1;
3075 kill_lwp (pid, SIGSTOP);
3076 }
3077
3078 static int
3079 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3080 {
3081 struct thread_info *thread = (struct thread_info *) entry;
3082 struct lwp_info *lwp = get_thread_lwp (thread);
3083
3084 /* Ignore EXCEPT. */
3085 if (lwp == except)
3086 return 0;
3087
3088 if (lwp->stopped)
3089 return 0;
3090
3091 send_sigstop (lwp);
3092 return 0;
3093 }
3094
3095 /* Increment the suspend count of an LWP, and stop it, if not stopped
3096 yet. */
3097 static int
3098 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3099 void *except)
3100 {
3101 struct thread_info *thread = (struct thread_info *) entry;
3102 struct lwp_info *lwp = get_thread_lwp (thread);
3103
3104 /* Ignore EXCEPT. */
3105 if (lwp == except)
3106 return 0;
3107
3108 lwp->suspended++;
3109
3110 return send_sigstop_callback (entry, except);
3111 }
3112
3113 static void
3114 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3115 {
3116 /* It's dead, really. */
3117 lwp->dead = 1;
3118
3119 /* Store the exit status for later. */
3120 lwp->status_pending_p = 1;
3121 lwp->status_pending = wstat;
3122
3123 /* Prevent trying to stop it. */
3124 lwp->stopped = 1;
3125
3126 /* No further stops are expected from a dead lwp. */
3127 lwp->stop_expected = 0;
3128 }
3129
3130 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3131
3132 static void
3133 wait_for_sigstop (void)
3134 {
3135 struct thread_info *saved_thread;
3136 ptid_t saved_tid;
3137 int wstat;
3138 int ret;
3139
3140 saved_thread = current_thread;
3141 if (saved_thread != NULL)
3142 saved_tid = saved_thread->entry.id;
3143 else
3144 saved_tid = null_ptid; /* avoid bogus unused warning */
3145
3146 if (debug_threads)
3147 debug_printf ("wait_for_sigstop: pulling events\n");
3148
3149 /* Passing NULL_PTID as filter indicates we want all events to be
3150 left pending. Eventually this returns when there are no
3151 unwaited-for children left. */
3152 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3153 &wstat, __WALL);
3154 gdb_assert (ret == -1);
3155
3156 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3157 current_thread = saved_thread;
3158 else
3159 {
3160 if (debug_threads)
3161 debug_printf ("Previously current thread died.\n");
3162
3163 if (non_stop)
3164 {
3165 /* We can't change the current inferior behind GDB's back,
3166 otherwise, a subsequent command may apply to the wrong
3167 process. */
3168 current_thread = NULL;
3169 }
3170 else
3171 {
3172 /* Set a valid thread as current. */
3173 set_desired_thread (0);
3174 }
3175 }
3176 }
3177
3178 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3179 move it out, because we need to report the stop event to GDB. For
3180 example, if the user puts a breakpoint in the jump pad, it's
3181 because she wants to debug it. */
3182
3183 static int
3184 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3185 {
3186 struct thread_info *thread = (struct thread_info *) entry;
3187 struct lwp_info *lwp = get_thread_lwp (thread);
3188
3189 gdb_assert (lwp->suspended == 0);
3190 gdb_assert (lwp->stopped);
3191
3192   /* Allow debugging the jump pad, gdb_collect, etc. */
3193 return (supports_fast_tracepoints ()
3194 && agent_loaded_p ()
3195 && (gdb_breakpoint_here (lwp->stop_pc)
3196 || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT
3197 || thread->last_resume_kind == resume_step)
3198 && linux_fast_tracepoint_collecting (lwp, NULL));
3199 }
3200
3201 static void
3202 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3203 {
3204 struct thread_info *thread = (struct thread_info *) entry;
3205 struct lwp_info *lwp = get_thread_lwp (thread);
3206 int *wstat;
3207
3208 gdb_assert (lwp->suspended == 0);
3209 gdb_assert (lwp->stopped);
3210
3211 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3212
3213 /* Allow debugging the jump pad, gdb_collect, etc. */
3214 if (!gdb_breakpoint_here (lwp->stop_pc)
3215 && lwp->stop_reason != LWP_STOPPED_BY_WATCHPOINT
3216 && thread->last_resume_kind != resume_step
3217 && maybe_move_out_of_jump_pad (lwp, wstat))
3218 {
3219 if (debug_threads)
3220 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3221 lwpid_of (thread));
3222
3223 if (wstat)
3224 {
3225 lwp->status_pending_p = 0;
3226 enqueue_one_deferred_signal (lwp, wstat);
3227
3228 if (debug_threads)
3229 debug_printf ("Signal %d for LWP %ld deferred "
3230 "(in jump pad)\n",
3231 WSTOPSIG (*wstat), lwpid_of (thread));
3232 }
3233
3234 linux_resume_one_lwp (lwp, 0, 0, NULL);
3235 }
3236 else
3237 lwp->suspended++;
3238 }
3239
3240 static int
3241 lwp_running (struct inferior_list_entry *entry, void *data)
3242 {
3243 struct thread_info *thread = (struct thread_info *) entry;
3244 struct lwp_info *lwp = get_thread_lwp (thread);
3245
3246 if (lwp->dead)
3247 return 0;
3248 if (lwp->stopped)
3249 return 0;
3250 return 1;
3251 }
3252
3253 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3254 If SUSPEND, then also increase the suspend count of every LWP,
3255 except EXCEPT. */
3256
3257 static void
3258 stop_all_lwps (int suspend, struct lwp_info *except)
3259 {
3260 /* Should not be called recursively. */
3261 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3262
3263 if (debug_threads)
3264 {
3265 debug_enter ();
3266 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3267 suspend ? "stop-and-suspend" : "stop",
3268 except != NULL
3269 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3270 : "none");
3271 }
3272
3273 stopping_threads = (suspend
3274 ? STOPPING_AND_SUSPENDING_THREADS
3275 : STOPPING_THREADS);
3276
3277 if (suspend)
3278 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3279 else
3280 find_inferior (&all_threads, send_sigstop_callback, except);
3281 wait_for_sigstop ();
3282 stopping_threads = NOT_STOPPING_THREADS;
3283
3284 if (debug_threads)
3285 {
3286 debug_printf ("stop_all_lwps done, setting stopping_threads "
3287 "back to !stopping\n");
3288 debug_exit ();
3289 }
3290 }
3291
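/* The stop-all protocol above is: send SIGSTOP to every LWP that
   isn't stopped yet, then pull events until the kernel has reported a
   stop for each. A stand-alone, single-child sketch of that
   handshake; no ptrace is involved, so a plain job-control stop
   stands in for the trace-stop:  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t pid = fork ();
  int wstat;

  if (pid == 0)
    {
      for (;;)
	pause ();	/* The "running" inferior.  */
    }

  kill (pid, SIGSTOP);	/* Request the stop...  */

  /* ... and wait until the kernel confirms it.  */
  if (waitpid (pid, &wstat, WUNTRACED) == pid
      && WIFSTOPPED (wstat)
      && WSTOPSIG (wstat) == SIGSTOP)
    printf ("child %ld is now stopped\n", (long) pid);

  kill (pid, SIGKILL);	/* Clean up.  */
  waitpid (pid, &wstat, 0);
  return 0;
}
#endif
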
3292 /* Resume execution of the inferior process.
3293 If STEP is nonzero, single-step it.
3294 If SIGNAL is nonzero, give it that signal. */
3295
3296 static void
3297 linux_resume_one_lwp (struct lwp_info *lwp,
3298 int step, int signal, siginfo_t *info)
3299 {
3300 struct thread_info *thread = get_lwp_thread (lwp);
3301 struct thread_info *saved_thread;
3302 int fast_tp_collecting;
3303
3304 if (lwp->stopped == 0)
3305 return;
3306
3307 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3308
3309 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3310
3311 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3312 user used the "jump" command, or "set $pc = foo"). */
3313 if (lwp->stop_pc != get_pc (lwp))
3314 {
3315 /* Collecting 'while-stepping' actions doesn't make sense
3316 anymore. */
3317 release_while_stepping_state_list (thread);
3318 }
3319
3320 /* If we have pending signals or status, and a new signal, enqueue the
3321 signal. Also enqueue the signal if we are waiting to reinsert a
3322 breakpoint; it will be picked up again below. */
3323 if (signal != 0
3324 && (lwp->status_pending_p
3325 || lwp->pending_signals != NULL
3326 || lwp->bp_reinsert != 0
3327 || fast_tp_collecting))
3328 {
3329 struct pending_signals *p_sig;
3330 p_sig = xmalloc (sizeof (*p_sig));
3331 p_sig->prev = lwp->pending_signals;
3332 p_sig->signal = signal;
3333 if (info == NULL)
3334 memset (&p_sig->info, 0, sizeof (siginfo_t));
3335 else
3336 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3337 lwp->pending_signals = p_sig;
3338 }
3339
3340 if (lwp->status_pending_p)
3341 {
3342 if (debug_threads)
3343 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3344 " has pending status\n",
3345 lwpid_of (thread), step ? "step" : "continue", signal,
3346 lwp->stop_expected ? "expected" : "not expected");
3347 return;
3348 }
3349
3350 saved_thread = current_thread;
3351 current_thread = thread;
3352
3353 if (debug_threads)
3354 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3355 lwpid_of (thread), step ? "step" : "continue", signal,
3356 lwp->stop_expected ? "expected" : "not expected");
3357
3358 /* This bit needs some thinking about. If we get a signal that
3359 we must report while a single-step reinsert is still pending,
3360 we often end up resuming the thread. It might be better to
3361 (ew) allow a stack of pending events; then we could be sure that
3362 the reinsert happened right away and not lose any signals.
3363
3364 Making this stack would also shrink the window in which breakpoints are
3365 uninserted (see comment in linux_wait_for_lwp) but not enough for
3366 complete correctness, so it won't solve that problem. It may be
3367 worthwhile just to solve this one, however. */
3368 if (lwp->bp_reinsert != 0)
3369 {
3370 if (debug_threads)
3371 debug_printf (" pending reinsert at 0x%s\n",
3372 paddress (lwp->bp_reinsert));
3373
3374 if (can_hardware_single_step ())
3375 {
3376 if (fast_tp_collecting == 0)
3377 {
3378 if (step == 0)
3379 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3380 if (lwp->suspended)
3381 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3382 lwp->suspended);
3383 }
3384
3385 step = 1;
3386 }
3387
3388 /* Postpone any pending signal. It was enqueued above. */
3389 signal = 0;
3390 }
3391
3392 if (fast_tp_collecting == 1)
3393 {
3394 if (debug_threads)
3395 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3396 " (exit-jump-pad-bkpt)\n",
3397 lwpid_of (thread));
3398
3399 /* Postpone any pending signal. It was enqueued above. */
3400 signal = 0;
3401 }
3402 else if (fast_tp_collecting == 2)
3403 {
3404 if (debug_threads)
3405 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3406 " single-stepping\n",
3407 lwpid_of (thread));
3408
3409 if (can_hardware_single_step ())
3410 step = 1;
3411 else
3412 {
3413 internal_error (__FILE__, __LINE__,
3414 "moving out of jump pad single-stepping"
3415 " not implemented on this target");
3416 }
3417
3418 /* Postpone any pending signal. It was enqueued above. */
3419 signal = 0;
3420 }
3421
3422   /* If we have while-stepping actions in this thread, set it stepping.
3423 If we have a signal to deliver, it may or may not be set to
3424 SIG_IGN, we don't know. Assume so, and allow collecting
3425 while-stepping into a signal handler. A possible smart thing to
3426 do would be to set an internal breakpoint at the signal return
3427 address, continue, and carry on catching this while-stepping
3428 action only when that breakpoint is hit. A future
3429 enhancement. */
3430 if (thread->while_stepping != NULL
3431 && can_hardware_single_step ())
3432 {
3433 if (debug_threads)
3434 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3435 lwpid_of (thread));
3436 step = 1;
3437 }
3438
3439 if (the_low_target.get_pc != NULL)
3440 {
3441 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3442
3443 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3444
3445 if (debug_threads)
3446 {
3447 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3448 (long) lwp->stop_pc);
3449 }
3450 }
3451
3452 /* If we have pending signals, consume one unless we are trying to
3453 reinsert a breakpoint or we're trying to finish a fast tracepoint
3454 collect. */
3455 if (lwp->pending_signals != NULL
3456 && lwp->bp_reinsert == 0
3457 && fast_tp_collecting == 0)
3458 {
3459 struct pending_signals **p_sig;
3460
3461 p_sig = &lwp->pending_signals;
3462 while ((*p_sig)->prev != NULL)
3463 p_sig = &(*p_sig)->prev;
3464
3465 signal = (*p_sig)->signal;
3466 if ((*p_sig)->info.si_signo != 0)
3467 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3468 &(*p_sig)->info);
3469
3470 free (*p_sig);
3471 *p_sig = NULL;
3472 }
3473
3474 if (the_low_target.prepare_to_resume != NULL)
3475 the_low_target.prepare_to_resume (lwp);
3476
3477 regcache_invalidate_thread (thread);
3478 errno = 0;
3479 lwp->stopped = 0;
3480 lwp->stop_reason = LWP_STOPPED_BY_NO_REASON;
3481 lwp->stepping = step;
3482 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3483 (PTRACE_TYPE_ARG3) 0,
3484 /* Coerce to a uintptr_t first to avoid potential gcc warning
3485 of coercing an 8 byte integer to a 4 byte pointer. */
3486 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3487
3488 current_thread = saved_thread;
3489 if (errno)
3490 {
3491 /* ESRCH from ptrace either means that the thread was already
3492 running (an error) or that it is gone (a race condition). If
3493 it's gone, we will get a notification the next time we wait,
3494 so we can ignore the error. We could differentiate these
3495 two, but it's tricky without waiting; the thread still exists
3496 as a zombie, so sending it signal 0 would succeed. So just
3497 ignore ESRCH. */
3498 if (errno == ESRCH)
3499 return;
3500
3501 perror_with_name ("ptrace");
3502 }
3503 }
3504
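/* All of the bookkeeping above funnels into a single ptrace call:
   PTRACE_CONT or PTRACE_SINGLESTEP, with the signal to deliver (or 0)
   as the data argument. A stand-alone sketch of that final step, on
   a freshly forked tracee:  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t pid = fork ();
  int wstat;

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);	/* Report to the tracer.  */
      _exit (0);
    }

  waitpid (pid, &wstat, 0);	/* Tracee is now trace-stopped.  */

  /* Resume without delivering a signal; PTRACE_SINGLESTEP here would
     instead execute one instruction and stop again with SIGTRAP.  */
  if (ptrace (PTRACE_CONT, pid, 0, 0) == -1)
    perror ("ptrace");

  waitpid (pid, &wstat, 0);	/* Collect the exit.  */
  printf ("tracee exited: %d\n", WIFEXITED (wstat));
  return 0;
}
#endif
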
3505 struct thread_resume_array
3506 {
3507 struct thread_resume *resume;
3508 size_t n;
3509 };
3510
3511 /* This function is called once per thread via find_inferior.
3512 ARG is a pointer to a thread_resume_array struct.
3513 We look up the thread specified by ENTRY in ARG, and mark the thread
3514 with a pointer to the appropriate resume request.
3515
3516    This algorithm is O(threads * resume elements), but the number of
3517    resume elements is small (and will remain small at least until GDB
3518    supports thread suspension). */
3519
3520 static int
3521 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3522 {
3523 struct thread_info *thread = (struct thread_info *) entry;
3524 struct lwp_info *lwp = get_thread_lwp (thread);
3525 int ndx;
3526 struct thread_resume_array *r;
3527
3528 r = arg;
3529
3530 for (ndx = 0; ndx < r->n; ndx++)
3531 {
3532 ptid_t ptid = r->resume[ndx].thread;
3533 if (ptid_equal (ptid, minus_one_ptid)
3534 || ptid_equal (ptid, entry->id)
3535 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3536 of PID'. */
3537 || (ptid_get_pid (ptid) == pid_of (thread)
3538 && (ptid_is_pid (ptid)
3539 || ptid_get_lwp (ptid) == -1)))
3540 {
3541 if (r->resume[ndx].kind == resume_stop
3542 && thread->last_resume_kind == resume_stop)
3543 {
3544 if (debug_threads)
3545 debug_printf ("already %s LWP %ld at GDB's request\n",
3546 (thread->last_status.kind
3547 == TARGET_WAITKIND_STOPPED)
3548 ? "stopped"
3549 : "stopping",
3550 lwpid_of (thread));
3551
3552 continue;
3553 }
3554
3555 lwp->resume = &r->resume[ndx];
3556 thread->last_resume_kind = lwp->resume->kind;
3557
3558 lwp->step_range_start = lwp->resume->step_range_start;
3559 lwp->step_range_end = lwp->resume->step_range_end;
3560
3561 /* If we had a deferred signal to report, dequeue one now.
3562 This can happen if LWP gets more than one signal while
3563 trying to get out of a jump pad. */
3564 if (lwp->stopped
3565 && !lwp->status_pending_p
3566 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3567 {
3568 lwp->status_pending_p = 1;
3569
3570 if (debug_threads)
3571 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3572 "leaving status pending.\n",
3573 WSTOPSIG (lwp->status_pending),
3574 lwpid_of (thread));
3575 }
3576
3577 return 0;
3578 }
3579 }
3580
3581 /* No resume action for this thread. */
3582 lwp->resume = NULL;
3583
3584 return 0;
3585 }
3586
3587 /* find_inferior callback for linux_resume.
3588 Set *FLAG_P if this lwp has an interesting status pending. */
3589
3590 static int
3591 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3592 {
3593 struct thread_info *thread = (struct thread_info *) entry;
3594 struct lwp_info *lwp = get_thread_lwp (thread);
3595
3596 /* LWPs which will not be resumed are not interesting, because
3597 we might not wait for them next time through linux_wait. */
3598 if (lwp->resume == NULL)
3599 return 0;
3600
3601 if (thread_still_has_status_pending_p (thread))
3602 * (int *) flag_p = 1;
3603
3604 return 0;
3605 }
3606
3607 /* Return 1 if this lwp that GDB wants running is stopped at an
3608 internal breakpoint that we need to step over. It assumes that any
3609 required STOP_PC adjustment has already been propagated to the
3610 inferior's regcache. */
3611
3612 static int
3613 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3614 {
3615 struct thread_info *thread = (struct thread_info *) entry;
3616 struct lwp_info *lwp = get_thread_lwp (thread);
3617 struct thread_info *saved_thread;
3618 CORE_ADDR pc;
3619
3620 /* LWPs which will not be resumed are not interesting, because we
3621 might not wait for them next time through linux_wait. */
3622
3623 if (!lwp->stopped)
3624 {
3625 if (debug_threads)
3626 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3627 lwpid_of (thread));
3628 return 0;
3629 }
3630
3631 if (thread->last_resume_kind == resume_stop)
3632 {
3633 if (debug_threads)
3634 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3635 " stopped\n",
3636 lwpid_of (thread));
3637 return 0;
3638 }
3639
3640 gdb_assert (lwp->suspended >= 0);
3641
3642 if (lwp->suspended)
3643 {
3644 if (debug_threads)
3645 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3646 lwpid_of (thread));
3647 return 0;
3648 }
3649
3650   if (!lwp->need_step_over)
3651     {
3652       if (debug_threads)
3653 	debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3654       return 0;
3655     }

3656 if (lwp->status_pending_p)
3657 {
3658 if (debug_threads)
3659 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3660 " status.\n",
3661 lwpid_of (thread));
3662 return 0;
3663 }
3664
3665 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3666 or we have. */
3667 pc = get_pc (lwp);
3668
3669 /* If the PC has changed since we stopped, then don't do anything,
3670 and let the breakpoint/tracepoint be hit. This happens if, for
3671 instance, GDB handled the decr_pc_after_break subtraction itself,
3672 GDB is OOL stepping this thread, or the user has issued a "jump"
3673    command, or poked the thread's registers herself. */
3674 if (pc != lwp->stop_pc)
3675 {
3676 if (debug_threads)
3677 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3678 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3679 lwpid_of (thread),
3680 paddress (lwp->stop_pc), paddress (pc));
3681
3682 lwp->need_step_over = 0;
3683 return 0;
3684 }
3685
3686 saved_thread = current_thread;
3687 current_thread = thread;
3688
3689 /* We can only step over breakpoints we know about. */
3690 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3691 {
3692       /* Don't step over a breakpoint that GDB expects to hit,
3693 	though. If the condition is being evaluated on the target's side
3694 	and it evaluates to false, step over this breakpoint as well. */
3695 if (gdb_breakpoint_here (pc)
3696 && gdb_condition_true_at_breakpoint (pc)
3697 && gdb_no_commands_at_breakpoint (pc))
3698 {
3699 if (debug_threads)
3700 debug_printf ("Need step over [LWP %ld]? yes, but found"
3701 " GDB breakpoint at 0x%s; skipping step over\n",
3702 lwpid_of (thread), paddress (pc));
3703
3704 current_thread = saved_thread;
3705 return 0;
3706 }
3707 else
3708 {
3709 if (debug_threads)
3710 debug_printf ("Need step over [LWP %ld]? yes, "
3711 "found breakpoint at 0x%s\n",
3712 lwpid_of (thread), paddress (pc));
3713
3714 /* We've found an lwp that needs stepping over --- return 1 so
3715 that find_inferior stops looking. */
3716 current_thread = saved_thread;
3717
3718 /* If the step over is cancelled, this is set again. */
3719 lwp->need_step_over = 0;
3720 return 1;
3721 }
3722 }
3723
3724 current_thread = saved_thread;
3725
3726 if (debug_threads)
3727 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3728 " at 0x%s\n",
3729 lwpid_of (thread), paddress (pc));
3730
3731 return 0;
3732 }
3733
3734 /* Start a step-over operation on LWP. When LWP is stopped at a
3735 breakpoint, we need to get the breakpoint out of the way to make
3736 progress. If we let other threads run while we do that, they may
3737 pass by the breakpoint location and miss hitting it. To avoid
3738 that, a step-over momentarily stops all threads while LWP is
3739 single-stepped with the breakpoint temporarily uninserted from
3740 the inferior. When the single-step finishes, we reinsert the
3741 breakpoint, and let all threads that are supposed to be running
3742 run again.
3743
3744 On targets that don't support hardware single-step, we don't
3745 currently support full software single-stepping. Instead, we only
3746 support stepping over the thread event breakpoint, by asking the
3747 low target where to place a reinsert breakpoint. Since this
3748 routine assumes the breakpoint being stepped over is a thread event
3749 breakpoint, the return address of the current function is usually
3750 assumed to be a good enough place to set the reinsert breakpoint. */
3751
3752 static int
3753 start_step_over (struct lwp_info *lwp)
3754 {
3755 struct thread_info *thread = get_lwp_thread (lwp);
3756 struct thread_info *saved_thread;
3757 CORE_ADDR pc;
3758 int step;
3759
3760 if (debug_threads)
3761 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3762 lwpid_of (thread));
3763
3764 stop_all_lwps (1, lwp);
3765 gdb_assert (lwp->suspended == 0);
3766
3767 if (debug_threads)
3768 debug_printf ("Done stopping all threads for step-over.\n");
3769
3770 /* Note, we should always reach here with an already adjusted PC,
3771 either by GDB (if we're resuming due to GDB's request), or by our
3772 caller, if we just finished handling an internal breakpoint GDB
3773 shouldn't care about. */
3774 pc = get_pc (lwp);
3775
3776 saved_thread = current_thread;
3777 current_thread = thread;
3778
3779 lwp->bp_reinsert = pc;
3780 uninsert_breakpoints_at (pc);
3781 uninsert_fast_tracepoint_jumps_at (pc);
3782
3783 if (can_hardware_single_step ())
3784 {
3785 step = 1;
3786 }
3787 else
3788 {
3789 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3790 set_reinsert_breakpoint (raddr);
3791 step = 0;
3792 }
3793
3794 current_thread = saved_thread;
3795
3796 linux_resume_one_lwp (lwp, step, 0, NULL);
3797
3798 /* Require next event from this LWP. */
3799 step_over_bkpt = thread->entry.id;
3800 return 1;
3801 }
3802
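/* For illustration only -- not part of gdbserver: the step-over
   sequence implemented above, condensed to a straight line.  Error
   handling, the software single-step fallback, and the asynchronous
   wait between the resume and the reinsertion are omitted; in the
   real code the reinsertion happens later, in finish_step_over.  */
#if 0
static void
step_over_sketch (struct lwp_info *lwp)
{
  CORE_ADDR pc = get_pc (lwp);		/* PC assumed already adjusted.  */

  stop_all_lwps (1, lwp);		/* Freeze every other thread.  */
  uninsert_breakpoints_at (pc);		/* Lift the breakpoint at PC.  */
  linux_resume_one_lwp (lwp, 1, 0, NULL);  /* Hardware single-step.  */
  /* ... the event loop waits for the step to finish ...  */
  reinsert_breakpoints_at (pc);		/* Put the breakpoint back.  */
  unstop_all_lwps (0, lwp);		/* Let the others run again.  */
}
#endif
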
3803 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3804 start_step_over, if still there, and delete any reinsert
3805 breakpoints we've set on targets without hardware single-step. */
3806
3807 static int
3808 finish_step_over (struct lwp_info *lwp)
3809 {
3810 if (lwp->bp_reinsert != 0)
3811 {
3812 if (debug_threads)
3813 debug_printf ("Finished step over.\n");
3814
3815 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3816 may be no breakpoint to reinsert there by now. */
3817 reinsert_breakpoints_at (lwp->bp_reinsert);
3818 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3819
3820 lwp->bp_reinsert = 0;
3821
3822 /* Delete any software-single-step reinsert breakpoints. No
3823 longer needed. We don't have to worry about other threads
3824 hitting this trap, and later not being able to explain it,
3825 because we were stepping over a breakpoint, and we hold all
3826 threads but LWP stopped while doing that. */
3827 if (!can_hardware_single_step ())
3828 delete_reinsert_breakpoints ();
3829
3830 step_over_bkpt = null_ptid;
3831 return 1;
3832 }
3833 else
3834 return 0;
3835 }
3836
3837 /* This function is called once per thread. We check the thread's resume
3838 request, which will tell us whether to resume, step, or leave the thread
3839 stopped; and what signal, if any, it should be sent.
3840
3841 For threads which we aren't explicitly told otherwise, we preserve
3842 the stepping flag; this is used for stepping over gdbserver-placed
3843 breakpoints.
3844
3845 If a pending status was flagged in any thread, we queue any needed
3846 signals, since we won't actually resume. We already have a pending
3847 event to report, so we don't need to preserve any step requests;
3848 they should be re-issued if necessary. */
3849
3850 static int
3851 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3852 {
3853 struct thread_info *thread = (struct thread_info *) entry;
3854 struct lwp_info *lwp = get_thread_lwp (thread);
3855 int step;
3856 int leave_all_stopped = * (int *) arg;
3857 int leave_pending;
3858
3859 if (lwp->resume == NULL)
3860 return 0;
3861
3862 if (lwp->resume->kind == resume_stop)
3863 {
3864 if (debug_threads)
3865 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3866
3867 if (!lwp->stopped)
3868 {
3869 if (debug_threads)
3870 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3871
3872 /* Stop the thread, and wait for the event asynchronously,
3873 through the event loop. */
3874 send_sigstop (lwp);
3875 }
3876 else
3877 {
3878 if (debug_threads)
3879 debug_printf ("already stopped LWP %ld\n",
3880 lwpid_of (thread));
3881
3882 /* The LWP may have been stopped in an internal event that
3883 was not meant to be notified back to GDB (e.g., gdbserver
3884 breakpoint), so we should be reporting a stop event in
3885 this case too. */
3886
3887 /* If the thread already has a pending SIGSTOP, this is a
3888 no-op. Otherwise, something later will presumably resume
3889 the thread and this will cause it to cancel any pending
3890 operation, due to last_resume_kind == resume_stop. If
3891 the thread already has a pending status to report, we
3892 will still report it the next time we wait - see
3893 status_pending_p_callback. */
3894
3895 /* If we already have a pending signal to report, then
3896 there's no need to queue a SIGSTOP, as this means we're
3897 midway through moving the LWP out of the jumppad, and we
3898 will report the pending signal as soon as that is
3899 finished. */
3900 if (lwp->pending_signals_to_report == NULL)
3901 send_sigstop (lwp);
3902 }
3903
3904 /* For stop requests, we're done. */
3905 lwp->resume = NULL;
3906 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3907 return 0;
3908 }
3909
3910 /* If this thread, which is about to be resumed, has a pending
3911 status, then don't resume any threads - we can just report the
3912 pending status. Make sure to queue any signals that would
3913 otherwise be sent. In all-stop mode, we make this decision based
3914 on whether *any* thread has a pending status. If there's a thread
3915 that needs the step-over-breakpoint dance, then don't resume any
3916 other thread but that particular one. */
3917 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3918
3919 if (!leave_pending)
3920 {
3921 if (debug_threads)
3922 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3923
3924 step = (lwp->resume->kind == resume_step);
3925 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3926 }
3927 else
3928 {
3929 if (debug_threads)
3930 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3931
3932 /* If we have a new signal, enqueue the signal. */
3933 if (lwp->resume->sig != 0)
3934 {
3935 struct pending_signals *p_sig;
3936 p_sig = xmalloc (sizeof (*p_sig));
3937 p_sig->prev = lwp->pending_signals;
3938 p_sig->signal = lwp->resume->sig;
3939 memset (&p_sig->info, 0, sizeof (siginfo_t));
3940
3941 /* If this is the same signal we were previously stopped by,
3942 make sure to queue its siginfo. We can ignore the return
3943 value of ptrace; if it fails, we'll skip
3944 PTRACE_SETSIGINFO. */
3945 if (WIFSTOPPED (lwp->last_status)
3946 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3947 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3948 &p_sig->info);
3949
3950 lwp->pending_signals = p_sig;
3951 }
3952 }
3953
3954 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3955 lwp->resume = NULL;
3956 return 0;
3957 }
3958
3959 static void
3960 linux_resume (struct thread_resume *resume_info, size_t n)
3961 {
3962 struct thread_resume_array array = { resume_info, n };
3963 struct thread_info *need_step_over = NULL;
3964 int any_pending;
3965 int leave_all_stopped;
3966
3967 if (debug_threads)
3968 {
3969 debug_enter ();
3970 debug_printf ("linux_resume:\n");
3971 }
3972
3973 find_inferior (&all_threads, linux_set_resume_request, &array);
3974
3975 /* If there is a thread which would otherwise be resumed, which has
3976 a pending status, then don't resume any threads - we can just
3977 report the pending status. Make sure to queue any signals that
3978 would otherwise be sent. In non-stop mode, we'll apply this
3979 logic to each thread individually. We consume all pending events
3980 before considering to start a step-over (in all-stop). */
3981 any_pending = 0;
3982 if (!non_stop)
3983 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3984
3985 /* If there is a thread which would otherwise be resumed, which is
3986 stopped at a breakpoint that needs stepping over, then don't
3987 resume any threads - have it step over the breakpoint with all
3988 other threads stopped, then resume all threads again. Make sure
3989 to queue any signals that would otherwise be delivered or
3990 queued. */
3991 if (!any_pending && supports_breakpoints ())
3992 need_step_over
3993 = (struct thread_info *) find_inferior (&all_threads,
3994 need_step_over_p, NULL);
3995
3996 leave_all_stopped = (need_step_over != NULL || any_pending);
3997
3998 if (debug_threads)
3999 {
4000 if (need_step_over != NULL)
4001 debug_printf ("Not resuming all, need step over\n");
4002 else if (any_pending)
4003 debug_printf ("Not resuming, all-stop and found "
4004 "an LWP with pending status\n");
4005 else
4006 debug_printf ("Resuming, no pending status or step over needed\n");
4007 }
4008
4009 /* Even if we're leaving threads stopped, queue all signals we'd
4010 otherwise deliver. */
4011 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4012
4013 if (need_step_over)
4014 start_step_over (get_thread_lwp (need_step_over));
4015
4016 if (debug_threads)
4017 {
4018 debug_printf ("linux_resume done\n");
4019 debug_exit ();
4020 }
4021 }
4022
4023 /* This function is called once per thread. We check the thread's
4024 last resume request, which will tell us whether to resume, step, or
4025 leave the thread stopped. Any signal the client requested to be
4026 delivered has already been enqueued at this point.
4027
4028 If any thread that GDB wants running is stopped at an internal
4029 breakpoint that needs stepping over, we start a step-over operation
4030 on that particular thread, and leave all others stopped. */
4031
4032 static int
4033 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4034 {
4035 struct thread_info *thread = (struct thread_info *) entry;
4036 struct lwp_info *lwp = get_thread_lwp (thread);
4037 int step;
4038
4039 if (lwp == except)
4040 return 0;
4041
4042 if (debug_threads)
4043 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4044
4045 if (!lwp->stopped)
4046 {
4047 if (debug_threads)
4048 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4049 return 0;
4050 }
4051
4052 if (thread->last_resume_kind == resume_stop
4053 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4054 {
4055 if (debug_threads)
4056 debug_printf (" client wants LWP to remain %ld stopped\n",
4057 lwpid_of (thread));
4058 return 0;
4059 }
4060
4061 if (lwp->status_pending_p)
4062 {
4063 if (debug_threads)
4064 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4065 lwpid_of (thread));
4066 return 0;
4067 }
4068
4069 gdb_assert (lwp->suspended >= 0);
4070
4071 if (lwp->suspended)
4072 {
4073 if (debug_threads)
4074 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4075 return 0;
4076 }
4077
4078 if (thread->last_resume_kind == resume_stop
4079 && lwp->pending_signals_to_report == NULL
4080 && lwp->collecting_fast_tracepoint == 0)
4081 {
4082 /* We haven't reported this LWP as stopped yet (otherwise, the
4083 last_status.kind check above would catch it, and we wouldn't
4084 reach here). This LWP may have been momentarily paused by a
4085 stop_all_lwps call while handling, for example, another LWP's
4086 step-over. In that case, the pending expected SIGSTOP signal
4087 that was queued at vCont;t handling time will have already
4088 been consumed by wait_for_sigstop, and so we need to requeue
4089 another one here. Note that if the LWP already has a SIGSTOP
4090 pending, this is a no-op. */
4091
4092 if (debug_threads)
4093 debug_printf ("Client wants LWP %ld to stop. "
4094 "Making sure it has a SIGSTOP pending\n",
4095 lwpid_of (thread));
4096
4097 send_sigstop (lwp);
4098 }
4099
4100 step = thread->last_resume_kind == resume_step;
4101 linux_resume_one_lwp (lwp, step, 0, NULL);
4102 return 0;
4103 }
4104
4105 static int
4106 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4107 {
4108 struct thread_info *thread = (struct thread_info *) entry;
4109 struct lwp_info *lwp = get_thread_lwp (thread);
4110
4111 if (lwp == except)
4112 return 0;
4113
4114 lwp->suspended--;
4115 gdb_assert (lwp->suspended >= 0);
4116
4117 return proceed_one_lwp (entry, except);
4118 }
4119
4120 /* When we finish a step-over, set threads running again. If there's
4121 another thread that may need a step-over, now's the time to start
4122 it. Eventually, we'll move all threads past their breakpoints. */
4123
4124 static void
4125 proceed_all_lwps (void)
4126 {
4127 struct thread_info *need_step_over;
4128
4129 /* If there is a thread which would otherwise be resumed, which is
4130 stopped at a breakpoint that needs stepping over, then don't
4131 resume any threads - have it step over the breakpoint with all
4132 other threads stopped, then resume all threads again. */
4133
4134 if (supports_breakpoints ())
4135 {
4136 need_step_over
4137 = (struct thread_info *) find_inferior (&all_threads,
4138 need_step_over_p, NULL);
4139
4140 if (need_step_over != NULL)
4141 {
4142 if (debug_threads)
4143 debug_printf ("proceed_all_lwps: found "
4144 "thread %ld needing a step-over\n",
4145 lwpid_of (need_step_over));
4146
4147 start_step_over (get_thread_lwp (need_step_over));
4148 return;
4149 }
4150 }
4151
4152 if (debug_threads)
4153 debug_printf ("Proceeding, no step-over needed\n");
4154
4155 find_inferior (&all_threads, proceed_one_lwp, NULL);
4156 }
4157
4158 /* Stopped LWPs that the client wanted to be running, that don't have
4159 pending statuses, are set to run again, except for EXCEPT, if not
4160 NULL. This undoes a stop_all_lwps call. */
4161
4162 static void
4163 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4164 {
4165 if (debug_threads)
4166 {
4167 debug_enter ();
4168 if (except)
4169 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4170 lwpid_of (get_lwp_thread (except)));
4171 else
4172 debug_printf ("unstopping all lwps\n");
4173 }
4174
4175 if (unsuspend)
4176 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4177 else
4178 find_inferior (&all_threads, proceed_one_lwp, except);
4179
4180 if (debug_threads)
4181 {
4182 debug_printf ("unstop_all_lwps done\n");
4183 debug_exit ();
4184 }
4185 }
4186
4187
4188 #ifdef HAVE_LINUX_REGSETS
4189
4190 #define use_linux_regsets 1
4191
4192 /* Returns true if REGSET has been disabled. */
4193
4194 static int
4195 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4196 {
4197 return (info->disabled_regsets != NULL
4198 && info->disabled_regsets[regset - info->regsets]);
4199 }
4200
4201 /* Disable REGSET. */
4202
4203 static void
4204 disable_regset (struct regsets_info *info, struct regset_info *regset)
4205 {
4206 int dr_offset;
4207
4208 dr_offset = regset - info->regsets;
4209 if (info->disabled_regsets == NULL)
4210 info->disabled_regsets = xcalloc (1, info->num_regsets);
4211 info->disabled_regsets[dr_offset] = 1;
4212 }
4213
4214 static int
4215 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4216 struct regcache *regcache)
4217 {
4218 struct regset_info *regset;
4219 int saw_general_regs = 0;
4220 int pid;
4221 struct iovec iov;
4222
4223 pid = lwpid_of (current_thread);
4224 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4225 {
4226 void *buf, *data;
4227 int nt_type, res;
4228
4229 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4230 continue;
4231
4232 buf = xmalloc (regset->size);
4233
4234 nt_type = regset->nt_type;
4235 if (nt_type)
4236 {
4237 iov.iov_base = buf;
4238 iov.iov_len = regset->size;
4239 data = (void *) &iov;
4240 }
4241 else
4242 data = buf;
4243
4244 #ifndef __sparc__
4245 res = ptrace (regset->get_request, pid,
4246 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4247 #else
4248 res = ptrace (regset->get_request, pid, data, nt_type);
4249 #endif
4250 if (res < 0)
4251 {
4252 if (errno == EIO)
4253 {
4254 /* If we get EIO on a regset, do not try it again for
4255 this process mode. */
4256 disable_regset (regsets_info, regset);
4257 }
4258 else if (errno == ENODATA)
4259 {
4260 /* ENODATA may be returned if the regset is currently
4261 not "active". This can happen in normal operation,
4262 so suppress the warning in this case. */
4263 }
4264 else
4265 {
4266 char s[256];
4267 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4268 pid);
4269 perror (s);
4270 }
4271 }
4272 else
4273 {
4274 if (regset->type == GENERAL_REGS)
4275 saw_general_regs = 1;
4276 regset->store_function (regcache, buf);
4277 }
4278 free (buf);
4279 }
4280 if (saw_general_regs)
4281 return 0;
4282 else
4283 return 1;
4284 }
4285
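/* For illustration only -- not part of gdbserver: a minimal,
   self-contained PTRACE_GETREGSET call, the regset interface used
   above whenever REGSET->NT_TYPE is non-zero.  The kernel updates
   IOV.IOV_LEN to the number of bytes it actually transferred.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/procfs.h>		/* For elf_gregset_t.  */
#include <sys/uio.h>		/* For struct iovec.  */
#include <elf.h>		/* For NT_PRSTATUS.  */

static long
fetch_gregs_sketch (pid_t pid, elf_gregset_t *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif
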
4286 static int
4287 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4288 struct regcache *regcache)
4289 {
4290 struct regset_info *regset;
4291 int saw_general_regs = 0;
4292 int pid;
4293 struct iovec iov;
4294
4295 pid = lwpid_of (current_thread);
4296 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4297 {
4298 void *buf, *data;
4299 int nt_type, res;
4300
4301 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4302 || regset->fill_function == NULL)
4303 continue;
4304
4305 buf = xmalloc (regset->size);
4306
4307 /* First fill the buffer with the current register set contents,
4308 in case there are any items in the kernel's regset that are
4309 not in gdbserver's regcache. */
4310
4311 nt_type = regset->nt_type;
4312 if (nt_type)
4313 {
4314 iov.iov_base = buf;
4315 iov.iov_len = regset->size;
4316 data = (void *) &iov;
4317 }
4318 else
4319 data = buf;
4320
4321 #ifndef __sparc__
4322 res = ptrace (regset->get_request, pid,
4323 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4324 #else
4325 res = ptrace (regset->get_request, pid, data, nt_type);
4326 #endif
4327
4328 if (res == 0)
4329 {
4330 /* Then overlay our cached registers on that. */
4331 regset->fill_function (regcache, buf);
4332
4333 /* Only now do we write the register set. */
4334 #ifndef __sparc__
4335 res = ptrace (regset->set_request, pid,
4336 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4337 #else
4338 res = ptrace (regset->set_request, pid, data, nt_type);
4339 #endif
4340 }
4341
4342 if (res < 0)
4343 {
4344 if (errno == EIO)
4345 {
4346 /* If we get EIO on a regset, do not try it again for
4347 this process mode. */
4348 disable_regset (regsets_info, regset);
4349 }
4350 else if (errno == ESRCH)
4351 {
4352 /* At this point, ESRCH should mean the process is
4353 already gone, in which case we simply ignore attempts
4354 to change its registers. See also the related
4355 comment in linux_resume_one_lwp. */
4356 free (buf);
4357 return 0;
4358 }
4359 else
4360 {
4361 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4362 }
4363 }
4364 else if (regset->type == GENERAL_REGS)
4365 saw_general_regs = 1;
4366 free (buf);
4367 }
4368 if (saw_general_regs)
4369 return 0;
4370 else
4371 return 1;
4372 }
4373
4374 #else /* !HAVE_LINUX_REGSETS */
4375
4376 #define use_linux_regsets 0
4377 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4378 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4379
4380 #endif
4381
4382 /* Return 1 if register REGNO is supported by one of the regset ptrace
4383 calls or 0 if it has to be transferred individually. */
4384
4385 static int
4386 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4387 {
4388 unsigned char mask = 1 << (regno % 8);
4389 size_t index = regno / 8;
4390
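  /* Example: REGNO == 13 gives INDEX == 1 and MASK == 0x20, i.e. we
     test bit 5 of the second bitmap byte.  */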
4391 return (use_linux_regsets
4392 && (regs_info->regset_bitmap == NULL
4393 || (regs_info->regset_bitmap[index] & mask) != 0));
4394 }
4395
4396 #ifdef HAVE_LINUX_USRREGS
4397
4398 int
4399 register_addr (const struct usrregs_info *usrregs, int regnum)
4400 {
4401 int addr;
4402
4403 if (regnum < 0 || regnum >= usrregs->num_regs)
4404 error ("Invalid register number %d.", regnum);
4405
4406 addr = usrregs->regmap[regnum];
4407
4408 return addr;
4409 }
4410
4411 /* Fetch one register. */
4412 static void
4413 fetch_register (const struct usrregs_info *usrregs,
4414 struct regcache *regcache, int regno)
4415 {
4416 CORE_ADDR regaddr;
4417 int i, size;
4418 char *buf;
4419 int pid;
4420
4421 if (regno >= usrregs->num_regs)
4422 return;
4423 if ((*the_low_target.cannot_fetch_register) (regno))
4424 return;
4425
4426 regaddr = register_addr (usrregs, regno);
4427 if (regaddr == -1)
4428 return;
4429
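  /* PTRACE_PEEKUSER transfers whole words, so round the register
     size up to a multiple of the transfer unit; e.g. a 10-byte
     register with an 8-byte PTRACE_XFER_TYPE needs a 16-byte
     buffer.  */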
4430 size = ((register_size (regcache->tdesc, regno)
4431 + sizeof (PTRACE_XFER_TYPE) - 1)
4432 & -sizeof (PTRACE_XFER_TYPE));
4433 buf = alloca (size);
4434
4435 pid = lwpid_of (current_thread);
4436 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4437 {
4438 errno = 0;
4439 *(PTRACE_XFER_TYPE *) (buf + i) =
4440 ptrace (PTRACE_PEEKUSER, pid,
4441 /* Coerce to a uintptr_t first to avoid potential gcc warning
4442 of coercing an 8 byte integer to a 4 byte pointer. */
4443 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4444 regaddr += sizeof (PTRACE_XFER_TYPE);
4445 if (errno != 0)
4446 error ("reading register %d: %s", regno, strerror (errno));
4447 }
4448
4449 if (the_low_target.supply_ptrace_register)
4450 the_low_target.supply_ptrace_register (regcache, regno, buf);
4451 else
4452 supply_register (regcache, regno, buf);
4453 }
4454
4455 /* Store one register. */
4456 static void
4457 store_register (const struct usrregs_info *usrregs,
4458 struct regcache *regcache, int regno)
4459 {
4460 CORE_ADDR regaddr;
4461 int i, size;
4462 char *buf;
4463 int pid;
4464
4465 if (regno >= usrregs->num_regs)
4466 return;
4467 if ((*the_low_target.cannot_store_register) (regno))
4468 return;
4469
4470 regaddr = register_addr (usrregs, regno);
4471 if (regaddr == -1)
4472 return;
4473
4474 size = ((register_size (regcache->tdesc, regno)
4475 + sizeof (PTRACE_XFER_TYPE) - 1)
4476 & -sizeof (PTRACE_XFER_TYPE));
4477 buf = alloca (size);
4478 memset (buf, 0, size);
4479
4480 if (the_low_target.collect_ptrace_register)
4481 the_low_target.collect_ptrace_register (regcache, regno, buf);
4482 else
4483 collect_register (regcache, regno, buf);
4484
4485 pid = lwpid_of (current_thread);
4486 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4487 {
4488 errno = 0;
4489 ptrace (PTRACE_POKEUSER, pid,
4490 /* Coerce to a uintptr_t first to avoid potential gcc warning
4491 about coercing an 8 byte integer to a 4 byte pointer. */
4492 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4493 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4494 if (errno != 0)
4495 {
4496 /* At this point, ESRCH should mean the process is
4497 already gone, in which case we simply ignore attempts
4498 to change its registers. See also the related
4499 comment in linux_resume_one_lwp. */
4500 if (errno == ESRCH)
4501 return;
4502
4503 if ((*the_low_target.cannot_store_register) (regno) == 0)
4504 error ("writing register %d: %s", regno, strerror (errno));
4505 }
4506 regaddr += sizeof (PTRACE_XFER_TYPE);
4507 }
4508 }
4509
4510 /* Fetch all registers, or just one, from the child process.
4511 If REGNO is -1, do this for all registers, skipping any that are
4512 assumed to have been retrieved by regsets_fetch_inferior_registers,
4513 unless ALL is non-zero.
4514 Otherwise, REGNO specifies which register (so we can save time). */
4515 static void
4516 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4517 struct regcache *regcache, int regno, int all)
4518 {
4519 struct usrregs_info *usr = regs_info->usrregs;
4520
4521 if (regno == -1)
4522 {
4523 for (regno = 0; regno < usr->num_regs; regno++)
4524 if (all || !linux_register_in_regsets (regs_info, regno))
4525 fetch_register (usr, regcache, regno);
4526 }
4527 else
4528 fetch_register (usr, regcache, regno);
4529 }
4530
4531 /* Store our register values back into the inferior.
4532 If REGNO is -1, do this for all registers, skipping any that are
4533 assumed to have been saved by regsets_store_inferior_registers,
4534 unless ALL is non-zero.
4535 Otherwise, REGNO specifies which register (so we can save time). */
4536 static void
4537 usr_store_inferior_registers (const struct regs_info *regs_info,
4538 struct regcache *regcache, int regno, int all)
4539 {
4540 struct usrregs_info *usr = regs_info->usrregs;
4541
4542 if (regno == -1)
4543 {
4544 for (regno = 0; regno < usr->num_regs; regno++)
4545 if (all || !linux_register_in_regsets (regs_info, regno))
4546 store_register (usr, regcache, regno);
4547 }
4548 else
4549 store_register (usr, regcache, regno);
4550 }
4551
4552 #else /* !HAVE_LINUX_USRREGS */
4553
4554 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4555 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4556
4557 #endif
4558
4559
4560 void
4561 linux_fetch_registers (struct regcache *regcache, int regno)
4562 {
4563 int use_regsets;
4564 int all = 0;
4565 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4566
4567 if (regno == -1)
4568 {
4569 if (the_low_target.fetch_register != NULL
4570 && regs_info->usrregs != NULL)
4571 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4572 (*the_low_target.fetch_register) (regcache, regno);
4573
4574 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4575 if (regs_info->usrregs != NULL)
4576 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4577 }
4578 else
4579 {
4580 if (the_low_target.fetch_register != NULL
4581 && (*the_low_target.fetch_register) (regcache, regno))
4582 return;
4583
4584 use_regsets = linux_register_in_regsets (regs_info, regno);
4585 if (use_regsets)
4586 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4587 regcache);
4588 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4589 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4590 }
4591 }
4592
4593 void
4594 linux_store_registers (struct regcache *regcache, int regno)
4595 {
4596 int use_regsets;
4597 int all = 0;
4598 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4599
4600 if (regno == -1)
4601 {
4602 all = regsets_store_inferior_registers (regs_info->regsets_info,
4603 regcache);
4604 if (regs_info->usrregs != NULL)
4605 usr_store_inferior_registers (regs_info, regcache, regno, all);
4606 }
4607 else
4608 {
4609 use_regsets = linux_register_in_regsets (regs_info, regno);
4610 if (use_regsets)
4611 all = regsets_store_inferior_registers (regs_info->regsets_info,
4612 regcache);
4613 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4614 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4615 }
4616 }
4617
4618
4619 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4620 to debugger memory starting at MYADDR. */
4621
4622 static int
4623 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4624 {
4625 int pid = lwpid_of (current_thread);
4626 register PTRACE_XFER_TYPE *buffer;
4627 register CORE_ADDR addr;
4628 register int count;
4629 char filename[64];
4630 register int i;
4631 int ret;
4632 int fd;
4633
4634 /* Try using /proc. Don't bother for one word. */
4635 if (len >= 3 * sizeof (long))
4636 {
4637 int bytes;
4638
4639 /* We could keep this file open and cache it - possibly one per
4640 thread. That requires some juggling, but is even faster. */
4641 sprintf (filename, "/proc/%d/mem", pid);
4642 fd = open (filename, O_RDONLY | O_LARGEFILE);
4643 if (fd == -1)
4644 goto no_proc;
4645
4646 /* If pread64 is available, use it. It's faster if the kernel
4647 supports it (only one syscall), and it's 64-bit safe even on
4648 32-bit platforms (for instance, SPARC debugging a SPARC64
4649 application). */
4650 #ifdef HAVE_PREAD64
4651 bytes = pread64 (fd, myaddr, len, memaddr);
4652 #else
4653 bytes = -1;
4654 if (lseek (fd, memaddr, SEEK_SET) != -1)
4655 bytes = read (fd, myaddr, len);
4656 #endif
4657
4658 close (fd);
4659 if (bytes == len)
4660 return 0;
4661
4662 /* Some data was read, we'll try to get the rest with ptrace. */
4663 if (bytes > 0)
4664 {
4665 memaddr += bytes;
4666 myaddr += bytes;
4667 len -= bytes;
4668 }
4669 }
4670
4671 no_proc:
4672 /* Round starting address down to longword boundary. */
4673 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4674 /* Round ending address up; get number of longwords that makes. */
4675 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4676 / sizeof (PTRACE_XFER_TYPE));
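  /* Worked example: with 4-byte words, MEMADDR == 0x1003 and LEN == 6
     give ADDR == 0x1000 and COUNT == 3, covering the words at 0x1000,
     0x1004 and 0x1008.  */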
4677 /* Allocate buffer of that many longwords. */
4678 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4679
4680 /* Read all the longwords. */
4681 errno = 0;
4682 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4683 {
4684 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4685 about coercing an 8 byte integer to a 4 byte pointer. */
4686 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4687 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4688 (PTRACE_TYPE_ARG4) 0);
4689 if (errno)
4690 break;
4691 }
4692 ret = errno;
4693
4694 /* Copy appropriate bytes out of the buffer. */
4695 if (i > 0)
4696 {
4697 i *= sizeof (PTRACE_XFER_TYPE);
4698 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4699 memcpy (myaddr,
4700 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4701 i < len ? i : len);
4702 }
4703
4704 return ret;
4705 }
4706
4707 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4708 memory at MEMADDR. On failure (cannot write to the inferior)
4709 returns the value of errno. Always succeeds if LEN is zero. */
4710
4711 static int
4712 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4713 {
4714 register int i;
4715 /* Round starting address down to longword boundary. */
4716 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4717 /* Round ending address up; get number of longwords that makes. */
4718 register int count
4719 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4720 / sizeof (PTRACE_XFER_TYPE);
4721
4722 /* Allocate buffer of that many longwords. */
4723 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4724 alloca (count * sizeof (PTRACE_XFER_TYPE));
4725
4726 int pid = lwpid_of (current_thread);
4727
4728 if (len == 0)
4729 {
4730 /* Zero length write always succeeds. */
4731 return 0;
4732 }
4733
4734 if (debug_threads)
4735 {
4736 /* Dump up to four bytes. Copy via memcpy rather than a direct
4737 4-byte load, to avoid reading past the end of MYADDR when
4738 LEN < 4 and to avoid an unaligned access; this is debug
4739 output only. */
4740 unsigned int val = 0;
4741
4742 memcpy (&val, myaddr, len < 4 ? len : 4);
4744 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4745 val, (long)memaddr);
4746 }
4747
4748 /* Fill start and end extra bytes of buffer with existing memory data. */
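  /* E.g. writing 2 bytes at 0x1003 with 4-byte words touches the
     words at 0x1000 and 0x1004, so both must be read first to
     preserve the bytes around the ones being written.  */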
4749
4750 errno = 0;
4751 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4752 about coercing an 8 byte integer to a 4 byte pointer. */
4753 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4754 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4755 (PTRACE_TYPE_ARG4) 0);
4756 if (errno)
4757 return errno;
4758
4759 if (count > 1)
4760 {
4761 errno = 0;
4762 buffer[count - 1]
4763 = ptrace (PTRACE_PEEKTEXT, pid,
4764 /* Coerce to a uintptr_t first to avoid potential gcc warning
4765 about coercing an 8 byte integer to a 4 byte pointer. */
4766 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4767 * sizeof (PTRACE_XFER_TYPE)),
4768 (PTRACE_TYPE_ARG4) 0);
4769 if (errno)
4770 return errno;
4771 }
4772
4773 /* Copy data to be written over corresponding part of buffer. */
4774
4775 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4776 myaddr, len);
4777
4778 /* Write the entire buffer. */
4779
4780 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4781 {
4782 errno = 0;
4783 ptrace (PTRACE_POKETEXT, pid,
4784 /* Coerce to a uintptr_t first to avoid potential gcc warning
4785 about coercing an 8 byte integer to a 4 byte pointer. */
4786 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4787 (PTRACE_TYPE_ARG4) buffer[i]);
4788 if (errno)
4789 return errno;
4790 }
4791
4792 return 0;
4793 }
4794
4795 static void
4796 linux_look_up_symbols (void)
4797 {
4798 #ifdef USE_THREAD_DB
4799 struct process_info *proc = current_process ();
4800
4801 if (proc->private->thread_db != NULL)
4802 return;
4803
4804 /* If the kernel supports tracing clones, then we don't need to
4805 use the magic thread event breakpoint to learn about
4806 threads. */
4807 thread_db_init (!linux_supports_traceclone ());
4808 #endif
4809 }
4810
4811 static void
4812 linux_request_interrupt (void)
4813 {
4814 extern unsigned long signal_pid;
4815
4816 /* Send a SIGINT to the process group. This acts just like the user
4817 typed a ^C on the controlling terminal. */
4818 kill (-signal_pid, SIGINT);
4819 }
4820
4821 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4822 to debugger memory starting at MYADDR. */
4823
4824 static int
4825 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4826 {
4827 char filename[PATH_MAX];
4828 int fd, n;
4829 int pid = lwpid_of (current_thread);
4830
4831 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4832
4833 fd = open (filename, O_RDONLY);
4834 if (fd < 0)
4835 return -1;
4836
4837 if (offset != (CORE_ADDR) 0
4838 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4839 n = -1;
4840 else
4841 n = read (fd, myaddr, len);
4842
4843 close (fd);
4844
4845 return n;
4846 }
4847
4848 /* These breakpoint and watchpoint related wrapper functions simply
4849 pass on the function call if the target has registered a
4850 corresponding function. */
4851
4852 static int
4853 linux_supports_z_point_type (char z_type)
4854 {
4855 return (the_low_target.supports_z_point_type != NULL
4856 && the_low_target.supports_z_point_type (z_type));
4857 }
4858
4859 static int
4860 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4861 int size, struct raw_breakpoint *bp)
4862 {
4863 if (the_low_target.insert_point != NULL)
4864 return the_low_target.insert_point (type, addr, size, bp);
4865 else
4866 /* Unsupported (see target.h). */
4867 return 1;
4868 }
4869
4870 static int
4871 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4872 int size, struct raw_breakpoint *bp)
4873 {
4874 if (the_low_target.remove_point != NULL)
4875 return the_low_target.remove_point (type, addr, size, bp);
4876 else
4877 /* Unsupported (see target.h). */
4878 return 1;
4879 }
4880
4881 static int
4882 linux_stopped_by_watchpoint (void)
4883 {
4884 struct lwp_info *lwp = get_thread_lwp (current_thread);
4885
4886 return lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
4887 }
4888
4889 static CORE_ADDR
4890 linux_stopped_data_address (void)
4891 {
4892 struct lwp_info *lwp = get_thread_lwp (current_thread);
4893
4894 return lwp->stopped_data_address;
4895 }
4896
4897 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4898 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4899 && defined(PT_TEXT_END_ADDR)
4900
4901 /* This is only used for targets that define PT_TEXT_ADDR,
4902 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, the
4903 target presumably has other ways of acquiring this information,
4904 such as loadmaps. */
4905
4906 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4907 to tell gdb about. */
4908
4909 static int
4910 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4911 {
4912 unsigned long text, text_end, data;
4913 int pid = lwpid_of (get_thread_lwp (current_thread));
4914
4915 errno = 0;
4916
4917 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4918 (PTRACE_TYPE_ARG4) 0);
4919 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4920 (PTRACE_TYPE_ARG4) 0);
4921 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4922 (PTRACE_TYPE_ARG4) 0);
4923
4924 if (errno == 0)
4925 {
4926 /* Both text and data offsets produced at compile-time (and so
4927 used by gdb) are relative to the beginning of the program,
4928 with the data segment immediately following the text segment.
4929 However, the actual runtime layout in memory may put the data
4930 somewhere else, so when we send gdb a data base-address, we
4931 use the real data base address and subtract the compile-time
4932 data base-address from it (which is just the length of the
4933 text segment). BSS immediately follows data in both
4934 cases. */
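      /* Worked example: TEXT == 0x10000, TEXT_END == 0x18000 and
         DATA == 0x20000 give *TEXT_P == 0x10000 and *DATA_P ==
         0x20000 - 0x8000 == 0x18000, matching gdb's compile-time
         picture of data immediately following text.  */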
4935 *text_p = text;
4936 *data_p = data - (text_end - text);
4937
4938 return 1;
4939 }
4940 return 0;
4941 }
4942 #endif
4943
4944 static int
4945 linux_qxfer_osdata (const char *annex,
4946 unsigned char *readbuf, unsigned const char *writebuf,
4947 CORE_ADDR offset, int len)
4948 {
4949 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4950 }
4951
4952 /* Convert a native/host siginfo object, into/from the siginfo in the
4953 layout of the inferiors' architecture. */
4954
4955 static void
4956 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4957 {
4958 int done = 0;
4959
4960 if (the_low_target.siginfo_fixup != NULL)
4961 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4962
4963 /* If there was no callback, or the callback didn't do anything,
4964 then just do a straight memcpy. */
4965 if (!done)
4966 {
4967 if (direction == 1)
4968 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4969 else
4970 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4971 }
4972 }
4973
4974 static int
4975 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4976 unsigned const char *writebuf, CORE_ADDR offset, int len)
4977 {
4978 int pid;
4979 siginfo_t siginfo;
4980 char inf_siginfo[sizeof (siginfo_t)];
4981
4982 if (current_thread == NULL)
4983 return -1;
4984
4985 pid = lwpid_of (current_thread);
4986
4987 if (debug_threads)
4988 debug_printf ("%s siginfo for lwp %d.\n",
4989 readbuf != NULL ? "Reading" : "Writing",
4990 pid);
4991
4992 if (offset >= sizeof (siginfo))
4993 return -1;
4994
4995 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4996 return -1;
4997
4998 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4999 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5000 inferior with a 64-bit GDBSERVER should look the same as debugging it
5001 with a 32-bit GDBSERVER, we need to convert it. */
5002 siginfo_fixup (&siginfo, inf_siginfo, 0);
5003
5004 if (offset + len > sizeof (siginfo))
5005 len = sizeof (siginfo) - offset;
5006
5007 if (readbuf != NULL)
5008 memcpy (readbuf, inf_siginfo + offset, len);
5009 else
5010 {
5011 memcpy (inf_siginfo + offset, writebuf, len);
5012
5013 /* Convert back to ptrace layout before flushing it out. */
5014 siginfo_fixup (&siginfo, inf_siginfo, 1);
5015
5016 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5017 return -1;
5018 }
5019
5020 return len;
5021 }
5022
5023 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5024 it notifies us when children change state; it also serves as the
5025 handler for the sigsuspend in my_waitpid. */
5026
5027 static void
5028 sigchld_handler (int signo)
5029 {
5030 int old_errno = errno;
5031
5032 if (debug_threads)
5033 {
5034 do
5035 {
5036 /* fprintf is not async-signal-safe, so call write
5037 directly. */
5038 if (write (2, "sigchld_handler\n",
5039 sizeof ("sigchld_handler\n") - 1) < 0)
5040 break; /* just ignore */
5041 } while (0);
5042 }
5043
5044 if (target_is_async_p ())
5045 async_file_mark (); /* trigger a linux_wait */
5046
5047 errno = old_errno;
5048 }
5049
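/* For illustration only -- not part of gdbserver: the "self-pipe"
   pattern that the handler above and async_file_mark rely on.  A
   signal handler may only call async-signal-safe functions, so it
   writes one byte to a pipe whose read end is registered with the
   event loop; the loop then handles the event in normal context.
   The names below are made up for the sketch.  */
#if 0
#include <unistd.h>
#include <errno.h>

static int self_pipe[2];	/* Created with pipe and set O_NONBLOCK.  */

static void
sketch_sigchld_handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe.  A full pipe just means a wakeup is
     already pending, so the error can be ignored.  */
  if (write (self_pipe[1], "+", 1) < 0)
    ;				/* Just ignore.  */

  errno = old_errno;
}
#endif
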
5050 static int
5051 linux_supports_non_stop (void)
5052 {
5053 return 1;
5054 }
5055
5056 static int
5057 linux_async (int enable)
5058 {
5059 int previous = target_is_async_p ();
5060
5061 if (debug_threads)
5062 debug_printf ("linux_async (%d), previous=%d\n",
5063 enable, previous);
5064
5065 if (previous != enable)
5066 {
5067 sigset_t mask;
5068 sigemptyset (&mask);
5069 sigaddset (&mask, SIGCHLD);
5070
5071 sigprocmask (SIG_BLOCK, &mask, NULL);
5072
5073 if (enable)
5074 {
5075 if (pipe (linux_event_pipe) == -1)
5076 {
5077 linux_event_pipe[0] = -1;
5078 linux_event_pipe[1] = -1;
5079 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5080
5081 warning ("creating event pipe failed.");
5082 return previous;
5083 }
5084
5085 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5086 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5087
5088 /* Register the event loop handler. */
5089 add_file_handler (linux_event_pipe[0],
5090 handle_target_event, NULL);
5091
5092 /* Always trigger a linux_wait. */
5093 async_file_mark ();
5094 }
5095 else
5096 {
5097 delete_file_handler (linux_event_pipe[0]);
5098
5099 close (linux_event_pipe[0]);
5100 close (linux_event_pipe[1]);
5101 linux_event_pipe[0] = -1;
5102 linux_event_pipe[1] = -1;
5103 }
5104
5105 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5106 }
5107
5108 return previous;
5109 }
5110
5111 static int
5112 linux_start_non_stop (int nonstop)
5113 {
5114 /* Register or unregister from event-loop accordingly. */
5115 linux_async (nonstop);
5116
5117 if (target_is_async_p () != (nonstop != 0))
5118 return -1;
5119
5120 return 0;
5121 }
5122
5123 static int
5124 linux_supports_multi_process (void)
5125 {
5126 return 1;
5127 }
5128
5129 static int
5130 linux_supports_disable_randomization (void)
5131 {
5132 #ifdef HAVE_PERSONALITY
5133 return 1;
5134 #else
5135 return 0;
5136 #endif
5137 }
5138
5139 static int
5140 linux_supports_agent (void)
5141 {
5142 return 1;
5143 }
5144
5145 static int
5146 linux_supports_range_stepping (void)
5147 {
5148 if (the_low_target.supports_range_stepping == NULL)
5149 return 0;
5150
5151 return (*the_low_target.supports_range_stepping) ();
5152 }
5153
5154 /* Enumerate spufs IDs for process PID. */
5155 static int
5156 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5157 {
5158 int pos = 0;
5159 int written = 0;
5160 char path[128];
5161 DIR *dir;
5162 struct dirent *entry;
5163
5164 sprintf (path, "/proc/%ld/fd", pid);
5165 dir = opendir (path);
5166 if (!dir)
5167 return -1;
5168
5169 rewinddir (dir);
5170 while ((entry = readdir (dir)) != NULL)
5171 {
5172 struct stat st;
5173 struct statfs stfs;
5174 int fd;
5175
5176 fd = atoi (entry->d_name);
5177 if (!fd)
5178 continue;
5179
5180 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5181 if (stat (path, &st) != 0)
5182 continue;
5183 if (!S_ISDIR (st.st_mode))
5184 continue;
5185
5186 if (statfs (path, &stfs) != 0)
5187 continue;
5188 if (stfs.f_type != SPUFS_MAGIC)
5189 continue;
5190
5191 if (pos >= offset && pos + 4 <= offset + len)
5192 {
5193 *(unsigned int *)(buf + pos - offset) = fd;
5194 written += 4;
5195 }
5196 pos += 4;
5197 }
5198
5199 closedir (dir);
5200 return written;
5201 }
5202
5203 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5204 object type, using the /proc file system. */
5205 static int
5206 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5207 unsigned const char *writebuf,
5208 CORE_ADDR offset, int len)
5209 {
5210 long pid = lwpid_of (current_thread);
5211 char buf[128];
5212 int fd = 0;
5213 int ret = 0;
5214
5215 if (!writebuf && !readbuf)
5216 return -1;
5217
5218 if (!*annex)
5219 {
5220 if (!readbuf)
5221 return -1;
5222 else
5223 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5224 }
5225
5226 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5227 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
5228 if (fd <= 0)
5229 return -1;
5230
5231 if (offset != 0
5232 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5233 {
5234 close (fd);
5235 return 0;
5236 }
5237
5238 if (writebuf)
5239 ret = write (fd, writebuf, (size_t) len);
5240 else
5241 ret = read (fd, readbuf, (size_t) len);
5242
5243 close (fd);
5244 return ret;
5245 }
5246
5247 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5248 struct target_loadseg
5249 {
5250 /* Core address to which the segment is mapped. */
5251 Elf32_Addr addr;
5252 /* VMA recorded in the program header. */
5253 Elf32_Addr p_vaddr;
5254 /* Size of this segment in memory. */
5255 Elf32_Word p_memsz;
5256 };
5257
5258 # if defined PT_GETDSBT
5259 struct target_loadmap
5260 {
5261 /* Protocol version number, must be zero. */
5262 Elf32_Word version;
5263 /* Pointer to the DSBT table, its size, and the DSBT index. */
5264 unsigned *dsbt_table;
5265 unsigned dsbt_size, dsbt_index;
5266 /* Number of segments in this map. */
5267 Elf32_Word nsegs;
5268 /* The actual memory map. */
5269 struct target_loadseg segs[/*nsegs*/];
5270 };
5271 # define LINUX_LOADMAP PT_GETDSBT
5272 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5273 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5274 # else
5275 struct target_loadmap
5276 {
5277 /* Protocol version number, must be zero. */
5278 Elf32_Half version;
5279 /* Number of segments in this map. */
5280 Elf32_Half nsegs;
5281 /* The actual memory map. */
5282 struct target_loadseg segs[/*nsegs*/];
5283 };
5284 # define LINUX_LOADMAP PTRACE_GETFDPIC
5285 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5286 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5287 # endif
5288
5289 static int
5290 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5291 unsigned char *myaddr, unsigned int len)
5292 {
5293 int pid = lwpid_of (current_thread);
5294 int addr = -1;
5295 struct target_loadmap *data = NULL;
5296 unsigned int actual_length, copy_length;
5297
5298 if (strcmp (annex, "exec") == 0)
5299 addr = (int) LINUX_LOADMAP_EXEC;
5300 else if (strcmp (annex, "interp") == 0)
5301 addr = (int) LINUX_LOADMAP_INTERP;
5302 else
5303 return -1;
5304
5305 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5306 return -1;
5307
5308 if (data == NULL)
5309 return -1;
5310
5311 actual_length = sizeof (struct target_loadmap)
5312 + sizeof (struct target_loadseg) * data->nsegs;
5313
5314 if (offset < 0 || offset > actual_length)
5315 return -1;
5316
5317 copy_length = actual_length - offset < len ? actual_length - offset : len;
5318 memcpy (myaddr, (char *) data + offset, copy_length);
5319 return copy_length;
5320 }
5321 #else
5322 # define linux_read_loadmap NULL
5323 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5324
5325 static void
5326 linux_process_qsupported (const char *query)
5327 {
5328 if (the_low_target.process_qsupported != NULL)
5329 the_low_target.process_qsupported (query);
5330 }
5331
5332 static int
5333 linux_supports_tracepoints (void)
5334 {
5335 if (the_low_target.supports_tracepoints == NULL)
5336 return 0;
5337
5338 return (*the_low_target.supports_tracepoints) ();
5339 }
5340
5341 static CORE_ADDR
5342 linux_read_pc (struct regcache *regcache)
5343 {
5344 if (the_low_target.get_pc == NULL)
5345 return 0;
5346
5347 return (*the_low_target.get_pc) (regcache);
5348 }
5349
5350 static void
5351 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5352 {
5353 gdb_assert (the_low_target.set_pc != NULL);
5354
5355 (*the_low_target.set_pc) (regcache, pc);
5356 }
5357
5358 static int
5359 linux_thread_stopped (struct thread_info *thread)
5360 {
5361 return get_thread_lwp (thread)->stopped;
5362 }
5363
5364 /* This exposes stop-all-threads functionality to other modules. */
5365
5366 static void
5367 linux_pause_all (int freeze)
5368 {
5369 stop_all_lwps (freeze, NULL);
5370 }
5371
5372 /* This exposes unstop-all-threads functionality to other gdbserver
5373 modules. */
5374
5375 static void
5376 linux_unpause_all (int unfreeze)
5377 {
5378 unstop_all_lwps (unfreeze, NULL);
5379 }
5380
5381 static int
5382 linux_prepare_to_access_memory (void)
5383 {
5384 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5385 running LWP. */
5386 if (non_stop)
5387 linux_pause_all (1);
5388 return 0;
5389 }
5390
5391 static void
5392 linux_done_accessing_memory (void)
5393 {
5394 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5395 running LWP. */
5396 if (non_stop)
5397 linux_unpause_all (1);
5398 }
5399
5400 static int
5401 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5402 CORE_ADDR collector,
5403 CORE_ADDR lockaddr,
5404 ULONGEST orig_size,
5405 CORE_ADDR *jump_entry,
5406 CORE_ADDR *trampoline,
5407 ULONGEST *trampoline_size,
5408 unsigned char *jjump_pad_insn,
5409 ULONGEST *jjump_pad_insn_size,
5410 CORE_ADDR *adjusted_insn_addr,
5411 CORE_ADDR *adjusted_insn_addr_end,
5412 char *err)
5413 {
5414 return (*the_low_target.install_fast_tracepoint_jump_pad)
5415 (tpoint, tpaddr, collector, lockaddr, orig_size,
5416 jump_entry, trampoline, trampoline_size,
5417 jjump_pad_insn, jjump_pad_insn_size,
5418 adjusted_insn_addr, adjusted_insn_addr_end,
5419 err);
5420 }
5421
5422 static struct emit_ops *
5423 linux_emit_ops (void)
5424 {
5425 if (the_low_target.emit_ops != NULL)
5426 return (*the_low_target.emit_ops) ();
5427 else
5428 return NULL;
5429 }
5430
5431 static int
5432 linux_get_min_fast_tracepoint_insn_len (void)
5433 {
5434 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5435 }
5436
5437 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5438
5439 static int
5440 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5441 CORE_ADDR *phdr_memaddr, int *num_phdr)
5442 {
5443 char filename[PATH_MAX];
5444 int fd;
5445 const int auxv_size = is_elf64
5446 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5447 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5448
5449 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5450
5451 fd = open (filename, O_RDONLY);
5452 if (fd < 0)
5453 return 1;
5454
5455 *phdr_memaddr = 0;
5456 *num_phdr = 0;
5457 while (read (fd, buf, auxv_size) == auxv_size
5458 && (*phdr_memaddr == 0 || *num_phdr == 0))
5459 {
5460 if (is_elf64)
5461 {
5462 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5463
5464 switch (aux->a_type)
5465 {
5466 case AT_PHDR:
5467 *phdr_memaddr = aux->a_un.a_val;
5468 break;
5469 case AT_PHNUM:
5470 *num_phdr = aux->a_un.a_val;
5471 break;
5472 }
5473 }
5474 else
5475 {
5476 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5477
5478 switch (aux->a_type)
5479 {
5480 case AT_PHDR:
5481 *phdr_memaddr = aux->a_un.a_val;
5482 break;
5483 case AT_PHNUM:
5484 *num_phdr = aux->a_un.a_val;
5485 break;
5486 }
5487 }
5488 }
5489
5490 close (fd);
5491
5492 if (*phdr_memaddr == 0 || *num_phdr == 0)
5493 {
5494 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5495 "phdr_memaddr = %ld, phdr_num = %d",
5496 (long) *phdr_memaddr, *num_phdr);
5497 return 2;
5498 }
5499
5500 return 0;
5501 }
5502
5503 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5504
5505 static CORE_ADDR
5506 get_dynamic (const int pid, const int is_elf64)
5507 {
5508 CORE_ADDR phdr_memaddr, relocation;
5509 int num_phdr, i;
5510 unsigned char *phdr_buf;
5511 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5512
5513 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5514 return 0;
5515
5516 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5517 phdr_buf = alloca (num_phdr * phdr_size);
5518
5519 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5520 return 0;
5521
5522 /* Compute relocation: it is expected to be 0 for "regular" executables,
5523 non-zero for PIE ones. */
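  /* E.g. a PIE whose PT_PHDR has p_vaddr 0x40, loaded so that AT_PHDR
     reports 0x555555554040, gives RELOCATION == 0x555555554000, the
     runtime load bias.  */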
5524 relocation = -1;
5525 for (i = 0; relocation == -1 && i < num_phdr; i++)
5526 if (is_elf64)
5527 {
5528 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5529
5530 if (p->p_type == PT_PHDR)
5531 relocation = phdr_memaddr - p->p_vaddr;
5532 }
5533 else
5534 {
5535 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5536
5537 if (p->p_type == PT_PHDR)
5538 relocation = phdr_memaddr - p->p_vaddr;
5539 }
5540
5541 if (relocation == -1)
5542 {
5543 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5544 all real-world executables, including PIE executables, always have
5545 PT_PHDR present. PT_PHDR is absent from some shared libraries and
5546 from fpc (Free Pascal 2.4) binaries, but neither of those needs or
5547 provides DT_DEBUG anyway (fpc binaries are statically linked).
5548
5549 Therefore, if DT_DEBUG exists, PT_PHDR is always present as well.
5550
5551 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5552
5553 return 0;
5554 }
5555
5556 for (i = 0; i < num_phdr; i++)
5557 {
5558 if (is_elf64)
5559 {
5560 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5561
5562 if (p->p_type == PT_DYNAMIC)
5563 return p->p_vaddr + relocation;
5564 }
5565 else
5566 {
5567 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5568
5569 if (p->p_type == PT_DYNAMIC)
5570 return p->p_vaddr + relocation;
5571 }
5572 }
5573
5574 return 0;
5575 }
5576
5577 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5578 can be 0 if the inferior does not yet have the library list initialized.
5579 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5580 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5581
5582 static CORE_ADDR
5583 get_r_debug (const int pid, const int is_elf64)
5584 {
5585 CORE_ADDR dynamic_memaddr;
5586 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5587 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5588 CORE_ADDR map = -1;
5589
5590 dynamic_memaddr = get_dynamic (pid, is_elf64);
5591 if (dynamic_memaddr == 0)
5592 return map;
5593
5594 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5595 {
5596 if (is_elf64)
5597 {
5598 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5599 #ifdef DT_MIPS_RLD_MAP
5600 union
5601 {
5602 Elf64_Xword map;
5603 unsigned char buf[sizeof (Elf64_Xword)];
5604 }
5605 rld_map;
5606
5607 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5608 {
5609 if (linux_read_memory (dyn->d_un.d_val,
5610 rld_map.buf, sizeof (rld_map.buf)) == 0)
5611 return rld_map.map;
5612 else
5613 break;
5614 }
5615 #endif /* DT_MIPS_RLD_MAP */
5616
5617 if (dyn->d_tag == DT_DEBUG && map == -1)
5618 map = dyn->d_un.d_val;
5619
5620 if (dyn->d_tag == DT_NULL)
5621 break;
5622 }
5623 else
5624 {
5625 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5626 #ifdef DT_MIPS_RLD_MAP
5627 union
5628 {
5629 Elf32_Word map;
5630 unsigned char buf[sizeof (Elf32_Word)];
5631 }
5632 rld_map;
5633
5634 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5635 {
5636 if (linux_read_memory (dyn->d_un.d_val,
5637 rld_map.buf, sizeof (rld_map.buf)) == 0)
5638 return rld_map.map;
5639 else
5640 break;
5641 }
5642 #endif /* DT_MIPS_RLD_MAP */
5643
5644 if (dyn->d_tag == DT_DEBUG && map == -1)
5645 map = dyn->d_un.d_val;
5646
5647 if (dyn->d_tag == DT_NULL)
5648 break;
5649 }
5650
5651 dynamic_memaddr += dyn_size;
5652 }
5653
5654 return map;
5655 }
5656
5657 /* Read one pointer from MEMADDR in the inferior. */
5658
5659 static int
5660 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5661 {
5662 int ret;
5663
5664 /* Go through a union so this works on either big or little endian
5665 hosts, when the inferior's pointer size is smaller than the size
5666 of CORE_ADDR. It is assumed the inferior's endianness is the
5667 same as the superior's. */
5668 union
5669 {
5670 CORE_ADDR core_addr;
5671 unsigned int ui;
5672 unsigned char uc;
5673 } addr;
5674
5675 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5676 if (ret == 0)
5677 {
5678 if (ptr_size == sizeof (CORE_ADDR))
5679 *ptr = addr.core_addr;
5680 else if (ptr_size == sizeof (unsigned int))
5681 *ptr = addr.ui;
5682 else
5683 gdb_assert_not_reached ("unhandled pointer size");
5684 }
5685 return ret;
5686 }
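/* For example, when a 64-bit gdbserver reads a pointer from a 32-bit
inferior (ptr_size == 4), linux_read_memory fills only the first four
bytes of the union, so *PTR must be widened from addr.ui rather than
read through addr.core_addr; on a big-endian host the two would
otherwise disagree. */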
5687
5688 struct link_map_offsets
5689 {
5690 /* Offset and size of r_debug.r_version. */
5691 int r_version_offset;
5692
5693 /* Offset and size of r_debug.r_map. */
5694 int r_map_offset;
5695
5696 /* Offset to l_addr field in struct link_map. */
5697 int l_addr_offset;
5698
5699 /* Offset to l_name field in struct link_map. */
5700 int l_name_offset;
5701
5702 /* Offset to l_ld field in struct link_map. */
5703 int l_ld_offset;
5704
5705 /* Offset to l_next field in struct link_map. */
5706 int l_next_offset;
5707
5708 /* Offset to l_prev field in struct link_map. */
5709 int l_prev_offset;
5710 };
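/* This structure describes the layout of glibc's <link.h> types; a
sketch, not the authoritative declarations:

struct r_debug { int r_version; struct link_map *r_map; ... };
struct link_map { ElfW(Addr) l_addr; char *l_name; ElfW(Dyn) *l_ld;
struct link_map *l_next, *l_prev; ... };

With natural alignment these give the 32-bit and 64-bit offset tables
defined in linux_qxfer_libraries_svr4 below. */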
5711
5712 /* Construct qXfer:libraries-svr4:read reply. */
5713
5714 static int
5715 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5716 unsigned const char *writebuf,
5717 CORE_ADDR offset, int len)
5718 {
5719 char *document;
5720 unsigned document_len;
5721 struct process_info_private *const priv = current_process ()->private;
5722 char filename[PATH_MAX];
5723 int pid, is_elf64;
5724
5725 static const struct link_map_offsets lmo_32bit_offsets =
5726 {
5727 0, /* r_version offset. */
5728 4, /* r_debug.r_map offset. */
5729 0, /* l_addr offset in link_map. */
5730 4, /* l_name offset in link_map. */
5731 8, /* l_ld offset in link_map. */
5732 12, /* l_next offset in link_map. */
5733 16 /* l_prev offset in link_map. */
5734 };
5735
5736 static const struct link_map_offsets lmo_64bit_offsets =
5737 {
5738 0, /* r_version offset. */
5739 8, /* r_debug.r_map offset. */
5740 0, /* l_addr offset in link_map. */
5741 8, /* l_name offset in link_map. */
5742 16, /* l_ld offset in link_map. */
5743 24, /* l_next offset in link_map. */
5744 32 /* l_prev offset in link_map. */
5745 };
5746 const struct link_map_offsets *lmo;
5747 unsigned int machine;
5748 int ptr_size;
5749 CORE_ADDR lm_addr = 0, lm_prev = 0;
5750 int allocated = 1024;
5751 char *p;
5752 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5753 int header_done = 0;
5754
5755 if (writebuf != NULL)
5756 return -2;
5757 if (readbuf == NULL)
5758 return -1;
5759
5760 pid = lwpid_of (current_thread);
5761 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5762 is_elf64 = elf_64_file_p (filename, &machine);
5763 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5764 ptr_size = is_elf64 ? 8 : 4;
5765
5766 while (annex[0] != '\0')
5767 {
5768 const char *sep;
5769 CORE_ADDR *addrp;
5770 int len;
5771
5772 sep = strchr (annex, '=');
5773 if (sep == NULL)
5774 break;
5775
5776 len = sep - annex;
5777 if (len == 5 && strncmp (annex, "start", 5) == 0)
5778 addrp = &lm_addr;
5779 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5780 addrp = &lm_prev;
5781 else
5782 {
5783 annex = strchr (sep, ';');
5784 if (annex == NULL)
5785 break;
5786 annex++;
5787 continue;
5788 }
5789
5790 annex = decode_address_to_semicolon (addrp, sep + 1);
5791 }
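/* The annex parsed above has the form "start=HEXADDR;prev=HEXADDR;"
(either key may be absent); e.g. a request carrying
"start=7ffff7ffe190;prev=0;" (hypothetical address) resumes the
link-map walk at that entry. */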
5792
5793 if (lm_addr == 0)
5794 {
5795 int r_version = 0;
5796
5797 if (priv->r_debug == 0)
5798 priv->r_debug = get_r_debug (pid, is_elf64);
5799
5800 /* We failed to find DT_DEBUG. That situation will not change
5801 for this inferior - do not retry it. Report it to GDB as
5802 E01; see GDB's solib-svr4.c for the rationale. */
5803 if (priv->r_debug == (CORE_ADDR) -1)
5804 return -1;
5805
5806 if (priv->r_debug != 0)
5807 {
5808 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5809 (unsigned char *) &r_version,
5810 sizeof (r_version)) != 0
5811 || r_version != 1)
5812 {
5813 warning ("unexpected r_debug version %d", r_version);
5814 }
5815 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5816 &lm_addr, ptr_size) != 0)
5817 {
5818 warning ("unable to read r_map from 0x%lx",
5819 (long) priv->r_debug + lmo->r_map_offset);
5820 }
5821 }
5822 }
5823
5824 document = xmalloc (allocated);
5825 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5826 p = document + strlen (document);
5827
5828 while (lm_addr
5829 && read_one_ptr (lm_addr + lmo->l_name_offset,
5830 &l_name, ptr_size) == 0
5831 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5832 &l_addr, ptr_size) == 0
5833 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5834 &l_ld, ptr_size) == 0
5835 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5836 &l_prev, ptr_size) == 0
5837 && read_one_ptr (lm_addr + lmo->l_next_offset,
5838 &l_next, ptr_size) == 0)
5839 {
5840 unsigned char libname[PATH_MAX];
5841
5842 if (lm_prev != l_prev)
5843 {
5844 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5845 (long) lm_prev, (long) l_prev);
5846 break;
5847 }
5848
5849 /* Ignore the first entry even if it has a valid name, as it
5850 corresponds to the main executable. The first entry should not be
5851 skipped if the dynamic loader was loaded late by a static executable
5852 (see the solib-svr4.c parameter ignore_first). But in that case the
5853 main executable has no PT_DYNAMIC, and this function would already
5854 have returned above because get_r_debug failed. */
5855 if (lm_prev == 0)
5856 {
5857 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5858 p = p + strlen (p);
5859 }
5860 else
5861 {
5862 /* Not checking for error because reading may stop before
5863 we've got PATH_MAX worth of characters. */
5864 libname[0] = '\0';
5865 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5866 libname[sizeof (libname) - 1] = '\0';
5867 if (libname[0] != '\0')
5868 {
5869 /* 6x the size for xml_escape_text below. */
5870 size_t len = 6 * strlen ((char *) libname);
5871 char *name;
5872
5873 if (!header_done)
5874 {
5875 /* Terminate `<library-list-svr4'. */
5876 *p++ = '>';
5877 header_done = 1;
5878 }
5879
5880 while (allocated < p - document + len + 200)
5881 {
5882 /* Expand to guarantee sufficient storage. */
5883 uintptr_t document_len = p - document;
5884
5885 document = xrealloc (document, 2 * allocated);
5886 allocated *= 2;
5887 p = document + document_len;
5888 }
5889
5890 name = xml_escape_text ((char *) libname);
5891 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5892 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5893 name, (unsigned long) lm_addr,
5894 (unsigned long) l_addr, (unsigned long) l_ld);
5895 free (name);
5896 }
5897 }
5898
5899 lm_prev = lm_addr;
5900 lm_addr = l_next;
5901 }
5902
5903 if (!header_done)
5904 {
5905 /* Empty list; terminate `<library-list-svr4'. */
5906 strcpy (p, "/>");
5907 }
5908 else
5909 strcpy (p, "</library-list-svr4>");
5910
5911 document_len = strlen (document);
5912 if (offset < document_len)
5913 document_len -= offset;
5914 else
5915 document_len = 0;
5916 if (len > document_len)
5917 len = document_len;
5918
5919 memcpy (readbuf, document + offset, len);
5920 xfree (document);
5921
5922 return len;
5923 }
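/* A reply document built by this function looks like the following
(names and addresses hypothetical):

<library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
<library name="/lib64/libc.so.6" lm="0x7ffff7bd9000"
l_addr="0x7ffff7a00000" l_ld="0x7ffff7dca000"/>
</library-list-svr4>

GDB fetches it in LEN-byte chunks at increasing OFFSETs until a short
read marks the end of the document. */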
5924
5925 #ifdef HAVE_LINUX_BTRACE
5926
5927 /* See to_enable_btrace target method. */
5928
5929 static struct btrace_target_info *
5930 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
5931 {
5932 struct btrace_target_info *tinfo;
5933
5934 tinfo = linux_enable_btrace (ptid, conf);
5935
5936 if (tinfo != NULL)
5937 {
5938 struct thread_info *thread = find_thread_ptid (ptid);
5939 struct regcache *regcache = get_thread_regcache (thread, 0);
5940
5941 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5942 }
5943
5944 return tinfo;
5945 }
5946
5947 /* See to_disable_btrace target method. */
5948
5949 static int
5950 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5951 {
5952 enum btrace_error err;
5953
5954 err = linux_disable_btrace (tinfo);
5955 return (err == BTRACE_ERR_NONE ? 0 : -1);
5956 }
5957
5958 /* See to_read_btrace target method. */
5959
5960 static int
5961 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5962 int type)
5963 {
5964 struct btrace_data btrace;
5965 struct btrace_block *block;
5966 enum btrace_error err;
5967 int i;
5968
5969 btrace_data_init (&btrace);
5970
5971 err = linux_read_btrace (&btrace, tinfo, type);
5972 if (err != BTRACE_ERR_NONE)
5973 {
5974 if (err == BTRACE_ERR_OVERFLOW)
5975 buffer_grow_str0 (buffer, "E.Overflow.");
5976 else
5977 buffer_grow_str0 (buffer, "E.Generic Error.");
5978
5979 btrace_data_fini (&btrace);
5980 return -1;
5981 }
5982
5983 switch (btrace.format)
5984 {
5985 case BTRACE_FORMAT_NONE:
5986 buffer_grow_str0 (buffer, "E.No Trace.");
5987 break;
5988
5989 case BTRACE_FORMAT_BTS:
5990 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5991 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5992
5993 for (i = 0;
5994 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
5995 i++)
5996 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5997 paddress (block->begin), paddress (block->end));
5998
5999 buffer_grow_str0 (buffer, "</btrace>\n");
6000 break;
6001
6002 default:
6003 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6004
6005 btrace_data_fini (&btrace);
6006 return -1;
6007 }
6008
6009 btrace_data_fini (&btrace);
6010 return 0;
6011 }
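/* In the BTS case the buffer assembled above has this shape
(hypothetical addresses):

<!DOCTYPE btrace SYSTEM "btrace.dtd">
<btrace version="1.0">
<block begin="0x400500" end="0x400520"/>
<block begin="0x400430" end="0x4004f0"/>
</btrace> */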
6012
6013 /* See to_btrace_conf target method. */
6014
6015 static int
6016 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6017 struct buffer *buffer)
6018 {
6019 const struct btrace_config *conf;
6020
6021 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6022 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6023
6024 conf = linux_btrace_conf (tinfo);
6025 if (conf != NULL)
6026 {
6027 switch (conf->format)
6028 {
6029 case BTRACE_FORMAT_NONE:
6030 break;
6031
6032 case BTRACE_FORMAT_BTS:
6033 buffer_xml_printf (buffer, "<bts");
6034 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6035 buffer_xml_printf (buffer, " />\n");
6036 break;
6037 }
6038 }
6039
6040 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6041 return 0;
6042 }
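/* A typical configuration document produced above (size hypothetical):

<!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
<btrace-conf version="1.0">
<bts size="0x10000"/>
</btrace-conf> */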
6043 #endif /* HAVE_LINUX_BTRACE */
6044
6045 static struct target_ops linux_target_ops = {
6046 linux_create_inferior,
6047 linux_attach,
6048 linux_kill,
6049 linux_detach,
6050 linux_mourn,
6051 linux_join,
6052 linux_thread_alive,
6053 linux_resume,
6054 linux_wait,
6055 linux_fetch_registers,
6056 linux_store_registers,
6057 linux_prepare_to_access_memory,
6058 linux_done_accessing_memory,
6059 linux_read_memory,
6060 linux_write_memory,
6061 linux_look_up_symbols,
6062 linux_request_interrupt,
6063 linux_read_auxv,
6064 linux_supports_z_point_type,
6065 linux_insert_point,
6066 linux_remove_point,
6067 linux_stopped_by_watchpoint,
6068 linux_stopped_data_address,
6069 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6070 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6071 && defined(PT_TEXT_END_ADDR)
6072 linux_read_offsets,
6073 #else
6074 NULL,
6075 #endif
6076 #ifdef USE_THREAD_DB
6077 thread_db_get_tls_address,
6078 #else
6079 NULL,
6080 #endif
6081 linux_qxfer_spu,
6082 hostio_last_error_from_errno,
6083 linux_qxfer_osdata,
6084 linux_xfer_siginfo,
6085 linux_supports_non_stop,
6086 linux_async,
6087 linux_start_non_stop,
6088 linux_supports_multi_process,
6089 #ifdef USE_THREAD_DB
6090 thread_db_handle_monitor_command,
6091 #else
6092 NULL,
6093 #endif
6094 linux_common_core_of_thread,
6095 linux_read_loadmap,
6096 linux_process_qsupported,
6097 linux_supports_tracepoints,
6098 linux_read_pc,
6099 linux_write_pc,
6100 linux_thread_stopped,
6101 NULL,
6102 linux_pause_all,
6103 linux_unpause_all,
6104 linux_stabilize_threads,
6105 linux_install_fast_tracepoint_jump_pad,
6106 linux_emit_ops,
6107 linux_supports_disable_randomization,
6108 linux_get_min_fast_tracepoint_insn_len,
6109 linux_qxfer_libraries_svr4,
6110 linux_supports_agent,
6111 #ifdef HAVE_LINUX_BTRACE
6112 linux_supports_btrace,
6113 linux_low_enable_btrace,
6114 linux_low_disable_btrace,
6115 linux_low_read_btrace,
6116 linux_low_btrace_conf,
6117 #else
6118 NULL,
6119 NULL,
6120 NULL,
6121 NULL,
6122 NULL,
6123 #endif
6124 linux_supports_range_stepping,
6125 };
6126
6127 static void
6128 linux_init_signals (void)
6129 {
6130 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6131 to find what the cancel signal actually is. */
6132 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6133 signal (__SIGRTMIN+1, SIG_IGN);
6134 #endif
6135 }
6136
6137 #ifdef HAVE_LINUX_REGSETS
6138 void
6139 initialize_regsets_info (struct regsets_info *info)
6140 {
6141 for (info->num_regsets = 0;
6142 info->regsets[info->num_regsets].size >= 0;
6143 info->num_regsets++)
6144 ;
6145 }
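/* The loop above relies on the target's regsets array ending with a
sentinel entry whose size field is negative; a sketch, with
hypothetical names:

static struct regset_info my_regsets[] = {
{ PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
GENERAL_REGS, my_fill_gregset, my_store_gregset },
{ 0, 0, 0, -1, -1, NULL, NULL }
};

The final all-zero entry with size -1 terminates the iteration. */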
6146 #endif
6147
6148 void
6149 initialize_low (void)
6150 {
6151 struct sigaction sigchld_action;
6152 memset (&sigchld_action, 0, sizeof (sigchld_action));
6153 set_target_ops (&linux_target_ops);
6154 set_breakpoint_data (the_low_target.breakpoint,
6155 the_low_target.breakpoint_len);
6156 linux_init_signals ();
6157 linux_ptrace_init_warnings ();
6158
6159 sigchld_action.sa_handler = sigchld_handler;
6160 sigemptyset (&sigchld_action.sa_mask);
6161 sigchld_action.sa_flags = SA_RESTART;
6162 sigaction (SIGCHLD, &sigchld_action, NULL);
6163
6164 initialize_low_arch ();
6165 }