/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/linux-osdata.h"
#include "agent.h"

#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <sys/ptrace.h>
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#include "filestuff.h"
#include "tracepoint.h"
#include "hostio.h"
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif
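
/* For illustration beyond the original sources: with the fallback
   definition above, W_STOPCODE is the inverse of the WIFSTOPPED /
   WSTOPSIG decoding macros.  A minimal sketch, assuming the usual
   glibc status layout and SIGTRAP == 5:

     int wstat = W_STOPCODE (SIGTRAP);  // (5 << 8) | 0x7f == 0x57f
     WIFSTOPPED (wstat);                // nonzero: low byte is 0x7f
     WSTOPSIG (wstat);                  // 5: signal lives in bits 8-15

   dequeue_one_deferred_signal below uses W_STOPCODE in exactly this
   way, to synthesize a wait status for a deferred signal.  */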

/* This is the kernel's hard limit.  Not to be confused with
   SIGRTMIN.  */
#ifndef __SIGRTMIN
#define __SIGRTMIN 32
#endif

/* Some targets did not define these ptrace constants from the start,
   so gdbserver defines them locally here.  In the future, these may
   be removed after they are added to asm/ptrace.h.  */
#if !(defined(PT_TEXT_ADDR) \
      || defined(PT_DATA_ADDR) \
      || defined(PT_TEXT_END_ADDR))
#if defined(__mcoldfire__)
/* These are still undefined in 3.10 kernels.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR 51*4
/* BFIN already defines these since at least 2.6.32 kernels.  */
#elif defined(BFIN)
#define PT_TEXT_ADDR 220
#define PT_TEXT_END_ADDR 224
#define PT_DATA_ADDR 228
/* These are still undefined in 3.10 kernels.  */
#elif defined(__TMS320C6X__)
#define PT_TEXT_ADDR (0x10000*4)
#define PT_DATA_ADDR (0x10004*4)
#define PT_TEXT_END_ADDR (0x10008*4)
#endif
#endif

#ifdef HAVE_LINUX_BTRACE
# include "nat/linux-btrace.h"
# include "btrace-common.h"
#endif
#ifndef HAVE_ELF32_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint32_t a_type;		/* Entry type */
  union
    {
      uint32_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf32_auxv_t;
#endif

#ifndef HAVE_ELF64_AUXV_T
/* Copied from glibc's elf.h.  */
typedef struct
{
  uint64_t a_type;		/* Entry type */
  union
    {
      uint64_t a_val;		/* Integer value */
      /* We used to have pointer elements added here.  We cannot do that,
	 though, since it does not work when using 32-bit definitions
	 on 64-bit platforms and vice versa.  */
    } a_un;
} Elf64_auxv_t;
#endif

/* A list of all unknown processes which receive stop signals.  Some
   other process will presumably claim each of these as forked
   children momentarily.  */

struct simple_pid_list
{
  /* The process ID.  */
  int pid;

  /* The status as reported by waitpid.  */
  int status;

  /* Next in chain.  */
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Trivial list manipulation functions to keep track of a list of new
   stopped processes.  */

static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
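
/* For illustration, the flow these two helpers support elsewhere in
   this file: when waitpid reports a stop for an LWP we do not know
   yet, linux_low_filter_event stashes it with
   add_to_pid_list (&stopped_pids, lwpid, wstat), and
   handle_extended_wait later claims it:

     if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
       ...  // not seen yet; wait for the clone child's initial stop

   so the child's first stop is consumed exactly once, whichever side
   observes it first.  */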

enum stopping_threads_kind
  {
    /* Not stopping threads presently.  */
    NOT_STOPPING_THREADS,

    /* Stopping threads.  */
    STOPPING_THREADS,

    /* Stopping and suspending threads.  */
    STOPPING_AND_SUSPENDING_THREADS
  };

/* This is set while stop_all_lwps is in effect.  */
enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

static void linux_resume_one_lwp (struct lwp_info *lwp,
				  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
					  int *wstat, int options);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static struct lwp_info *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);

/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

/* True if LWP is stopped in its stepping range.  */

static int
lwp_in_step_range (struct lwp_info *lwp)
{
  CORE_ADDR pc = lwp->stop_pc;

  return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (void);
/* Return 1 if HEADER is a 64-bit ELF file, 0 if it is a 32-bit ELF
   file, or -1 if it is not an ELF file at all.  Store the machine
   type in *MACHINE (EM_NONE if not an ELF file).  */

static int
elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
{
  if (header->e_ident[EI_MAG0] == ELFMAG0
      && header->e_ident[EI_MAG1] == ELFMAG1
      && header->e_ident[EI_MAG2] == ELFMAG2
      && header->e_ident[EI_MAG3] == ELFMAG3)
    {
      *machine = header->e_machine;
      return header->e_ident[EI_CLASS] == ELFCLASS64;
    }
  *machine = EM_NONE;
  return -1;
}

/* Return 1 if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

static int
elf_64_file_p (const char *file, unsigned int *machine)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header, machine);
}

/* Accept an integer PID; return true if the executable that PID is
   running is a 64-bit ELF file.  */

int
linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
{
  char file[PATH_MAX];

  sprintf (file, "/proc/%d/exe", pid);
  return elf_64_file_p (file, machine);
}
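
/* A minimal usage sketch for the helper above (the PID is
   hypothetical):

     unsigned int machine;
     int is_64 = linux_pid_exe_is_elf_64_file (1234, &machine);
     if (is_64 == 1)
       ...  // 64-bit inferior; MACHINE holds the ELF e_machine value

   A result of -1 means /proc/1234/exe could not be opened at all.  */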

static void
delete_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("deleting %ld\n", lwpid_of (thr));

  remove_thread (thr);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  proc = add_process (pid, attached);
  proc->priv = xcalloc (1, sizeof (*proc->priv));

  /* Set the arch when the first LWP stops.  */
  proc->priv->new_inferior = 1;

  if (the_low_target.new_process != NULL)
    proc->priv->arch_private = the_low_target.new_process ();

  return proc;
}

static CORE_ADDR get_pc (struct lwp_info *lwp);

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = linux_ptrace_get_extended_event (wstat);
  struct thread_info *event_thr = get_lwp_thread (event_child);
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status;

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
	      &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */

	  ret = my_waitpid (new_pid, &status, __WALL);

	  if (ret == -1)
	    perror_with_name ("waiting for new child");
	  else if (ret != new_pid)
	    warning ("wait returned unexpected PID %d", ret);
	  else if (!WIFSTOPPED (status))
	    warning ("wait returned unexpected status 0x%x", status);
	}

      if (debug_threads)
	debug_printf ("HEW: Got clone event "
		      "from LWP %ld, new child is LWP %ld\n",
		      lwpid_of (event_thr), new_pid);

      ptid = ptid_build (pid_of (event_thr), new_pid, 0);
      new_lwp = add_lwp (ptid);

      /* Either we're going to immediately resume the new thread
	 or leave it stopped.  linux_resume_one_lwp is a nop if it
	 thinks the thread is currently running, so set this first
	 before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* If we're suspending all threads, leave this one suspended
	 too.  */
      if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
	new_lwp->suspended = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
	 we might get another signal delivered to the group first.
	 If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  new_lwp->stop_expected = 1;
	  new_lwp->status_pending_p = 1;
	  new_lwp->status_pending = status;
	}
    }
}
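
/* For illustration: an "extended" event such as the clone event above
   arrives as a SIGTRAP stop with the event code packed into the high
   bits of the wait status.  Simplified, the decoding that
   linux_ptrace_get_extended_event performs is:

     WSTOPSIG (wstat) == SIGTRAP          // looks like a plain trap
     (wstat >> 16) == PTRACE_EVENT_CLONE  // but carries the event code

   which is why such statuses are filtered here instead of being
   reported to GDB as real SIGTRAPs (see the
   linux_is_extended_waitstatus callers below).  */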

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_thread;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_thread, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    debug_printf ("pc is 0x%lx\n", (long) pc);

  current_thread = saved_thread;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:

   If we were single-stepping this process using PTRACE_SINGLESTEP, we
   will get only the one SIGTRAP.  The value of $eip will be the next
   instruction.  If the instruction we stepped over was a breakpoint,
   we need to decrement the PC.

   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If the breakpoint is removed, we
   must resume at the decremented PC.

   On a non-decr_pc_after_break machine with hardware or kernel
   single-step:

   If we either single-step a breakpoint instruction, or continue and
   hit a breakpoint instruction, our PC will point at the breakpoint
   instruction.  */
static int
check_stopped_by_breakpoint (struct lwp_info *lwp)
{
  CORE_ADDR pc;
  CORE_ADDR sw_breakpoint_pc;
  struct thread_info *saved_thread;
#if USE_SIGTRAP_SIGINFO
  siginfo_t siginfo;
#endif

  if (the_low_target.get_pc == NULL)
    return 0;

  pc = get_pc (lwp);
  sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;

  /* breakpoint_at reads from the current thread.  */
  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

#if USE_SIGTRAP_SIGINFO
  if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
	      (PTRACE_TYPE_ARG3) 0, &siginfo) == 0)
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back software breakpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      /* Back up the PC if necessary.  */
	      if (pc != sw_breakpoint_pc)
		{
		  struct regcache *regcache
		    = get_thread_regcache (current_thread, 1);
		  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
		}

	      lwp->stop_pc = sw_breakpoint_pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	  else if (siginfo.si_code == TRAP_HWBKPT)
	    {
	      if (debug_threads)
		{
		  struct thread_info *thr = get_lwp_thread (lwp);

		  debug_printf ("CSBB: Push back hardware "
				"breakpoint/watchpoint for %s\n",
				target_pid_to_str (ptid_of (thr)));
		}

	      lwp->stop_pc = pc;
	      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	      current_thread = saved_thread;
	      return 1;
	    }
	}
    }
#else
  /* We may have just stepped a breakpoint instruction.  E.g., in
     non-stop mode, GDB first tells thread A to step a range, and
     then the user inserts a breakpoint inside the range.  In that
     case we need to report the breakpoint PC.  */
  if ((!lwp->stepping || lwp->stop_pc == sw_breakpoint_pc)
      && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by software breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      /* Back up the PC if necessary.  */
      if (pc != sw_breakpoint_pc)
	{
	  struct regcache *regcache
	    = get_thread_regcache (current_thread, 1);
	  (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
	}

      lwp->stop_pc = sw_breakpoint_pc;
      lwp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }

  if (hardware_breakpoint_inserted_here (pc))
    {
      if (debug_threads)
	{
	  struct thread_info *thr = get_lwp_thread (lwp);

	  debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
			target_pid_to_str (ptid_of (thr)));
	}

      lwp->stop_pc = pc;
      lwp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
      current_thread = saved_thread;
      return 1;
    }
#endif

  current_thread = saved_thread;
  return 0;
}
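
/* A worked example of the PC adjustment above, assuming x86, where
   decr_pc_after_break is 1: if a one-byte int3 breakpoint sits at
   0x1000, the SIGTRAP reports PC == 0x1001, so

     sw_breakpoint_pc = 0x1001 - 1;  // 0x1000, the breakpoint address

   and the set_pc call rewinds the thread, so that a later resume
   re-executes the original instruction once the breakpoint is
   removed.  */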

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  lwp->thread = add_thread (ptid, lwp);

  return lwp;
}
/* Start an inferior process and return its PID.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      close_most_fds ();
      ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);

#ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      /* If gdbserver is connected to gdb via stdio, redirect the inferior's
	 stdout to stderr so that inferior i/o doesn't corrupt the connection.
	 Also, redirect stdin to /dev/null.  */
      if (remote_connection_is_stdio ())
	{
	  close (0);
	  open ("/dev/null", O_RDONLY);
	  dup2 (2, 1);
	  if (write (2, "stdin/stdout redirected\n",
		     sizeof ("stdin/stdout redirected\n") - 1) < 0)
	    {
	      /* Errors ignored.  */;
	    }
	}

      execv (program, allargs);
      if (errno == ENOENT)
	execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
	       strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  do_cleanups (restore_personality);

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  Returns 0 on success, ERRNO on
   error.  */

int
linux_attach_lwp (ptid_t ptid)
{
  struct lwp_info *new_lwp;
  int lwpid = ptid_get_lwp (ptid);

  if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
      != 0)
    return errno;

  new_lwp = add_lwp (ptid);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  if (linux_proc_pid_is_stopped (lwpid))
    {
      if (debug_threads)
	debug_printf ("Attached to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since
	 SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (lwpid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
	 SIGSTOP (or a higher priority signal, just like normal
	 PTRACE_ATTACH), which we'll catch later on.  */
      ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
    }

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
	of a new thread that is being created.
	In this case we should ignore that SIGSTOP and resume the
	process.  This is handled below by setting stop_expected = 1,
	and the fact that add_thread sets last_resume_kind ==
	resume_continue.

     2) This is the first thread (the process thread), and we're attaching
	to it via attach_inferior.
	In this case we want the process thread to stop.
	This is handled by having linux_attach set last_resume_kind ==
	resume_stop after we return.

	If the pid we are attaching to is also the tgid, we attach to and
	stop all the existing threads.  Otherwise, we attach to pid and
	ignore any other threads in the same group as this pid.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
	existing threads.
	In this case we want the thread to stop.
	FIXME: This case is currently not properly handled.
	We should wait for the SIGSTOP but don't.  Things work apparently
	because enough time passes between when we ptrace (ATTACH) and when
	gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;

  return 0;
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  /* Is this a new thread?  */
  if (find_thread_ptid (ptid) == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);
      int err;

      if (debug_threads)
	debug_printf ("Found new lwp %d\n", lwpid);

      err = linux_attach_lwp (ptid);

      /* Be quiet if we simply raced with the thread exiting.  EPERM
	 is returned if the thread's task still exists, and is marked
	 as exited or zombie, as well as other conditions, so in that
	 case, confirm the status in /proc/PID/status.  */
      if (err == ESRCH
	  || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	{
	  if (debug_threads)
	    {
	      debug_printf ("Cannot attach to lwp %d: "
			    "thread is gone (%d: %s)\n",
			    lwpid, err, strerror (err));
	    }
	}
      else if (err != 0)
	{
	  warning (_("Cannot attach to lwp %d: %s"),
		   lwpid,
		   linux_ptrace_attach_fail_reason_string (ptid, err));
	}

      return 1;
    }
  return 0;
}

/* Attach to PID.  If PID is the tgid, attach to it and all
   of its threads.  */

static int
linux_attach (unsigned long pid)
{
  ptid_t ptid = ptid_build (pid, pid, 0);
  int err;

  /* Attach to PID.  We will check for other threads
     soon.  */
  err = linux_attach_lwp (ptid);
  if (err != 0)
    error ("Cannot attach to process %ld: %s",
	   pid, linux_ptrace_attach_fail_reason_string (ptid, err));

  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

      /* Don't ignore the initial SIGSTOP if we just attached to this
	 process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  On the one hand, the inferior may be using raw
     clone instead of using pthreads.  On the other hand, even if it
     is using pthreads, GDB may not be connected yet (thread_db needs
     to do symbol lookups, through qSymbol).  Also, thread_db walks
     structures in the inferior's address space to find the list of
     threads/LWPs, and those structures may well be corrupted.  Note
     that once thread_db is loaded, we'll still use it to list threads
     and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
  return 0;
}
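
/* For illustration: linux_proc_attach_tgid_threads drives the
   callback above by re-listing /proc/PID/task until repeated passes
   find no thread that is new to us, so threads spawned while we are
   attaching are still caught.  A sketch of the contract (not the
   exact loop in nat/linux-procfs.c):

     do
       new_found = 0;
       for each TID in /proc/PID/task:
	 new_found |= attach_proc_task_lwp_callback (ptid (PID, TID, 0));
     while (new_found);  */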

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
	return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (int pid)
{
  struct counter counter = { pid, 0 };

  return (find_inferior (&all_threads,
			 second_thread_of_pid_p, &counter) == NULL);
}

/* Kill LWP.  */

static void
linux_kill_one_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = lwpid_of (thr);

  /* PTRACE_KILL is unreliable.  After stepping into a signal handler,
     there is no signal context, and ptrace(PTRACE_KILL) (or
     ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
     ptrace(CONT, pid, 0,0) and just resumes the tracee.  A better
     alternative is to kill with SIGKILL.  We only need one SIGKILL
     per process, not one for each thread.  But since we still support
     linuxthreads, and we also support debugging programs using raw
     clone without CLONE_THREAD, we send one for each thread.  For
     years, we used PTRACE_KILL only, so we're being a bit paranoid
     about some old kernels where PTRACE_KILL might work better
     (dubious if there are any such, but that's why it's paranoia), so
     we try SIGKILL first, PTRACE_KILL second, and so we're fine
     everywhere.  */

  errno = 0;
  kill_lwp (pid, SIGKILL);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }

  errno = 0;
  ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
  if (debug_threads)
    {
      int save_errno = errno;

      debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
		    target_pid_to_str (ptid_of (thr)),
		    save_errno ? strerror (save_errno) : "OK");
    }
}

/* Kill LWP and wait for it to die.  */

static void
kill_wait_lwp (struct lwp_info *lwp)
{
  struct thread_info *thr = get_lwp_thread (lwp);
  int pid = ptid_get_pid (ptid_of (thr));
  int lwpid = ptid_get_lwp (ptid_of (thr));
  int wstat;
  int res;

  if (debug_threads)
    debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);

  do
    {
      linux_kill_one_lwp (lwp);

      /* Make sure it died.  Notes:

	 - The loop is most likely unnecessary.

	 - We don't use linux_wait_for_event as that could delete lwps
	   while we're iterating over them.  We're not interested in
	   any pending status at this point, only in making sure all
	   wait statuses on the kernel side are collected until the
	   process is reaped.

	 - We don't use __WALL here as the __WALL emulation relies on
	   SIGCHLD, and killing a stopped process doesn't generate
	   one, nor an exit status.  */
      res = my_waitpid (lwpid, &wstat, 0);
      if (res == -1 && errno == ECHILD)
	res = my_waitpid (lwpid, &wstat, __WCLONE);
    } while (res > 0 && WIFSTOPPED (wstat));

  gdb_assert (res > 0);
}

/* Callback for `find_inferior'.  Kills an lwp of a given process,
   except the leader.  */

static int
kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (thread) == pid)
    {
      if (debug_threads)
	debug_printf ("lkop: is last of process %s\n",
		      target_pid_to_str (entry->id));
      return 0;
    }

  kill_wait_lwp (lwp);
  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, kill_one_lwp_callback, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));

  if (lwp == NULL)
    {
      if (debug_threads)
	debug_printf ("lk_1: cannot find lwp for pid: %d\n",
		      pid);
    }
  else
    kill_wait_lwp (lwp);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Get the pending signal of THREAD, for detaching purposes.  This is
   the signal the thread last stopped for, which we need to deliver to
   the thread when detaching; otherwise, it'd be suppressed/lost.  */

static int
get_detach_signal (struct thread_info *thread)
{
  enum gdb_signal signo = GDB_SIGNAL_0;
  int status;
  struct lwp_info *lp = get_thread_lwp (thread);

  if (lp->status_pending_p)
    status = lp->status_pending;
  else
    {
      /* If the thread had been suspended by gdbserver, and it stopped
	 cleanly, then it'll have stopped with SIGSTOP.  But we don't
	 want to deliver that SIGSTOP.  */
      if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
	  || thread->last_status.value.sig == GDB_SIGNAL_0)
	return 0;

      /* Otherwise, we may need to deliver the signal we
	 intercepted.  */
      status = lp->last_status;
    }

  if (!WIFSTOPPED (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  /* Extended wait statuses aren't real SIGTRAPs.  */
  if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had stopped with extended "
		      "status: no pending signal\n",
		      target_pid_to_str (ptid_of (thread)));
      return 0;
    }

  signo = gdb_signal_from_host (WSTOPSIG (status));

  if (program_signals_p && !program_signals[signo])
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else if (!program_signals_p
	   /* If we have no way to know which signals GDB does not
	      want to have passed to the program, assume
	      SIGTRAP/SIGINT, which is GDB's default.  */
	   && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s had signal %s, "
		      "but we don't know if we should pass it.  "
		      "Default to not.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));
      return 0;
    }
  else
    {
      if (debug_threads)
	debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
		      target_pid_to_str (ptid_of (thread)),
		      gdb_signal_to_string (signo));

      return WSTOPSIG (status);
    }
}

static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;
  int sig;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
	debug_printf ("Sending SIGCONT to %s\n",
		      target_pid_to_str (ptid_of (thread)));

      kill_lwp (lwpid_of (thread), SIGCONT);
      lwp->stop_expected = 0;
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_thread (thread);

  /* Pass on any pending signal for this thread.  */
  sig = get_detach_signal (thread);

  /* Finally, let it resume.  */
  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);
  if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	      (PTRACE_TYPE_ARG4) (long) sig) < 0)
    error (_("Can't detach %s: %s"),
	   target_pid_to_str (ptid_of (thread)),
	   strerror (errno));

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  struct process_info *process = proc;

  if (pid_of (thread) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_threads, delete_lwp_callback, process);

  /* Free all the private data.  */
  priv = process->priv;
  free (priv->arch_private);
  free (priv);
  process->priv = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp still has an interesting status pending.  If
   not (e.g., it had stopped for a breakpoint that is gone), return
   0.  */

static int
thread_still_has_status_pending_p (struct thread_info *thread)
{
  struct lwp_info *lp = get_thread_lwp (thread);

  if (!lp->status_pending_p)
    return 0;

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  if (thread->last_resume_kind != resume_stop
      && (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	  || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
    {
      struct thread_info *saved_thread;
      CORE_ADDR pc;
      int discard = 0;

      gdb_assert (lp->last_status != 0);

      pc = get_pc (lp);

      saved_thread = current_thread;
      current_thread = thread;

      if (pc != lp->stop_pc)
	{
	  if (debug_threads)
	    debug_printf ("PC of %ld changed\n",
			  lwpid_of (thread));
	  discard = 1;
	}

#if !USE_SIGTRAP_SIGINFO
      else if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
	       && !(*the_low_target.breakpoint_at) (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous SW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
      else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT
	       && !hardware_breakpoint_inserted_here (pc))
	{
	  if (debug_threads)
	    debug_printf ("previous HW breakpoint of %ld gone\n",
			  lwpid_of (thread));
	  discard = 1;
	}
#endif

      current_thread = saved_thread;

      if (discard)
	{
	  if (debug_threads)
	    debug_printf ("discarding pending breakpoint status\n");
	  lp->status_pending_p = 0;
	  return 0;
	}
    }

  return 1;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lp = get_thread_lwp (thread);
  ptid_t ptid = * (ptid_t *) arg;

  /* Check if we're only interested in events from a specific process
     or a specific LWP.  */
  if (!ptid_match (ptid_of (thread), ptid))
    return 0;

  if (lp->status_pending_p
      && !thread_still_has_status_pending_p (thread))
    {
      linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
      return 0;
    }

  return lp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct inferior_list_entry *thread
    = find_inferior (&all_threads, same_lwp, &ptid);

  if (thread == NULL)
    return NULL;

  return get_thread_lwp ((struct thread_info *) thread);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  struct inferior_list_entry *inf, *tmp;
  int count = 0;

  ALL_INFERIORS (&all_threads, inf, tmp)
    {
      if (ptid_get_pid (inf->id) == pid)
	count++;
    }

  return count;
}

/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.  */

static void
check_zombie_leaders (void)
{
  struct process_info *proc, *tmp;

  ALL_PROCESSES (proc, tmp)
    {
      pid_t leader_pid = pid_of (proc);
      struct lwp_info *leader_lp;

      leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));

      if (debug_threads)
	debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
		      "num_lwps=%d, zombie=%d\n",
		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
		      linux_proc_pid_is_zombie (leader_pid));

      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && !last_thread_of_process_p (leader_pid)
	  && linux_proc_pid_is_zombie (leader_pid))
	{
	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	       available, or only the leader exited (not the whole
	       program).  In the latter case, we can't waitpid the
	       leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	       other than the leader exec'd.  On an exec, the Linux
	       kernel destroys all other threads (except the execing
	       one) in the thread group, and resets the execing thread's
	       tid to the tgid.  No exit notification is sent for the
	       execing thread -- from the ptracer's perspective, it
	       appears as though the execing thread just vanishes.
	       Until we reap all other threads except the leader and the
	       execing thread, the leader will be zombie, and the
	       execing thread will be in `D (disc sleep)'.  As soon as
	       all other threads are reaped, the execing thread changes
	       its tid to the tgid, and the previous (zombie) leader
	       vanishes, giving place to the "new" leader.  We could try
	       distinguishing the exit and exec cases, by waiting once
	       more, and seeing if something comes out, but it doesn't
	       sound useful.  The previous leader _does_ go away, and
	       we'll re-add the new one once we see the exec event
	       (which is just the same as what would happen if the
	       previous leader did exit voluntarily before some other
	       thread execs).  */

	  if (debug_threads)
	    fprintf (stderr,
		     "CZL: Thread group leader %d zombie "
		     "(it exited, or another thread execd).\n",
		     leader_pid);

	  delete_lwp (leader_lp);
	}
    }
}

/* Callback for `find_inferior'.  Returns the first LWP that is not
   stopped.  ARG is a PTID filter.  */

static int
not_stopped_callback (struct inferior_list_entry *entry, void *arg)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp;
  ptid_t filter = *(ptid_t *) arg;

  if (!ptid_match (ptid_of (thr), filter))
    return 0;

  lwp = get_thread_lwp (thr);
  if (!lwp->stopped)
    return 1;

  return 0;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  gdb_assert (lwp->suspended == 0);

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
	debug_printf ("got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
				  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}
/* The reason we resume in the caller is that we want to be able to
   pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming; otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_thread;

  saved_thread = current_thread;
  current_thread = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && agent_loaded_p ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
	debug_printf ("Checking whether LWP %ld needs to move out of the "
		      "jump pad.\n",
		      lwpid_of (current_thread));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
	  || (WSTOPSIG (*wstat) != SIGILL
	      && WSTOPSIG (*wstat) != SIGFPE
	      && WSTOPSIG (*wstat) != SIGSEGV
	      && WSTOPSIG (*wstat) != SIGBUS))
	{
	  lwp->collecting_fast_tracepoint = r;

	  if (r != 0)
	    {
	      if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
		{
		  /* Haven't executed the original instruction yet.
		     Set breakpoint there, and wait till it's hit,
		     then single-step until exiting the jump pad.  */
		  lwp->exit_jump_pad_bkpt
		    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
		}

	      if (debug_threads)
		debug_printf ("Checking whether LWP %ld needs to move out of "
			      "the jump pad...it does\n",
			      lwpid_of (current_thread));
	      current_thread = saved_thread;

	      return 1;
	    }
	}
      else
	{
	  /* If we get a synchronous signal while collecting, *and*
	     while executing the (relocated) original instruction,
	     reset the PC to point at the tpoint address, before
	     reporting to GDB.  Otherwise, it's an IPA lib bug: just
	     report the signal to GDB, and pray for the best.  */

	  lwp->collecting_fast_tracepoint = 0;

	  if (r != 0
	      && (status.adjusted_insn_addr <= lwp->stop_pc
		  && lwp->stop_pc < status.adjusted_insn_addr_end))
	    {
	      siginfo_t info;
	      struct regcache *regcache;

	      /* The si_addr on a few signals references the address
		 of the faulting instruction.  Adjust that as
		 well.  */
	      if ((WSTOPSIG (*wstat) == SIGILL
		   || WSTOPSIG (*wstat) == SIGFPE
		   || WSTOPSIG (*wstat) == SIGBUS
		   || WSTOPSIG (*wstat) == SIGSEGV)
		  && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
			     (PTRACE_TYPE_ARG3) 0, &info) == 0
		  /* Final check just to make sure we don't clobber
		     the siginfo of non-kernel-sent signals.  */
		  && (uintptr_t) info.si_addr == lwp->stop_pc)
		{
		  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
		  ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
			  (PTRACE_TYPE_ARG3) 0, &info);
		}

	      regcache = get_thread_regcache (current_thread, 1);
	      (*the_low_target.set_pc) (regcache, status.tpoint_addr);
	      lwp->stop_pc = status.tpoint_addr;

	      /* Cancel any fast tracepoint lock this thread was
		 holding.  */
	      force_unlock_trace_buffer ();
	    }

	  if (lwp->exit_jump_pad_bkpt != NULL)
	    {
	      if (debug_threads)
		debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
			      "stopping all threads momentarily.\n");

	      stop_all_lwps (1, lwp);

	      delete_breakpoint (lwp->exit_jump_pad_bkpt);
	      lwp->exit_jump_pad_bkpt = NULL;

	      unstop_all_lwps (1, lwp);

	      gdb_assert (lwp->suspended >= 0);
	    }
	}
    }

  if (debug_threads)
    debug_printf ("Checking whether LWP %ld needs to move out of the "
		  "jump pad...no\n",
		  lwpid_of (current_thread));

  current_thread = saved_thread;
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;
  struct thread_info *thread = get_lwp_thread (lwp);

  if (debug_threads)
    debug_printf ("Deferring signal %d for LWP %ld.\n",
		  WSTOPSIG (*wstat), lwpid_of (thread));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	debug_printf ("   Already queued %d\n",
		      sig->signal);

      debug_printf ("   (no more currently queued signals)\n");
    }

  /* Don't enqueue non-RT signals if they are already in the deferred
     queue.  (SIGSTOP being the easiest signal to see ending up here
     twice)  */
  if (WSTOPSIG (*wstat) < __SIGRTMIN)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
	   sig != NULL;
	   sig = sig->prev)
	{
	  if (sig->signal == WSTOPSIG (*wstat))
	    {
	      if (debug_threads)
		debug_printf ("Not requeuing already queued non-RT signal %d"
			      " for LWP %ld\n",
			      sig->signal,
			      lwpid_of (thread));
	      return;
	    }
	}
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
	  &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *thread = get_lwp_thread (lwp);

  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
	p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
	ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
		&(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
	debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
		      WSTOPSIG (*wstat), lwpid_of (thread));

      if (debug_threads)
	{
	  struct pending_signals *sig;

	  for (sig = lwp->pending_signals_to_report;
	       sig != NULL;
	       sig = sig->prev)
	    debug_printf ("   Still queued %d\n",
			  sig->signal);

	  debug_printf ("   (no more queued signals)\n");
	}

      return 1;
    }

  return 0;
}
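
/* For illustration: the deferred-signal list is kept newest-first via
   the PREV links, and the dequeue walk above stops at the entry whose
   PREV is NULL, i.e. the oldest one.  A sketch, with hypothetical
   statuses w1/w2:

     enqueue_one_deferred_signal (lwp, &w1);  // list: w1
     enqueue_one_deferred_signal (lwp, &w2);  // list: w2 -> w1
     dequeue_one_deferred_signal (lwp, &w);   // yields w1 first

   so signals are re-reported in arrival (FIFO) order even though
   enqueueing prepends.  */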

/* Fetch the possibly triggered data watchpoint info and store it in
   CHILD.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see CHILD stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *child)
{
  if (the_low_target.stopped_by_watchpoint != NULL)
    {
      struct thread_info *saved_thread;

      saved_thread = current_thread;
      current_thread = get_lwp_thread (child);

      if (the_low_target.stopped_by_watchpoint ())
	{
	  child->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;

	  if (the_low_target.stopped_data_address != NULL)
	    child->stopped_data_address
	      = the_low_target.stopped_data_address ();
	  else
	    child->stopped_data_address = 0;
	}

      current_thread = saved_thread;
    }

  return child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}
1830
1831 /* Do low-level handling of the event, and check if we should go on
1832 and pass it to caller code. Return the affected lwp if we are, or
1833 NULL otherwise. */
1834
1835 static struct lwp_info *
1836 linux_low_filter_event (int lwpid, int wstat)
1837 {
1838 struct lwp_info *child;
1839 struct thread_info *thread;
1840 int have_stop_pc = 0;
1841
1842 child = find_lwp_pid (pid_to_ptid (lwpid));
1843
1844 /* If we didn't find a process, one of two things presumably happened:
1845 - A process we started and then detached from has exited. Ignore it.
1846 - A process we are controlling has forked and the new child's stop
1847 was reported to us by the kernel. Save its PID. */
1848 if (child == NULL && WIFSTOPPED (wstat))
1849 {
1850 add_to_pid_list (&stopped_pids, lwpid, wstat);
1851 return NULL;
1852 }
1853 else if (child == NULL)
1854 return NULL;
1855
1856 thread = get_lwp_thread (child);
1857
1858 child->stopped = 1;
1859
1860 child->last_status = wstat;
1861
1862 /* Check if the thread has exited. */
1863 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1864 {
1865 if (debug_threads)
1866 debug_printf ("LLFE: %d exited.\n", lwpid);
1867 if (num_lwps (pid_of (thread)) > 1)
1868 {
1869
1870 /* If there is at least one more LWP, then the exit signal was
1871 not the end of the debugged application and should be
1872 ignored. */
1873 delete_lwp (child);
1874 return NULL;
1875 }
1876 else
1877 {
1878 /* This was the last lwp in the process. Since events are
1879 serialized to GDB core, and we can't report this one
1880 right now, but GDB core and the other target layers will
1881 want to be notified about the exit code/signal, leave the
1882 status pending for the next time we're able to report
1883 it. */
1884 mark_lwp_dead (child, wstat);
1885 return child;
1886 }
1887 }
1888
1889 gdb_assert (WIFSTOPPED (wstat));
1890
1891 if (WIFSTOPPED (wstat))
1892 {
1893 struct process_info *proc;
1894
1895 /* Architecture-specific setup after inferior is running. This
1896 needs to happen after we have attached to the inferior and it
1897 is stopped for the first time, but before we access any
1898 inferior registers. */
1899 proc = find_process_pid (pid_of (thread));
1900 if (proc->priv->new_inferior)
1901 {
1902 struct thread_info *saved_thread;
1903
1904 saved_thread = current_thread;
1905 current_thread = thread;
1906
1907 the_low_target.arch_setup ();
1908
1909 current_thread = saved_thread;
1910
1911 proc->priv->new_inferior = 0;
1912 }
1913 }
1914
1915 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1916 {
1917 struct process_info *proc = find_process_pid (pid_of (thread));
1918
1919 linux_enable_event_reporting (lwpid, proc->attached);
1920 child->must_set_ptrace_flags = 0;
1921 }
1922
1923   /* Be careful not to overwrite stop_pc until
1924 check_stopped_by_breakpoint is called. */
1925 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1926 && linux_is_extended_waitstatus (wstat))
1927 {
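      /* Extended ptrace events (clone, fork, etc.) are reported as a
	 SIGTRAP stop with a PTRACE_EVENT_* code in bits 16 and up of
	 the wait status; handle_extended_wait decodes and acts on
	 them.  */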
1928 child->stop_pc = get_pc (child);
1929 handle_extended_wait (child, wstat);
1930 return NULL;
1931 }
1932
1933 /* Check first whether this was a SW/HW breakpoint before checking
1934 watchpoints, because at least s390 can't tell the data address of
1935 hardware watchpoint hits, and returns stopped-by-watchpoint as
1936 long as there's a watchpoint set. */
1937 if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
1938 {
1939 if (check_stopped_by_breakpoint (child))
1940 have_stop_pc = 1;
1941 }
1942
1943 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
1944 or hardware watchpoint. Check which is which if we got
1945 TARGET_STOPPED_BY_HW_BREAKPOINT. */
1946 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1947 && (child->stop_reason == TARGET_STOPPED_BY_NO_REASON
1948 || child->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT))
1949 check_stopped_by_watchpoint (child);
1950
1951 if (!have_stop_pc)
1952 child->stop_pc = get_pc (child);
1953
1954 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1955 && child->stop_expected)
1956 {
1957 if (debug_threads)
1958 debug_printf ("Expected stop.\n");
1959 child->stop_expected = 0;
1960
1961 if (thread->last_resume_kind == resume_stop)
1962 {
1963 /* We want to report the stop to the core. Treat the
1964 SIGSTOP as a normal event. */
1965 }
1966 else if (stopping_threads != NOT_STOPPING_THREADS)
1967 {
1968 /* Stopping threads. We don't want this SIGSTOP to end up
1969 pending. */
1970 return NULL;
1971 }
1972 else
1973 {
1974 /* Filter out the event. */
1975 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1976 return NULL;
1977 }
1978 }
1979
1980 child->status_pending_p = 1;
1981 child->status_pending = wstat;
1982 return child;
1983 }
1984
1985 /* Resume LWPs that are currently stopped without any pending status
1986 to report, but are resumed from the core's perspective. */
1987
1988 static void
1989 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
1990 {
1991 struct thread_info *thread = (struct thread_info *) entry;
1992 struct lwp_info *lp = get_thread_lwp (thread);
1993
1994 if (lp->stopped
1995 && !lp->status_pending_p
1996 && thread->last_resume_kind != resume_stop
1997 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1998 {
1999 int step = thread->last_resume_kind == resume_step;
2000
2001 if (debug_threads)
2002 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
2003 target_pid_to_str (ptid_of (thread)),
2004 paddress (lp->stop_pc),
2005 step);
2006
2007 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
2008 }
2009 }
2010
2011 /* Wait for an event from child(ren) WAIT_PTID, and return any that
2012 match FILTER_PTID (leaving others pending). The PTIDs can be:
2013 minus_one_ptid, to specify any child; a pid PTID, specifying all
2014 lwps of a thread group; or a PTID representing a single lwp. Store
2015 the stop status through the status pointer WSTAT. OPTIONS is
2016 passed to the waitpid call. Return 0 if no event was found and
2017 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2018    were found.  Return the PID of the stopped child otherwise.  */
2019
2020 static int
2021 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
2022 int *wstatp, int options)
2023 {
2024 struct thread_info *event_thread;
2025 struct lwp_info *event_child, *requested_child;
2026 sigset_t block_mask, prev_mask;
2027
2028 retry:
2029 /* N.B. event_thread points to the thread_info struct that contains
2030 event_child. Keep them in sync. */
2031 event_thread = NULL;
2032 event_child = NULL;
2033 requested_child = NULL;
2034
2035 /* Check for a lwp with a pending status. */
2036
2037 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
2038 {
2039 event_thread = (struct thread_info *)
2040 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2041 if (event_thread != NULL)
2042 event_child = get_thread_lwp (event_thread);
2043 if (debug_threads && event_thread)
2044 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
2045 }
2046 else if (!ptid_equal (filter_ptid, null_ptid))
2047 {
2048 requested_child = find_lwp_pid (filter_ptid);
2049
2050 if (stopping_threads == NOT_STOPPING_THREADS
2051 && requested_child->status_pending_p
2052 && requested_child->collecting_fast_tracepoint)
2053 {
2054 enqueue_one_deferred_signal (requested_child,
2055 &requested_child->status_pending);
2056 requested_child->status_pending_p = 0;
2057 requested_child->status_pending = 0;
2058 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2059 }
2060
2061 if (requested_child->suspended
2062 && requested_child->status_pending_p)
2063 {
2064 internal_error (__FILE__, __LINE__,
2065 "requesting an event out of a"
2066 " suspended child?");
2067 }
2068
2069 if (requested_child->status_pending_p)
2070 {
2071 event_child = requested_child;
2072 event_thread = get_lwp_thread (event_child);
2073 }
2074 }
2075
2076 if (event_child != NULL)
2077 {
2078 if (debug_threads)
2079 debug_printf ("Got an event from pending child %ld (%04x)\n",
2080 lwpid_of (event_thread), event_child->status_pending);
2081 *wstatp = event_child->status_pending;
2082 event_child->status_pending_p = 0;
2083 event_child->status_pending = 0;
2084 current_thread = event_thread;
2085 return lwpid_of (event_thread);
2086 }
2087
2088 /* But if we don't find a pending event, we'll have to wait.
2089
2090 We only enter this loop if no process has a pending wait status.
2091 Thus any action taken in response to a wait status inside this
2092 loop is responding as soon as we detect the status, not after any
2093 pending events. */
2094
2095 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2096 all signals while here. */
2097 sigfillset (&block_mask);
2098 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
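  /* From here until the sigsuspend below, SIGCHLD stays blocked; a
     status change arriving after the last waitpid sweep leaves the
     signal pending, so sigsuspend (which atomically installs
     PREV_MASK while waiting) wakes immediately rather than missing
     the event.  */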
2099
2100 /* Always pull all events out of the kernel. We'll randomly select
2101 an event LWP out of all that have events, to prevent
2102 starvation. */
2103 while (event_child == NULL)
2104 {
2105 pid_t ret = 0;
2106
2107       /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2108 quirks:
2109
2110 - If the thread group leader exits while other threads in the
2111 thread group still exist, waitpid(TGID, ...) hangs. That
2112 waitpid won't return an exit status until the other threads
2113 in the group are reaped.
2114
2115 - When a non-leader thread execs, that thread just vanishes
2116 without reporting an exit (so we'd hang if we waited for it
2117 explicitly in that case). The exec event is reported to
2118 the TGID pid (although we don't currently enable exec
2119 events). */
2120 errno = 0;
2121 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2122
2123 if (debug_threads)
2124 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2125 ret, errno ? strerror (errno) : "ERRNO-OK");
2126
2127 if (ret > 0)
2128 {
2129 if (debug_threads)
2130 {
2131 debug_printf ("LLW: waitpid %ld received %s\n",
2132 (long) ret, status_to_str (*wstatp));
2133 }
2134
2135 /* Filter all events. IOW, leave all events pending. We'll
2136 randomly select an event LWP out of all that have events
2137 below. */
2138 linux_low_filter_event (ret, *wstatp);
2139 /* Retry until nothing comes out of waitpid. A single
2140 SIGCHLD can indicate more than one child stopped. */
2141 continue;
2142 }
2143
2144 /* Now that we've pulled all events out of the kernel, resume
2145 LWPs that don't have an interesting event to report. */
2146 if (stopping_threads == NOT_STOPPING_THREADS)
2147 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2148
2149 /* ... and find an LWP with a status to report to the core, if
2150 any. */
2151 event_thread = (struct thread_info *)
2152 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2153 if (event_thread != NULL)
2154 {
2155 event_child = get_thread_lwp (event_thread);
2156 *wstatp = event_child->status_pending;
2157 event_child->status_pending_p = 0;
2158 event_child->status_pending = 0;
2159 break;
2160 }
2161
2162 /* Check for zombie thread group leaders. Those can't be reaped
2163 until all other threads in the thread group are. */
2164 check_zombie_leaders ();
2165
2166 /* If there are no resumed children left in the set of LWPs we
2167 want to wait for, bail. We can't just block in
2168 waitpid/sigsuspend, because lwps might have been left stopped
2169 in trace-stop state, and we'd be stuck forever waiting for
2170 their status to change (which would only happen if we resumed
2171 them). Even if WNOHANG is set, this return code is preferred
2172 over 0 (below), as it is more detailed. */
2173 if ((find_inferior (&all_threads,
2174 not_stopped_callback,
2175 &wait_ptid) == NULL))
2176 {
2177 if (debug_threads)
2178 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2179 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2180 return -1;
2181 }
2182
2183 /* No interesting event to report to the caller. */
2184 if ((options & WNOHANG))
2185 {
2186 if (debug_threads)
2187 debug_printf ("WNOHANG set, no event found\n");
2188
2189 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2190 return 0;
2191 }
2192
2193 /* Block until we get an event reported with SIGCHLD. */
2194 if (debug_threads)
2195 debug_printf ("sigsuspend'ing\n");
2196
2197 sigsuspend (&prev_mask);
2198 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2199 goto retry;
2200 }
2201
2202 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2203
2204 current_thread = event_thread;
2205
2206 /* Check for thread exit. */
2207 if (! WIFSTOPPED (*wstatp))
2208 {
2209 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2210
2211 if (debug_threads)
2212 	debug_printf ("LWP %ld is the last lwp of process.  "
2213 		      "Process %d exiting.\n",
2214 		      lwpid_of (event_thread), pid_of (event_thread));
2215 return lwpid_of (event_thread);
2216 }
2217
2218 return lwpid_of (event_thread);
2219 }
2220
2221 /* Wait for an event from child(ren) PTID. PTIDs can be:
2222 minus_one_ptid, to specify any child; a pid PTID, specifying all
2223 lwps of a thread group; or a PTID representing a single lwp. Store
2224 the stop status through the status pointer WSTAT. OPTIONS is
2225 passed to the waitpid call. Return 0 if no event was found and
2226 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2227    were found.  Return the PID of the stopped child otherwise.  */
2228
2229 static int
2230 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2231 {
2232 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2233 }
2234
2235 /* Count the LWP's that have had events. */
2236
2237 static int
2238 count_events_callback (struct inferior_list_entry *entry, void *data)
2239 {
2240 struct thread_info *thread = (struct thread_info *) entry;
2241 int *count = data;
2242
2243 gdb_assert (count != NULL);
2244
2245 /* Count only resumed LWPs that have an event pending. */
2246 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2247 && thread->last_resume_kind != resume_stop
2248 && thread->status_pending_p)
2249 (*count)++;
2250
2251 return 0;
2252 }
2253
2254 /* Select the LWP (if any) that is currently being single-stepped. */
2255
2256 static int
2257 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2258 {
2259 struct thread_info *thread = (struct thread_info *) entry;
2260 struct lwp_info *lp = get_thread_lwp (thread);
2261
2262 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2263 && thread->last_resume_kind == resume_step
2264 && lp->status_pending_p)
2265 return 1;
2266 else
2267 return 0;
2268 }
2269
2270 /* Select the Nth LWP that has had a SIGTRAP event that should be
2271 reported to GDB. */
2272
2273 static int
2274 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2275 {
2276 struct thread_info *thread = (struct thread_info *) entry;
2277 int *selector = data;
2278
2279 gdb_assert (selector != NULL);
2280
2281 /* Select only resumed LWPs that have an event pending. */
2282 if (thread->last_resume_kind != resume_stop
2283 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2284 && thread->status_pending_p)
2285 if ((*selector)-- == 0)
2286 return 1;
2287
2288 return 0;
2289 }
2290
2291 /* Select one LWP out of those that have events pending. */
2292
2293 static void
2294 select_event_lwp (struct lwp_info **orig_lp)
2295 {
2296 int num_events = 0;
2297 int random_selector;
2298 struct thread_info *event_thread = NULL;
2299
2300 /* In all-stop, give preference to the LWP that is being
2301 single-stepped. There will be at most one, and it's the LWP that
2302 the core is most interested in. If we didn't do this, then we'd
2303 have to handle pending step SIGTRAPs somehow in case the core
2304 later continues the previously-stepped thread, otherwise we'd
2305 report the pending SIGTRAP, and the core, not having stepped the
2306 thread, wouldn't understand what the trap was for, and therefore
2307 would report it to the user as a random signal. */
2308 if (!non_stop)
2309 {
2310 event_thread
2311 = (struct thread_info *) find_inferior (&all_threads,
2312 select_singlestep_lwp_callback,
2313 NULL);
2314 if (event_thread != NULL)
2315 {
2316 if (debug_threads)
2317 debug_printf ("SEL: Select single-step %s\n",
2318 target_pid_to_str (ptid_of (event_thread)));
2319 }
2320 }
2321 if (event_thread == NULL)
2322 {
2323 /* No single-stepping LWP. Select one at random, out of those
2324 which have had SIGTRAP events. */
2325
2326 /* First see how many SIGTRAP events we have. */
2327 find_inferior (&all_threads, count_events_callback, &num_events);
2328
2329 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
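      /* Scaling rand () by num_events / (RAND_MAX + 1.0) maps it
	 uniformly onto [0, num_events), avoiding the slight bias a
	 plain rand () % num_events would have when num_events doesn't
	 divide RAND_MAX + 1.  */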
2330 random_selector = (int)
2331 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2332
2333 if (debug_threads && num_events > 1)
2334 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2335 num_events, random_selector);
2336
2337 event_thread
2338 = (struct thread_info *) find_inferior (&all_threads,
2339 select_event_lwp_callback,
2340 &random_selector);
2341 }
2342
2343 if (event_thread != NULL)
2344 {
2345 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2346
2347 /* Switch the event LWP. */
2348 *orig_lp = event_lp;
2349 }
2350 }
2351
2352 /* Decrement the suspend count of an LWP. */
2353
2354 static int
2355 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2356 {
2357 struct thread_info *thread = (struct thread_info *) entry;
2358 struct lwp_info *lwp = get_thread_lwp (thread);
2359
2360 /* Ignore EXCEPT. */
2361 if (lwp == except)
2362 return 0;
2363
2364 lwp->suspended--;
2365
2366 gdb_assert (lwp->suspended >= 0);
2367 return 0;
2368 }
2369
2370 /* Decrement the suspend count of all LWPs, except EXCEPT, if non
2371 NULL. */
2372
2373 static void
2374 unsuspend_all_lwps (struct lwp_info *except)
2375 {
2376 find_inferior (&all_threads, unsuspend_one_lwp, except);
2377 }
2378
2379 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2380 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2381 void *data);
2382 static int lwp_running (struct inferior_list_entry *entry, void *data);
2383 static ptid_t linux_wait_1 (ptid_t ptid,
2384 struct target_waitstatus *ourstatus,
2385 int target_options);
2386
2387 /* Stabilize threads (move out of jump pads).
2388
2389 If a thread is midway collecting a fast tracepoint, we need to
2390 finish the collection and move it out of the jump pad before
2391 reporting the signal.
2392
2393 This avoids recursion while collecting (when a signal arrives
2394 midway, and the signal handler itself collects), which would trash
2395 the trace buffer. In case the user set a breakpoint in a signal
2396 handler, this avoids the backtrace showing the jump pad, etc..
2397 Most importantly, there are certain things we can't do safely if
2398 threads are stopped in a jump pad (or in its callee's). For
2399 example:
2400
2401 - starting a new trace run. A thread still collecting the
2402    previous run could trash the trace buffer when resumed.  The trace
2403    buffer control structures would have been reset but the thread
2404    would have no way to tell.  The thread could even be midway
2405    through memcpy'ing to the buffer, which means that when resumed,
2406    it would clobber the trace buffer that had been set up for a new run.
2407
2408 - we can't rewrite/reuse the jump pads for new tracepoints
2409    safely.  Say you do tstart while a thread is stopped midway
2410    through collecting.  When the thread is later resumed, it
2411    finishes the collection and returns to the jump pad, to execute
2412    the original instruction that was under the tracepoint jump at
2413    the time the older run had been started.  If the jump pad had
2414    since been rewritten for something else in the new run, the
2415    thread would now execute the wrong/random instructions.  */
2416
2417 static void
2418 linux_stabilize_threads (void)
2419 {
2420 struct thread_info *saved_thread;
2421 struct thread_info *thread_stuck;
2422
2423 thread_stuck
2424 = (struct thread_info *) find_inferior (&all_threads,
2425 stuck_in_jump_pad_callback,
2426 NULL);
2427 if (thread_stuck != NULL)
2428 {
2429 if (debug_threads)
2430 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2431 lwpid_of (thread_stuck));
2432 return;
2433 }
2434
2435 saved_thread = current_thread;
2436
2437 stabilizing_threads = 1;
2438
2439 /* Kick 'em all. */
2440 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2441
2442 /* Loop until all are stopped out of the jump pads. */
2443 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2444 {
2445 struct target_waitstatus ourstatus;
2446 struct lwp_info *lwp;
2447 int wstat;
2448
2449       /* Note that we go through the full wait event loop.  While
2450 	 moving threads out of the jump pad, we need to be able to step
2451 over internal breakpoints and such. */
2452 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2453
2454 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2455 {
2456 lwp = get_thread_lwp (current_thread);
2457
2458 /* Lock it. */
2459 lwp->suspended++;
2460
2461 if (ourstatus.value.sig != GDB_SIGNAL_0
2462 || current_thread->last_resume_kind == resume_stop)
2463 {
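 	    /* W_STOPCODE synthesizes a wait status as if waitpid had
	       reported a stop with this signal: WIFSTOPPED holds of
	       the result, and WSTOPSIG recovers the signal number.  */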
2464 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2465 enqueue_one_deferred_signal (lwp, &wstat);
2466 }
2467 }
2468 }
2469
2470 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2471
2472 stabilizing_threads = 0;
2473
2474 current_thread = saved_thread;
2475
2476 if (debug_threads)
2477 {
2478 thread_stuck
2479 = (struct thread_info *) find_inferior (&all_threads,
2480 stuck_in_jump_pad_callback,
2481 NULL);
2482 if (thread_stuck != NULL)
2483 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2484 lwpid_of (thread_stuck));
2485 }
2486 }
2487
2488 static void async_file_mark (void);
2489
2490 /* Convenience function that is called when the kernel reports an
2491 event that is not passed out to GDB. */
2492
2493 static ptid_t
2494 ignore_event (struct target_waitstatus *ourstatus)
2495 {
2496 /* If we got an event, there may still be others, as a single
2497 SIGCHLD can indicate more than one child stopped. This forces
2498 another target_wait call. */
2499 async_file_mark ();
2500
2501 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2502 return null_ptid;
2503 }
2504
2505 /* Wait for process, returns status. */
2506
2507 static ptid_t
2508 linux_wait_1 (ptid_t ptid,
2509 struct target_waitstatus *ourstatus, int target_options)
2510 {
2511 int w;
2512 struct lwp_info *event_child;
2513 int options;
2514 int pid;
2515 int step_over_finished;
2516 int bp_explains_trap;
2517 int maybe_internal_trap;
2518 int report_to_gdb;
2519 int trace_event;
2520 int in_step_range;
2521
2522 if (debug_threads)
2523 {
2524 debug_enter ();
2525 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2526 }
2527
2528 /* Translate generic target options into linux options. */
2529 options = __WALL;
2530 if (target_options & TARGET_WNOHANG)
2531 options |= WNOHANG;
2532
2533 bp_explains_trap = 0;
2534 trace_event = 0;
2535 in_step_range = 0;
2536 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2537
2538 if (ptid_equal (step_over_bkpt, null_ptid))
2539 pid = linux_wait_for_event (ptid, &w, options);
2540 else
2541 {
2542 if (debug_threads)
2543 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2544 target_pid_to_str (step_over_bkpt));
2545 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2546 }
2547
2548 if (pid == 0)
2549 {
2550 gdb_assert (target_options & TARGET_WNOHANG);
2551
2552 if (debug_threads)
2553 {
2554 debug_printf ("linux_wait_1 ret = null_ptid, "
2555 "TARGET_WAITKIND_IGNORE\n");
2556 debug_exit ();
2557 }
2558
2559 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2560 return null_ptid;
2561 }
2562 else if (pid == -1)
2563 {
2564 if (debug_threads)
2565 {
2566 debug_printf ("linux_wait_1 ret = null_ptid, "
2567 "TARGET_WAITKIND_NO_RESUMED\n");
2568 debug_exit ();
2569 }
2570
2571 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2572 return null_ptid;
2573 }
2574
2575 event_child = get_thread_lwp (current_thread);
2576
2577 /* linux_wait_for_event only returns an exit status for the last
2578 child of a process. Report it. */
2579 if (WIFEXITED (w) || WIFSIGNALED (w))
2580 {
2581 if (WIFEXITED (w))
2582 {
2583 ourstatus->kind = TARGET_WAITKIND_EXITED;
2584 ourstatus->value.integer = WEXITSTATUS (w);
2585
2586 if (debug_threads)
2587 {
2588 debug_printf ("linux_wait_1 ret = %s, exited with "
2589 "retcode %d\n",
2590 target_pid_to_str (ptid_of (current_thread)),
2591 WEXITSTATUS (w));
2592 debug_exit ();
2593 }
2594 }
2595 else
2596 {
2597 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2598 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2599
2600 if (debug_threads)
2601 {
2602 debug_printf ("linux_wait_1 ret = %s, terminated with "
2603 "signal %d\n",
2604 target_pid_to_str (ptid_of (current_thread)),
2605 WTERMSIG (w));
2606 debug_exit ();
2607 }
2608 }
2609
2610 return ptid_of (current_thread);
2611 }
2612
2613 /* If step-over executes a breakpoint instruction, it means a
2614 gdb/gdbserver breakpoint had been planted on top of a permanent
2615 breakpoint. The PC has been adjusted by
2616 check_stopped_by_breakpoint to point at the breakpoint address.
2617 Advance the PC manually past the breakpoint, otherwise the
2618 program would keep trapping the permanent breakpoint forever. */
2619 if (!ptid_equal (step_over_bkpt, null_ptid)
2620 && event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2621 {
2622 unsigned int increment_pc = the_low_target.breakpoint_len;
2623
2624 if (debug_threads)
2625 {
2626 debug_printf ("step-over for %s executed software breakpoint\n",
2627 target_pid_to_str (ptid_of (current_thread)));
2628 }
2629
2630 if (increment_pc != 0)
2631 {
2632 struct regcache *regcache
2633 = get_thread_regcache (current_thread, 1);
2634
2635 event_child->stop_pc += increment_pc;
2636 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2637
2638 if (!(*the_low_target.breakpoint_at) (event_child->stop_pc))
2639 event_child->stop_reason = TARGET_STOPPED_BY_NO_REASON;
2640 }
2641 }
2642
2643 /* If this event was not handled before, and is not a SIGTRAP, we
2644 report it. SIGILL and SIGSEGV are also treated as traps in case
2645 a breakpoint is inserted at the current PC. If this target does
2646 not support internal breakpoints at all, we also report the
2647 SIGTRAP without further processing; it's of no concern to us. */
2648 maybe_internal_trap
2649 = (supports_breakpoints ()
2650 && (WSTOPSIG (w) == SIGTRAP
2651 || ((WSTOPSIG (w) == SIGILL
2652 || WSTOPSIG (w) == SIGSEGV)
2653 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2654
2655 if (maybe_internal_trap)
2656 {
2657 /* Handle anything that requires bookkeeping before deciding to
2658 report the event or continue waiting. */
2659
2660 /* First check if we can explain the SIGTRAP with an internal
2661 breakpoint, or if we should possibly report the event to GDB.
2662 Do this before anything that may remove or insert a
2663 breakpoint. */
2664 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2665
2666 /* We have a SIGTRAP, possibly a step-over dance has just
2667 finished. If so, tweak the state machine accordingly,
2668 reinsert breakpoints and delete any reinsert (software
2669 single-step) breakpoints. */
2670 step_over_finished = finish_step_over (event_child);
2671
2672 /* Now invoke the callbacks of any internal breakpoints there. */
2673 check_breakpoints (event_child->stop_pc);
2674
2675 /* Handle tracepoint data collecting. This may overflow the
2676 trace buffer, and cause a tracing stop, removing
2677 breakpoints. */
2678 trace_event = handle_tracepoints (event_child);
2679
2680 if (bp_explains_trap)
2681 {
2682 /* If we stepped or ran into an internal breakpoint, we've
2683 already handled it. So next time we resume (from this
2684 PC), we should step over it. */
2685 if (debug_threads)
2686 debug_printf ("Hit a gdbserver breakpoint.\n");
2687
2688 if (breakpoint_here (event_child->stop_pc))
2689 event_child->need_step_over = 1;
2690 }
2691 }
2692 else
2693 {
2694 /* We have some other signal, possibly a step-over dance was in
2695 progress, and it should be cancelled too. */
2696 step_over_finished = finish_step_over (event_child);
2697 }
2698
2699 /* We have all the data we need. Either report the event to GDB, or
2700 resume threads and keep waiting for more. */
2701
2702 /* If we're collecting a fast tracepoint, finish the collection and
2703 move out of the jump pad before delivering a signal. See
2704 linux_stabilize_threads. */
2705
2706 if (WIFSTOPPED (w)
2707 && WSTOPSIG (w) != SIGTRAP
2708 && supports_fast_tracepoints ()
2709 && agent_loaded_p ())
2710 {
2711 if (debug_threads)
2712 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2713 "to defer or adjust it.\n",
2714 WSTOPSIG (w), lwpid_of (current_thread));
2715
2716 /* Allow debugging the jump pad itself. */
2717 if (current_thread->last_resume_kind != resume_step
2718 && maybe_move_out_of_jump_pad (event_child, &w))
2719 {
2720 enqueue_one_deferred_signal (event_child, &w);
2721
2722 if (debug_threads)
2723 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2724 WSTOPSIG (w), lwpid_of (current_thread));
2725
2726 linux_resume_one_lwp (event_child, 0, 0, NULL);
2727
2728 return ignore_event (ourstatus);
2729 }
2730 }
2731
2732 if (event_child->collecting_fast_tracepoint)
2733 {
2734 if (debug_threads)
2735 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2736 "Check if we're already there.\n",
2737 lwpid_of (current_thread),
2738 event_child->collecting_fast_tracepoint);
2739
2740 trace_event = 1;
2741
2742 event_child->collecting_fast_tracepoint
2743 = linux_fast_tracepoint_collecting (event_child, NULL);
2744
2745 if (event_child->collecting_fast_tracepoint != 1)
2746 {
2747 /* No longer need this breakpoint. */
2748 if (event_child->exit_jump_pad_bkpt != NULL)
2749 {
2750 if (debug_threads)
2751 	      debug_printf ("No longer need exit-jump-pad bkpt; removing it; "
2752 			    "stopping all threads momentarily.\n");
2753
2754 /* Other running threads could hit this breakpoint.
2755 	       We don't handle moribund locations like GDB does;
2756 instead we always pause all threads when removing
2757 breakpoints, so that any step-over or
2758 decr_pc_after_break adjustment is always taken
2759 care of while the breakpoint is still
2760 inserted. */
2761 stop_all_lwps (1, event_child);
2762
2763 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2764 event_child->exit_jump_pad_bkpt = NULL;
2765
2766 unstop_all_lwps (1, event_child);
2767
2768 gdb_assert (event_child->suspended >= 0);
2769 }
2770 }
2771
2772 if (event_child->collecting_fast_tracepoint == 0)
2773 {
2774 if (debug_threads)
2775 debug_printf ("fast tracepoint finished "
2776 "collecting successfully.\n");
2777
2778 /* We may have a deferred signal to report. */
2779 if (dequeue_one_deferred_signal (event_child, &w))
2780 {
2781 if (debug_threads)
2782 debug_printf ("dequeued one signal.\n");
2783 }
2784 else
2785 {
2786 if (debug_threads)
2787 debug_printf ("no deferred signals.\n");
2788
2789 if (stabilizing_threads)
2790 {
2791 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2792 ourstatus->value.sig = GDB_SIGNAL_0;
2793
2794 if (debug_threads)
2795 {
2796 debug_printf ("linux_wait_1 ret = %s, stopped "
2797 "while stabilizing threads\n",
2798 target_pid_to_str (ptid_of (current_thread)));
2799 debug_exit ();
2800 }
2801
2802 return ptid_of (current_thread);
2803 }
2804 }
2805 }
2806 }
2807
2808 /* Check whether GDB would be interested in this event. */
2809
2810 /* If GDB is not interested in this signal, don't stop other
2811 threads, and don't report it to GDB. Just resume the inferior
2812 right away. We do this for threading-related signals as well as
2813 any that GDB specifically requested we ignore. But never ignore
2814 SIGSTOP if we sent it ourselves, and do not ignore signals when
2815 stepping - they may require special handling to skip the signal
2816 handler. Also never ignore signals that could be caused by a
2817 breakpoint. */
2818 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2819 thread library? */
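  /* __SIGRTMIN and __SIGRTMIN + 1 are the realtime signals glibc's
     NPTL reserves internally (thread cancellation and setxid
     synchronization); the inferior's thread library expects to
     receive these itself, so they are passed through quietly.  */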
2820 if (WIFSTOPPED (w)
2821 && current_thread->last_resume_kind != resume_step
2822 && (
2823 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2824 (current_process ()->priv->thread_db != NULL
2825 && (WSTOPSIG (w) == __SIGRTMIN
2826 || WSTOPSIG (w) == __SIGRTMIN + 1))
2827 ||
2828 #endif
2829 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2830 && !(WSTOPSIG (w) == SIGSTOP
2831 && current_thread->last_resume_kind == resume_stop)
2832 && !linux_wstatus_maybe_breakpoint (w))))
2833 {
2834 siginfo_t info, *info_p;
2835
2836 if (debug_threads)
2837 debug_printf ("Ignored signal %d for LWP %ld.\n",
2838 WSTOPSIG (w), lwpid_of (current_thread));
2839
2840 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2841 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2842 info_p = &info;
2843 else
2844 info_p = NULL;
2845 linux_resume_one_lwp (event_child, event_child->stepping,
2846 WSTOPSIG (w), info_p);
2847 return ignore_event (ourstatus);
2848 }
2849
2850 /* Note that all addresses are always "out of the step range" when
2851 there's no range to begin with. */
2852 in_step_range = lwp_in_step_range (event_child);
2853
2854 /* If GDB wanted this thread to single step, and the thread is out
2855 of the step range, we always want to report the SIGTRAP, and let
2856 GDB handle it. Watchpoints should always be reported. So should
2857 signals we can't explain. A SIGTRAP we can't explain could be a
2858    GDB breakpoint --- we may or may not support Z0 breakpoints.  If
2859    we do, we'll be able to handle GDB breakpoints on top of internal
2860    breakpoints, by handling the internal breakpoint and still
2861    reporting the event to GDB.  If we don't, we're out of luck; GDB
2862 won't see the breakpoint hit. */
2863 report_to_gdb = (!maybe_internal_trap
2864 || (current_thread->last_resume_kind == resume_step
2865 && !in_step_range)
2866 || event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
2867 || (!step_over_finished && !in_step_range
2868 && !bp_explains_trap && !trace_event)
2869 || (gdb_breakpoint_here (event_child->stop_pc)
2870 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2871 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2872
2873 run_breakpoint_commands (event_child->stop_pc);
2874
2875 /* We found no reason GDB would want us to stop. We either hit one
2876 of our own breakpoints, or finished an internal step GDB
2877 shouldn't know about. */
2878 if (!report_to_gdb)
2879 {
2880 if (debug_threads)
2881 {
2882 if (bp_explains_trap)
2883 debug_printf ("Hit a gdbserver breakpoint.\n");
2884 if (step_over_finished)
2885 debug_printf ("Step-over finished.\n");
2886 if (trace_event)
2887 debug_printf ("Tracepoint event.\n");
2888 if (lwp_in_step_range (event_child))
2889 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2890 paddress (event_child->stop_pc),
2891 paddress (event_child->step_range_start),
2892 paddress (event_child->step_range_end));
2893 }
2894
2895 /* We're not reporting this breakpoint to GDB, so apply the
2896 decr_pc_after_break adjustment to the inferior's regcache
2897 ourselves. */
2898
2899 if (the_low_target.set_pc != NULL)
2900 {
2901 struct regcache *regcache
2902 = get_thread_regcache (current_thread, 1);
2903 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2904 }
2905
2906 /* We may have finished stepping over a breakpoint. If so,
2907 we've stopped and suspended all LWPs momentarily except the
2908 stepping one. This is where we resume them all again. We're
2909 going to keep waiting, so use proceed, which handles stepping
2910 over the next breakpoint. */
2911 if (debug_threads)
2912 debug_printf ("proceeding all threads.\n");
2913
2914 if (step_over_finished)
2915 unsuspend_all_lwps (event_child);
2916
2917 proceed_all_lwps ();
2918 return ignore_event (ourstatus);
2919 }
2920
2921 if (debug_threads)
2922 {
2923 if (current_thread->last_resume_kind == resume_step)
2924 {
2925 if (event_child->step_range_start == event_child->step_range_end)
2926 debug_printf ("GDB wanted to single-step, reporting event.\n");
2927 else if (!lwp_in_step_range (event_child))
2928 debug_printf ("Out of step range, reporting event.\n");
2929 }
2930 if (event_child->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2931 debug_printf ("Stopped by watchpoint.\n");
2932 else if (gdb_breakpoint_here (event_child->stop_pc))
2933 debug_printf ("Stopped by GDB breakpoint.\n");
2934       debug_printf ("Hit a non-gdbserver trap event.\n");
2936 }
2937
2938 /* Alright, we're going to report a stop. */
2939
2940 if (!stabilizing_threads)
2941 {
2942 /* In all-stop, stop all threads. */
2943 if (!non_stop)
2944 stop_all_lwps (0, NULL);
2945
2946 /* If we're not waiting for a specific LWP, choose an event LWP
2947 from among those that have had events. Giving equal priority
2948 to all LWPs that have had events helps prevent
2949 starvation. */
2950 if (ptid_equal (ptid, minus_one_ptid))
2951 {
2952 event_child->status_pending_p = 1;
2953 event_child->status_pending = w;
2954
2955 select_event_lwp (&event_child);
2956
2957 /* current_thread and event_child must stay in sync. */
2958 current_thread = get_lwp_thread (event_child);
2959
2960 event_child->status_pending_p = 0;
2961 w = event_child->status_pending;
2962 }
2963
2964 if (step_over_finished)
2965 {
2966 if (!non_stop)
2967 {
2968 /* If we were doing a step-over, all other threads but
2969 the stepping one had been paused in start_step_over,
2970 with their suspend counts incremented. We don't want
2971 to do a full unstop/unpause, because we're in
2972 all-stop mode (so we want threads stopped), but we
2973 still need to unsuspend the other threads, to
2974 decrement their `suspended' count back. */
2975 unsuspend_all_lwps (event_child);
2976 }
2977 else
2978 {
2979 /* If we just finished a step-over, then all threads had
2980 been momentarily paused. In all-stop, that's fine,
2981 we want threads stopped by now anyway. In non-stop,
2982 we need to re-resume threads that GDB wanted to be
2983 running. */
2984 unstop_all_lwps (1, event_child);
2985 }
2986 }
2987
2988 /* Stabilize threads (move out of jump pads). */
2989 if (!non_stop)
2990 stabilize_threads ();
2991 }
2992 else
2993 {
2994 /* If we just finished a step-over, then all threads had been
2995 momentarily paused. In all-stop, that's fine, we want
2996 threads stopped by now anyway. In non-stop, we need to
2997 re-resume threads that GDB wanted to be running. */
2998 if (step_over_finished)
2999 unstop_all_lwps (1, event_child);
3000 }
3001
3002 ourstatus->kind = TARGET_WAITKIND_STOPPED;
3003
3004 /* Now that we've selected our final event LWP, un-adjust its PC if
3005 it was a software breakpoint, and the client doesn't know we can
3006 adjust the breakpoint ourselves. */
3007 if (event_child->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3008 && !swbreak_feature)
3009 {
3010 int decr_pc = the_low_target.decr_pc_after_break;
3011
3012 if (decr_pc != 0)
3013 {
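 	  /* check_stopped_by_breakpoint had moved the PC back to the
	     breakpoint address; a client without the swbreak feature
	     expects the raw PC the CPU reported and will apply
	     decr_pc_after_break itself, so re-add the offset before
	     reporting.  */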
3014 struct regcache *regcache
3015 = get_thread_regcache (current_thread, 1);
3016 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
3017 }
3018 }
3019
3020 if (current_thread->last_resume_kind == resume_stop
3021 && WSTOPSIG (w) == SIGSTOP)
3022 {
3023       /* GDB requested this thread to stop with vCont;t, and it
3024 	 stopped cleanly, so report it as SIG0.  The use of
3025 SIGSTOP is an implementation detail. */
3026 ourstatus->value.sig = GDB_SIGNAL_0;
3027 }
3028 else if (current_thread->last_resume_kind == resume_stop
3029 && WSTOPSIG (w) != SIGSTOP)
3030 {
3031       /* GDB requested this thread to stop with vCont;t, but it
3032 	 stopped for other reasons.  */
3033 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3034 }
3035 else
3036 {
3037 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
3038 }
3039
3040 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
3041
3042 if (debug_threads)
3043 {
3044 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
3045 target_pid_to_str (ptid_of (current_thread)),
3046 ourstatus->kind, ourstatus->value.sig);
3047 debug_exit ();
3048 }
3049
3050 return ptid_of (current_thread);
3051 }
3052
3053 /* Get rid of any pending event in the pipe. */
3054 static void
3055 async_file_flush (void)
3056 {
3057 int ret;
3058 char buf;
3059
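  /* Drain the pipe.  This assumes the event pipe was made
     non-blocking when async mode was enabled, so once it is empty
     read returns -1 with EAGAIN and the loop terminates.  */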
3060 do
3061 ret = read (linux_event_pipe[0], &buf, 1);
3062 while (ret >= 0 || (ret == -1 && errno == EINTR));
3063 }
3064
3065 /* Put something in the pipe, so the event loop wakes up. */
3066 static void
3067 async_file_mark (void)
3068 {
3069 int ret;
3070
3071 async_file_flush ();
3072
3073 do
3074 ret = write (linux_event_pipe[1], "+", 1);
3075 while (ret == 0 || (ret == -1 && errno == EINTR));
3076
3077 /* Ignore EAGAIN. If the pipe is full, the event loop will already
3078 be awakened anyway. */
3079 }
3080
3081 static ptid_t
3082 linux_wait (ptid_t ptid,
3083 struct target_waitstatus *ourstatus, int target_options)
3084 {
3085 ptid_t event_ptid;
3086
3087 /* Flush the async file first. */
3088 if (target_is_async_p ())
3089 async_file_flush ();
3090
3091 do
3092 {
3093 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3094 }
3095 while ((target_options & TARGET_WNOHANG) == 0
3096 && ptid_equal (event_ptid, null_ptid)
3097 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3098
3099 /* If at least one stop was reported, there may be more. A single
3100 SIGCHLD can signal more than one child stop. */
3101 if (target_is_async_p ()
3102 && (target_options & TARGET_WNOHANG) != 0
3103 && !ptid_equal (event_ptid, null_ptid))
3104 async_file_mark ();
3105
3106 return event_ptid;
3107 }
3108
3109 /* Send a signal to an LWP. */
3110
3111 static int
3112 kill_lwp (unsigned long lwpid, int signo)
3113 {
3114 /* Use tkill, if possible, in case we are using nptl threads. If tkill
3115 fails, then we are not using nptl threads and we should be using kill. */
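  /* tkill directs the signal at one specific thread (LWP), whereas
     kill targets the whole thread group and lets the kernel pick a
     thread to deliver it to; that distinction matters when stopping
     individual LWPs.  */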
3116
3117 #ifdef __NR_tkill
3118 {
3119 static int tkill_failed;
3120
3121 if (!tkill_failed)
3122 {
3123 int ret;
3124
3125 errno = 0;
3126 ret = syscall (__NR_tkill, lwpid, signo);
3127 if (errno != ENOSYS)
3128 return ret;
3129 tkill_failed = 1;
3130 }
3131 }
3132 #endif
3133
3134 return kill (lwpid, signo);
3135 }
3136
3137 void
3138 linux_stop_lwp (struct lwp_info *lwp)
3139 {
3140 send_sigstop (lwp);
3141 }
3142
3143 static void
3144 send_sigstop (struct lwp_info *lwp)
3145 {
3146 int pid;
3147
3148 pid = lwpid_of (get_lwp_thread (lwp));
3149
3150 /* If we already have a pending stop signal for this process, don't
3151 send another. */
3152 if (lwp->stop_expected)
3153 {
3154 if (debug_threads)
3155 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3156
3157 return;
3158 }
3159
3160 if (debug_threads)
3161 debug_printf ("Sending sigstop to lwp %d\n", pid);
3162
3163 lwp->stop_expected = 1;
3164 kill_lwp (pid, SIGSTOP);
3165 }
3166
3167 static int
3168 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3169 {
3170 struct thread_info *thread = (struct thread_info *) entry;
3171 struct lwp_info *lwp = get_thread_lwp (thread);
3172
3173 /* Ignore EXCEPT. */
3174 if (lwp == except)
3175 return 0;
3176
3177 if (lwp->stopped)
3178 return 0;
3179
3180 send_sigstop (lwp);
3181 return 0;
3182 }
3183
3184 /* Increment the suspend count of an LWP, and stop it, if not stopped
3185 yet. */
3186 static int
3187 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3188 void *except)
3189 {
3190 struct thread_info *thread = (struct thread_info *) entry;
3191 struct lwp_info *lwp = get_thread_lwp (thread);
3192
3193 /* Ignore EXCEPT. */
3194 if (lwp == except)
3195 return 0;
3196
3197 lwp->suspended++;
3198
3199 return send_sigstop_callback (entry, except);
3200 }
3201
3202 static void
3203 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3204 {
3205 /* It's dead, really. */
3206 lwp->dead = 1;
3207
3208 /* Store the exit status for later. */
3209 lwp->status_pending_p = 1;
3210 lwp->status_pending = wstat;
3211
3212 /* Prevent trying to stop it. */
3213 lwp->stopped = 1;
3214
3215 /* No further stops are expected from a dead lwp. */
3216 lwp->stop_expected = 0;
3217 }
3218
3219 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3220
3221 static void
3222 wait_for_sigstop (void)
3223 {
3224 struct thread_info *saved_thread;
3225 ptid_t saved_tid;
3226 int wstat;
3227 int ret;
3228
3229 saved_thread = current_thread;
3230 if (saved_thread != NULL)
3231 saved_tid = saved_thread->entry.id;
3232 else
3233 saved_tid = null_ptid; /* avoid bogus unused warning */
3234
3235 if (debug_threads)
3236 debug_printf ("wait_for_sigstop: pulling events\n");
3237
3238 /* Passing NULL_PTID as filter indicates we want all events to be
3239 left pending. Eventually this returns when there are no
3240 unwaited-for children left. */
3241 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3242 &wstat, __WALL);
3243 gdb_assert (ret == -1);
3244
3245 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3246 current_thread = saved_thread;
3247 else
3248 {
3249 if (debug_threads)
3250 debug_printf ("Previously current thread died.\n");
3251
3252 if (non_stop)
3253 {
3254 /* We can't change the current inferior behind GDB's back,
3255 otherwise, a subsequent command may apply to the wrong
3256 process. */
3257 current_thread = NULL;
3258 }
3259 else
3260 {
3261 /* Set a valid thread as current. */
3262 set_desired_thread (0);
3263 }
3264 }
3265 }
3266
3267 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3268 move it out, because we need to report the stop event to GDB. For
3269 example, if the user puts a breakpoint in the jump pad, it's
3270 because she wants to debug it. */
3271
3272 static int
3273 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3274 {
3275 struct thread_info *thread = (struct thread_info *) entry;
3276 struct lwp_info *lwp = get_thread_lwp (thread);
3277
3278 gdb_assert (lwp->suspended == 0);
3279 gdb_assert (lwp->stopped);
3280
3281   /* Allow debugging the jump pad, gdb_collect, etc.  */
3282 return (supports_fast_tracepoints ()
3283 && agent_loaded_p ()
3284 && (gdb_breakpoint_here (lwp->stop_pc)
3285 || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT
3286 || thread->last_resume_kind == resume_step)
3287 && linux_fast_tracepoint_collecting (lwp, NULL));
3288 }
3289
3290 static void
3291 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3292 {
3293 struct thread_info *thread = (struct thread_info *) entry;
3294 struct lwp_info *lwp = get_thread_lwp (thread);
3295 int *wstat;
3296
3297 gdb_assert (lwp->suspended == 0);
3298 gdb_assert (lwp->stopped);
3299
3300 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3301
3302 /* Allow debugging the jump pad, gdb_collect, etc. */
3303 if (!gdb_breakpoint_here (lwp->stop_pc)
3304 && lwp->stop_reason != TARGET_STOPPED_BY_WATCHPOINT
3305 && thread->last_resume_kind != resume_step
3306 && maybe_move_out_of_jump_pad (lwp, wstat))
3307 {
3308 if (debug_threads)
3309 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3310 lwpid_of (thread));
3311
3312 if (wstat)
3313 {
3314 lwp->status_pending_p = 0;
3315 enqueue_one_deferred_signal (lwp, wstat);
3316
3317 if (debug_threads)
3318 debug_printf ("Signal %d for LWP %ld deferred "
3319 "(in jump pad)\n",
3320 WSTOPSIG (*wstat), lwpid_of (thread));
3321 }
3322
3323 linux_resume_one_lwp (lwp, 0, 0, NULL);
3324 }
3325 else
3326 lwp->suspended++;
3327 }
3328
3329 static int
3330 lwp_running (struct inferior_list_entry *entry, void *data)
3331 {
3332 struct thread_info *thread = (struct thread_info *) entry;
3333 struct lwp_info *lwp = get_thread_lwp (thread);
3334
3335 if (lwp->dead)
3336 return 0;
3337 if (lwp->stopped)
3338 return 0;
3339 return 1;
3340 }
3341
3342 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3343 If SUSPEND, then also increase the suspend count of every LWP,
3344 except EXCEPT. */
3345
3346 static void
3347 stop_all_lwps (int suspend, struct lwp_info *except)
3348 {
3349 /* Should not be called recursively. */
3350 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3351
3352 if (debug_threads)
3353 {
3354 debug_enter ();
3355 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3356 suspend ? "stop-and-suspend" : "stop",
3357 except != NULL
3358 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3359 : "none");
3360 }
3361
3362 stopping_threads = (suspend
3363 ? STOPPING_AND_SUSPENDING_THREADS
3364 : STOPPING_THREADS);
3365
3366 if (suspend)
3367 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3368 else
3369 find_inferior (&all_threads, send_sigstop_callback, except);
3370 wait_for_sigstop ();
3371 stopping_threads = NOT_STOPPING_THREADS;
3372
3373 if (debug_threads)
3374 {
3375 debug_printf ("stop_all_lwps done, setting stopping_threads "
3376 "back to !stopping\n");
3377 debug_exit ();
3378 }
3379 }
3380
3381 /* Resume execution of the inferior process.
3382 If STEP is nonzero, single-step it.
3383 If SIGNAL is nonzero, give it that signal. */
3384
3385 static void
3386 linux_resume_one_lwp (struct lwp_info *lwp,
3387 int step, int signal, siginfo_t *info)
3388 {
3389 struct thread_info *thread = get_lwp_thread (lwp);
3390 struct thread_info *saved_thread;
3391 int fast_tp_collecting;
3392
3393 if (lwp->stopped == 0)
3394 return;
3395
3396 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3397
3398 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3399
3400 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3401 user used the "jump" command, or "set $pc = foo"). */
3402 if (lwp->stop_pc != get_pc (lwp))
3403 {
3404 /* Collecting 'while-stepping' actions doesn't make sense
3405 anymore. */
3406 release_while_stepping_state_list (thread);
3407 }
3408
3409 /* If we have pending signals or status, and a new signal, enqueue the
3410 signal. Also enqueue the signal if we are waiting to reinsert a
3411 breakpoint; it will be picked up again below. */
3412 if (signal != 0
3413 && (lwp->status_pending_p
3414 || lwp->pending_signals != NULL
3415 || lwp->bp_reinsert != 0
3416 || fast_tp_collecting))
3417 {
3418 struct pending_signals *p_sig;
3419 p_sig = xmalloc (sizeof (*p_sig));
3420 p_sig->prev = lwp->pending_signals;
3421 p_sig->signal = signal;
3422 if (info == NULL)
3423 memset (&p_sig->info, 0, sizeof (siginfo_t));
3424 else
3425 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3426 lwp->pending_signals = p_sig;
3427 }
3428
3429 if (lwp->status_pending_p)
3430 {
3431 if (debug_threads)
3432 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3433 " has pending status\n",
3434 lwpid_of (thread), step ? "step" : "continue", signal,
3435 lwp->stop_expected ? "expected" : "not expected");
3436 return;
3437 }
3438
3439 saved_thread = current_thread;
3440 current_thread = thread;
3441
3442 if (debug_threads)
3443 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3444 lwpid_of (thread), step ? "step" : "continue", signal,
3445 lwp->stop_expected ? "expected" : "not expected");
3446
3447 /* This bit needs some thinking about. If we get a signal that
3448 we must report while a single-step reinsert is still pending,
3449 we often end up resuming the thread. It might be better to
3450 (ew) allow a stack of pending events; then we could be sure that
3451 the reinsert happened right away and not lose any signals.
3452
3453 Making this stack would also shrink the window in which breakpoints are
3454 uninserted (see comment in linux_wait_for_lwp) but not enough for
3455 complete correctness, so it won't solve that problem. It may be
3456 worthwhile just to solve this one, however. */
3457 if (lwp->bp_reinsert != 0)
3458 {
3459 if (debug_threads)
3460 debug_printf (" pending reinsert at 0x%s\n",
3461 paddress (lwp->bp_reinsert));
3462
3463 if (can_hardware_single_step ())
3464 {
3465 if (fast_tp_collecting == 0)
3466 {
3467 if (step == 0)
3468 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3469 if (lwp->suspended)
3470 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3471 lwp->suspended);
3472 }
3473
3474 step = 1;
3475 }
3476
3477 /* Postpone any pending signal. It was enqueued above. */
3478 signal = 0;
3479 }
3480
3481 if (fast_tp_collecting == 1)
3482 {
3483 if (debug_threads)
3484 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3485 " (exit-jump-pad-bkpt)\n",
3486 lwpid_of (thread));
3487
3488 /* Postpone any pending signal. It was enqueued above. */
3489 signal = 0;
3490 }
3491 else if (fast_tp_collecting == 2)
3492 {
3493 if (debug_threads)
3494 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3495 " single-stepping\n",
3496 lwpid_of (thread));
3497
3498 if (can_hardware_single_step ())
3499 step = 1;
3500 else
3501 {
3502 internal_error (__FILE__, __LINE__,
3503 "moving out of jump pad single-stepping"
3504 " not implemented on this target");
3505 }
3506
3507 /* Postpone any pending signal. It was enqueued above. */
3508 signal = 0;
3509 }
3510
3511 /* If we have while-stepping actions in this thread set it stepping.
3512 If we have a signal to deliver, it may or may not be set to
3513      SIG_IGN; we don't know.  Assume so, and allow collecting
3514 while-stepping into a signal handler. A possible smart thing to
3515 do would be to set an internal breakpoint at the signal return
3516 address, continue, and carry on catching this while-stepping
3517 action only when that breakpoint is hit. A future
3518 enhancement. */
3519 if (thread->while_stepping != NULL
3520 && can_hardware_single_step ())
3521 {
3522 if (debug_threads)
3523 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3524 lwpid_of (thread));
3525 step = 1;
3526 }
3527
3528 if (the_low_target.get_pc != NULL)
3529 {
3530 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3531
3532 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3533
3534 if (debug_threads)
3535 {
3536 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3537 (long) lwp->stop_pc);
3538 }
3539 }
3540
3541 /* If we have pending signals, consume one unless we are trying to
3542 reinsert a breakpoint or we're trying to finish a fast tracepoint
3543 collect. */
3544 if (lwp->pending_signals != NULL
3545 && lwp->bp_reinsert == 0
3546 && fast_tp_collecting == 0)
3547 {
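      /* New entries are pushed onto the head of pending_signals, so
	 walk to the tail to deliver the oldest signal first (FIFO).  */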
3548 struct pending_signals **p_sig;
3549
3550 p_sig = &lwp->pending_signals;
3551 while ((*p_sig)->prev != NULL)
3552 p_sig = &(*p_sig)->prev;
3553
3554 signal = (*p_sig)->signal;
3555 if ((*p_sig)->info.si_signo != 0)
3556 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3557 &(*p_sig)->info);
3558
3559 free (*p_sig);
3560 *p_sig = NULL;
3561 }
3562
3563 if (the_low_target.prepare_to_resume != NULL)
3564 the_low_target.prepare_to_resume (lwp);
3565
3566 regcache_invalidate_thread (thread);
3567 errno = 0;
3568 lwp->stopped = 0;
3569 lwp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
3570 lwp->stepping = step;
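  /* Resume the LWP.  For PTRACE_CONT and PTRACE_SINGLESTEP the data
     argument is the signal to deliver to the LWP as it resumes; zero
     means no signal.  */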
3571 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3572 (PTRACE_TYPE_ARG3) 0,
3573 /* Coerce to a uintptr_t first to avoid potential gcc warning
3574 of coercing an 8 byte integer to a 4 byte pointer. */
3575 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3576
3577 current_thread = saved_thread;
3578 if (errno)
3579 {
3580 /* ESRCH from ptrace either means that the thread was already
3581 running (an error) or that it is gone (a race condition). If
3582 it's gone, we will get a notification the next time we wait,
3583 so we can ignore the error. We could differentiate these
3584 two, but it's tricky without waiting; the thread still exists
3585 as a zombie, so sending it signal 0 would succeed. So just
3586 ignore ESRCH. */
3587 if (errno == ESRCH)
3588 return;
3589
3590 perror_with_name ("ptrace");
3591 }
3592 }
3593
3594 struct thread_resume_array
3595 {
3596 struct thread_resume *resume;
3597 size_t n;
3598 };
3599
3600 /* This function is called once per thread via find_inferior.
3601 ARG is a pointer to a thread_resume_array struct.
3602 We look up the thread specified by ENTRY in ARG, and mark the thread
3603 with a pointer to the appropriate resume request.
3604
3605 This algorithm is O(threads * resume elements), but resume elements
3606 is small (and will remain small at least until GDB supports thread
3607 suspension). */
3608
3609 static int
3610 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3611 {
3612 struct thread_info *thread = (struct thread_info *) entry;
3613 struct lwp_info *lwp = get_thread_lwp (thread);
3614 int ndx;
3615 struct thread_resume_array *r;
3616
3617 r = arg;
3618
3619 for (ndx = 0; ndx < r->n; ndx++)
3620 {
3621 ptid_t ptid = r->resume[ndx].thread;
3622 if (ptid_equal (ptid, minus_one_ptid)
3623 || ptid_equal (ptid, entry->id)
3624 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3625 of PID'. */
3626 || (ptid_get_pid (ptid) == pid_of (thread)
3627 && (ptid_is_pid (ptid)
3628 || ptid_get_lwp (ptid) == -1)))
3629 {
3630 if (r->resume[ndx].kind == resume_stop
3631 && thread->last_resume_kind == resume_stop)
3632 {
3633 if (debug_threads)
3634 debug_printf ("already %s LWP %ld at GDB's request\n",
3635 (thread->last_status.kind
3636 == TARGET_WAITKIND_STOPPED)
3637 ? "stopped"
3638 : "stopping",
3639 lwpid_of (thread));
3640
3641 continue;
3642 }
3643
3644 lwp->resume = &r->resume[ndx];
3645 thread->last_resume_kind = lwp->resume->kind;
3646
3647 lwp->step_range_start = lwp->resume->step_range_start;
3648 lwp->step_range_end = lwp->resume->step_range_end;
3649
3650 /* If we had a deferred signal to report, dequeue one now.
3651 This can happen if LWP gets more than one signal while
3652 trying to get out of a jump pad. */
3653 if (lwp->stopped
3654 && !lwp->status_pending_p
3655 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3656 {
3657 lwp->status_pending_p = 1;
3658
3659 if (debug_threads)
3660 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3661 "leaving status pending.\n",
3662 WSTOPSIG (lwp->status_pending),
3663 lwpid_of (thread));
3664 }
3665
3666 return 0;
3667 }
3668 }
3669
3670 /* No resume action for this thread. */
3671 lwp->resume = NULL;
3672
3673 return 0;
3674 }
3675
3676 /* find_inferior callback for linux_resume.
3677 Set *FLAG_P if this lwp has an interesting status pending. */
3678
3679 static int
3680 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3681 {
3682 struct thread_info *thread = (struct thread_info *) entry;
3683 struct lwp_info *lwp = get_thread_lwp (thread);
3684
3685 /* LWPs which will not be resumed are not interesting, because
3686 we might not wait for them next time through linux_wait. */
3687 if (lwp->resume == NULL)
3688 return 0;
3689
3690 if (thread_still_has_status_pending_p (thread))
3691 * (int *) flag_p = 1;
3692
3693 return 0;
3694 }
3695
3696 /* Return 1 if this lwp that GDB wants running is stopped at an
3697 internal breakpoint that we need to step over. It assumes that any
3698 required STOP_PC adjustment has already been propagated to the
3699 inferior's regcache. */
3700
3701 static int
3702 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3703 {
3704 struct thread_info *thread = (struct thread_info *) entry;
3705 struct lwp_info *lwp = get_thread_lwp (thread);
3706 struct thread_info *saved_thread;
3707 CORE_ADDR pc;
3708
3709 /* LWPs which will not be resumed are not interesting, because we
3710 might not wait for them next time through linux_wait. */
3711
3712 if (!lwp->stopped)
3713 {
3714 if (debug_threads)
3715 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3716 lwpid_of (thread));
3717 return 0;
3718 }
3719
3720 if (thread->last_resume_kind == resume_stop)
3721 {
3722 if (debug_threads)
3723 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3724 " stopped\n",
3725 lwpid_of (thread));
3726 return 0;
3727 }
3728
3729 gdb_assert (lwp->suspended >= 0);
3730
3731 if (lwp->suspended)
3732 {
3733 if (debug_threads)
3734 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3735 lwpid_of (thread));
3736 return 0;
3737 }
3738
3739 if (!lwp->need_step_over)
3740 {
3741 if (debug_threads)
3742 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3743 }
3744
3745 if (lwp->status_pending_p)
3746 {
3747 if (debug_threads)
3748 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3749 " status.\n",
3750 lwpid_of (thread));
3751 return 0;
3752 }
3753
3754 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3755 or we have. */
3756 pc = get_pc (lwp);
3757
3758 /* If the PC has changed since we stopped, then don't do anything,
3759 and let the breakpoint/tracepoint be hit. This happens if, for
3760 instance, GDB handled the decr_pc_after_break subtraction itself,
3761 GDB is OOL (out-of-line) stepping this thread, or the user has
3762 issued a "jump" command, or poked the thread's registers directly. */
3763 if (pc != lwp->stop_pc)
3764 {
3765 if (debug_threads)
3766 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3767 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3768 lwpid_of (thread),
3769 paddress (lwp->stop_pc), paddress (pc));
3770
3771 lwp->need_step_over = 0;
3772 return 0;
3773 }
3774
3775 saved_thread = current_thread;
3776 current_thread = thread;
3777
3778 /* We can only step over breakpoints we know about. */
3779 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3780 {
3781 /* Don't step over a breakpoint that GDB expects to hit
3782 though. If the condition is being evaluated on the target's side
3783 and it evaluates to false, step over this breakpoint as well. */
3784 if (gdb_breakpoint_here (pc)
3785 && gdb_condition_true_at_breakpoint (pc)
3786 && gdb_no_commands_at_breakpoint (pc))
3787 {
3788 if (debug_threads)
3789 debug_printf ("Need step over [LWP %ld]? yes, but found"
3790 " GDB breakpoint at 0x%s; skipping step over\n",
3791 lwpid_of (thread), paddress (pc));
3792
3793 current_thread = saved_thread;
3794 return 0;
3795 }
3796 else
3797 {
3798 if (debug_threads)
3799 debug_printf ("Need step over [LWP %ld]? yes, "
3800 "found breakpoint at 0x%s\n",
3801 lwpid_of (thread), paddress (pc));
3802
3803 /* We've found an lwp that needs stepping over --- return 1 so
3804 that find_inferior stops looking. */
3805 current_thread = saved_thread;
3806
3807 /* If the step over is cancelled, this is set again. */
3808 lwp->need_step_over = 0;
3809 return 1;
3810 }
3811 }
3812
3813 current_thread = saved_thread;
3814
3815 if (debug_threads)
3816 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3817 " at 0x%s\n",
3818 lwpid_of (thread), paddress (pc));
3819
3820 return 0;
3821 }
3822
3823 /* Start a step-over operation on LWP.  When LWP is stopped at a
3824 breakpoint, we need to move the breakpoint out of the way in order
3825 to make progress.  If we let other threads run while we do that,
3826 they may pass by the breakpoint location and miss hitting it.  To
3827 avoid that, a step-over momentarily stops all threads while LWP is
3828 single-stepped with the breakpoint temporarily uninserted from
3829 the inferior.  When the single-step finishes, we reinsert the
3830 breakpoint, and let all threads that are supposed to be running,
3831 run again.
3832
3833 On targets that don't support hardware single-step, we don't
3834 currently support full software single-stepping. Instead, we only
3835 support stepping over the thread event breakpoint, by asking the
3836 low target where to place a reinsert breakpoint. Since this
3837 routine assumes the breakpoint being stepped over is a thread event
3838 breakpoint, the return address of the current function is usually
3839 a good enough place to set the reinsert breakpoint. */
3840
3841 static int
3842 start_step_over (struct lwp_info *lwp)
3843 {
3844 struct thread_info *thread = get_lwp_thread (lwp);
3845 struct thread_info *saved_thread;
3846 CORE_ADDR pc;
3847 int step;
3848
3849 if (debug_threads)
3850 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3851 lwpid_of (thread));
3852
3853 stop_all_lwps (1, lwp);
3854 gdb_assert (lwp->suspended == 0);
3855
3856 if (debug_threads)
3857 debug_printf ("Done stopping all threads for step-over.\n");
3858
3859 /* Note, we should always reach here with an already adjusted PC,
3860 either by GDB (if we're resuming due to GDB's request), or by our
3861 caller, if we just finished handling an internal breakpoint GDB
3862 shouldn't care about. */
3863 pc = get_pc (lwp);
3864
3865 saved_thread = current_thread;
3866 current_thread = thread;
3867
3868 lwp->bp_reinsert = pc;
3869 uninsert_breakpoints_at (pc);
3870 uninsert_fast_tracepoint_jumps_at (pc);
3871
3872 if (can_hardware_single_step ())
3873 {
3874 step = 1;
3875 }
3876 else
3877 {
3878 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3879 set_reinsert_breakpoint (raddr);
3880 step = 0;
3881 }
3882
3883 current_thread = saved_thread;
3884
3885 linux_resume_one_lwp (lwp, step, 0, NULL);
3886
3887 /* Require next event from this LWP. */
3888 step_over_bkpt = thread->entry.id;
3889 return 1;
3890 }
3891
3892 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3893 start_step_over, if still there, and delete any reinsert
3894 breakpoints we've set, on non-hardware single-step targets. */
3895
3896 static int
3897 finish_step_over (struct lwp_info *lwp)
3898 {
3899 if (lwp->bp_reinsert != 0)
3900 {
3901 if (debug_threads)
3902 debug_printf ("Finished step over.\n");
3903
3904 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3905 may be no breakpoint to reinsert there by now. */
3906 reinsert_breakpoints_at (lwp->bp_reinsert);
3907 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3908
3909 lwp->bp_reinsert = 0;
3910
3911 /* Delete any software-single-step reinsert breakpoints. No
3912 longer needed. We don't have to worry about other threads
3913 hitting this trap, and later not being able to explain it,
3914 because we were stepping over a breakpoint, and we hold all
3915 threads but LWP stopped while doing that. */
3916 if (!can_hardware_single_step ())
3917 delete_reinsert_breakpoints ();
3918
3919 step_over_bkpt = null_ptid;
3920 return 1;
3921 }
3922 else
3923 return 0;
3924 }
3925
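/* A condensed sketch (illustrative only) of the full step-over dance
   implemented by start_step_over and finish_step_over above, assuming
   hardware single-step.  In the real flow the second half runs later,
   from linux_wait_1, once the step completes; the name
   example_step_over_shape is hypothetical and the bp_reinsert /
   step_over_bkpt bookkeeping is elided.  */

static void
example_step_over_shape (struct lwp_info *lwp)
{
  CORE_ADDR pc = get_pc (lwp);

  stop_all_lwps (1, lwp);                  /* Freeze and suspend the others.  */
  uninsert_breakpoints_at (pc);            /* Lift the trap at PC.  */
  linux_resume_one_lwp (lwp, 1, 0, NULL);  /* Hardware single-step LWP.  */

  /* ...the step completes and is reported by waitpid; then:  */
  reinsert_breakpoints_at (pc);            /* Put the trap back.  */
  unstop_all_lwps (1, lwp);                /* Unsuspend and resume the rest.  */
}
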
3926 /* This function is called once per thread. We check the thread's resume
3927 request, which will tell us whether to resume, step, or leave the thread
3928 stopped; and what signal, if any, it should be sent.
3929
3930 For threads which we aren't explicitly told otherwise, we preserve
3931 the stepping flag; this is used for stepping over gdbserver-placed
3932 breakpoints.
3933
3934 If pending_flags was set in any thread, we queue any needed
3935 signals, since we won't actually resume. We already have a pending
3936 event to report, so we don't need to preserve any step requests;
3937 they should be re-issued if necessary. */
3938
3939 static int
3940 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3941 {
3942 struct thread_info *thread = (struct thread_info *) entry;
3943 struct lwp_info *lwp = get_thread_lwp (thread);
3944 int step;
3945 int leave_all_stopped = * (int *) arg;
3946 int leave_pending;
3947
3948 if (lwp->resume == NULL)
3949 return 0;
3950
3951 if (lwp->resume->kind == resume_stop)
3952 {
3953 if (debug_threads)
3954 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3955
3956 if (!lwp->stopped)
3957 {
3958 if (debug_threads)
3959 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3960
3961 /* Stop the thread, and wait for the event asynchronously,
3962 through the event loop. */
3963 send_sigstop (lwp);
3964 }
3965 else
3966 {
3967 if (debug_threads)
3968 debug_printf ("already stopped LWP %ld\n",
3969 lwpid_of (thread));
3970
3971 /* The LWP may have been stopped in an internal event that
3972 was not meant to be notified back to GDB (e.g., gdbserver
3973 breakpoint), so we should be reporting a stop event in
3974 this case too. */
3975
3976 /* If the thread already has a pending SIGSTOP, this is a
3977 no-op. Otherwise, something later will presumably resume
3978 the thread and this will cause it to cancel any pending
3979 operation, due to last_resume_kind == resume_stop. If
3980 the thread already has a pending status to report, we
3981 will still report it the next time we wait - see
3982 status_pending_p_callback. */
3983
3984 /* If we already have a pending signal to report, then
3985 there's no need to queue a SIGSTOP, as this means we're
3986 midway through moving the LWP out of the jumppad, and we
3987 will report the pending signal as soon as that is
3988 finished. */
3989 if (lwp->pending_signals_to_report == NULL)
3990 send_sigstop (lwp);
3991 }
3992
3993 /* For stop requests, we're done. */
3994 lwp->resume = NULL;
3995 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3996 return 0;
3997 }
3998
3999 /* If this thread which is about to be resumed has a pending status,
4000 then don't resume any threads - we can just report the pending
4001 status. Make sure to queue any signals that would otherwise be
4002 sent. In all-stop mode, we make this decision based on whether *any*
4003 thread has a pending status. If there's a thread that needs the
4004 step-over-breakpoint dance, then don't resume any other thread
4005 but that particular one. */
4006 leave_pending = (lwp->status_pending_p || leave_all_stopped);
4007
4008 if (!leave_pending)
4009 {
4010 if (debug_threads)
4011 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
4012
4013 step = (lwp->resume->kind == resume_step);
4014 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
4015 }
4016 else
4017 {
4018 if (debug_threads)
4019 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
4020
4021 /* If we have a new signal, enqueue the signal. */
4022 if (lwp->resume->sig != 0)
4023 {
4024 struct pending_signals *p_sig;
4025 p_sig = xmalloc (sizeof (*p_sig));
4026 p_sig->prev = lwp->pending_signals;
4027 p_sig->signal = lwp->resume->sig;
4028 memset (&p_sig->info, 0, sizeof (siginfo_t));
4029
4030 /* If this is the same signal we were previously stopped by,
4031 make sure to queue its siginfo. We can ignore the return
4032 value of ptrace; if it fails, we'll skip
4033 PTRACE_SETSIGINFO. */
4034 if (WIFSTOPPED (lwp->last_status)
4035 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
4036 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
4037 &p_sig->info);
4038
4039 lwp->pending_signals = p_sig;
4040 }
4041 }
4042
4043 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
4044 lwp->resume = NULL;
4045 return 0;
4046 }
4047
4048 static void
4049 linux_resume (struct thread_resume *resume_info, size_t n)
4050 {
4051 struct thread_resume_array array = { resume_info, n };
4052 struct thread_info *need_step_over = NULL;
4053 int any_pending;
4054 int leave_all_stopped;
4055
4056 if (debug_threads)
4057 {
4058 debug_enter ();
4059 debug_printf ("linux_resume:\n");
4060 }
4061
4062 find_inferior (&all_threads, linux_set_resume_request, &array);
4063
4064 /* If there is a thread which would otherwise be resumed, which has
4065 a pending status, then don't resume any threads - we can just
4066 report the pending status. Make sure to queue any signals that
4067 would otherwise be sent. In non-stop mode, we'll apply this
4068 logic to each thread individually. We consume all pending events
4069 before considering starting a step-over (in all-stop). */
4070 any_pending = 0;
4071 if (!non_stop)
4072 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
4073
4074 /* If there is a thread which would otherwise be resumed, which is
4075 stopped at a breakpoint that needs stepping over, then don't
4076 resume any threads - have it step over the breakpoint with all
4077 other threads stopped, then resume all threads again. Make sure
4078 to queue any signals that would otherwise be delivered or
4079 queued. */
4080 if (!any_pending && supports_breakpoints ())
4081 need_step_over
4082 = (struct thread_info *) find_inferior (&all_threads,
4083 need_step_over_p, NULL);
4084
4085 leave_all_stopped = (need_step_over != NULL || any_pending);
4086
4087 if (debug_threads)
4088 {
4089 if (need_step_over != NULL)
4090 debug_printf ("Not resuming all, need step over\n");
4091 else if (any_pending)
4092 debug_printf ("Not resuming, all-stop and found "
4093 "an LWP with pending status\n");
4094 else
4095 debug_printf ("Resuming, no pending status or step over needed\n");
4096 }
4097
4098 /* Even if we're leaving threads stopped, queue all signals we'd
4099 otherwise deliver. */
4100 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4101
4102 if (need_step_over)
4103 start_step_over (get_thread_lwp (need_step_over));
4104
4105 if (debug_threads)
4106 {
4107 debug_printf ("linux_resume done\n");
4108 debug_exit ();
4109 }
4110 }
4111
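/* A sketch (illustrative only) of what RESUME_INFO looks like by the
   time linux_resume is reached for a packet such as "vCont;s:p1.5;c"
   (step LWP 5 of process 1, continue everything else).  The actual
   decoding lives in server.c; example_resume_actions is a
   hypothetical name.  */

static void
example_resume_actions (void)
{
  struct thread_resume actions[2];

  memset (actions, 0, sizeof (actions));

  /* Specific actions come first...  */
  actions[0].thread = ptid_build (1, 5, 0);
  actions[0].kind = resume_step;
  actions[0].sig = 0;

  /* ...and the bare "c" default matches everything left over,
     because linux_set_resume_request takes the first match.  */
  actions[1].thread = minus_one_ptid;
  actions[1].kind = resume_continue;
  actions[1].sig = 0;

  linux_resume (actions, 2);
}
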
4112 /* This function is called once per thread. We check the thread's
4113 last resume request, which will tell us whether to resume, step, or
4114 leave the thread stopped. Any signal the client requested to be
4115 delivered has already been enqueued at this point.
4116
4117 If any thread that GDB wants running is stopped at an internal
4118 breakpoint that needs stepping over, we start a step-over operation
4119 on that particular thread, and leave all others stopped. */
4120
4121 static int
4122 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4123 {
4124 struct thread_info *thread = (struct thread_info *) entry;
4125 struct lwp_info *lwp = get_thread_lwp (thread);
4126 int step;
4127
4128 if (lwp == except)
4129 return 0;
4130
4131 if (debug_threads)
4132 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4133
4134 if (!lwp->stopped)
4135 {
4136 if (debug_threads)
4137 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4138 return 0;
4139 }
4140
4141 if (thread->last_resume_kind == resume_stop
4142 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4143 {
4144 if (debug_threads)
4145 debug_printf (" client wants LWP %ld to remain stopped\n",
4146 lwpid_of (thread));
4147 return 0;
4148 }
4149
4150 if (lwp->status_pending_p)
4151 {
4152 if (debug_threads)
4153 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4154 lwpid_of (thread));
4155 return 0;
4156 }
4157
4158 gdb_assert (lwp->suspended >= 0);
4159
4160 if (lwp->suspended)
4161 {
4162 if (debug_threads)
4163 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4164 return 0;
4165 }
4166
4167 if (thread->last_resume_kind == resume_stop
4168 && lwp->pending_signals_to_report == NULL
4169 && lwp->collecting_fast_tracepoint == 0)
4170 {
4171 /* We haven't reported this LWP as stopped yet (otherwise, the
4172 last_status.kind check above would catch it, and we wouldn't
4173 reach here).  This LWP may have been momentarily paused by a
4174 stop_all_lwps call while handling, for example, another LWP's
4175 step-over. In that case, the pending expected SIGSTOP signal
4176 that was queued at vCont;t handling time will have already
4177 been consumed by wait_for_sigstop, and so we need to requeue
4178 another one here. Note that if the LWP already has a SIGSTOP
4179 pending, this is a no-op. */
4180
4181 if (debug_threads)
4182 debug_printf ("Client wants LWP %ld to stop. "
4183 "Making sure it has a SIGSTOP pending\n",
4184 lwpid_of (thread));
4185
4186 send_sigstop (lwp);
4187 }
4188
4189 step = thread->last_resume_kind == resume_step;
4190 linux_resume_one_lwp (lwp, step, 0, NULL);
4191 return 0;
4192 }
4193
4194 static int
4195 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4196 {
4197 struct thread_info *thread = (struct thread_info *) entry;
4198 struct lwp_info *lwp = get_thread_lwp (thread);
4199
4200 if (lwp == except)
4201 return 0;
4202
4203 lwp->suspended--;
4204 gdb_assert (lwp->suspended >= 0);
4205
4206 return proceed_one_lwp (entry, except);
4207 }
4208
4209 /* When we finish a step-over, set threads running again. If there's
4210 another thread that may need a step-over, now's the time to start
4211 it. Eventually, we'll move all threads past their breakpoints. */
4212
4213 static void
4214 proceed_all_lwps (void)
4215 {
4216 struct thread_info *need_step_over;
4217
4218 /* If there is a thread which would otherwise be resumed, which is
4219 stopped at a breakpoint that needs stepping over, then don't
4220 resume any threads - have it step over the breakpoint with all
4221 other threads stopped, then resume all threads again. */
4222
4223 if (supports_breakpoints ())
4224 {
4225 need_step_over
4226 = (struct thread_info *) find_inferior (&all_threads,
4227 need_step_over_p, NULL);
4228
4229 if (need_step_over != NULL)
4230 {
4231 if (debug_threads)
4232 debug_printf ("proceed_all_lwps: found "
4233 "thread %ld needing a step-over\n",
4234 lwpid_of (need_step_over));
4235
4236 start_step_over (get_thread_lwp (need_step_over));
4237 return;
4238 }
4239 }
4240
4241 if (debug_threads)
4242 debug_printf ("Proceeding, no step-over needed\n");
4243
4244 find_inferior (&all_threads, proceed_one_lwp, NULL);
4245 }
4246
4247 /* Stopped LWPs that the client wanted to be running, that don't have
4248 pending statuses, are set to run again, except for EXCEPT, if not
4249 NULL. This undoes a stop_all_lwps call. */
4250
4251 static void
4252 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4253 {
4254 if (debug_threads)
4255 {
4256 debug_enter ();
4257 if (except)
4258 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4259 lwpid_of (get_lwp_thread (except)));
4260 else
4261 debug_printf ("unstopping all lwps\n");
4262 }
4263
4264 if (unsuspend)
4265 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4266 else
4267 find_inferior (&all_threads, proceed_one_lwp, except);
4268
4269 if (debug_threads)
4270 {
4271 debug_printf ("unstop_all_lwps done\n");
4272 debug_exit ();
4273 }
4274 }
4275
4276
4277 #ifdef HAVE_LINUX_REGSETS
4278
4279 #define use_linux_regsets 1
4280
4281 /* Returns true if REGSET has been disabled. */
4282
4283 static int
4284 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4285 {
4286 return (info->disabled_regsets != NULL
4287 && info->disabled_regsets[regset - info->regsets]);
4288 }
4289
4290 /* Disable REGSET. */
4291
4292 static void
4293 disable_regset (struct regsets_info *info, struct regset_info *regset)
4294 {
4295 int dr_offset;
4296
4297 dr_offset = regset - info->regsets;
4298 if (info->disabled_regsets == NULL)
4299 info->disabled_regsets = xcalloc (1, info->num_regsets);
4300 info->disabled_regsets[dr_offset] = 1;
4301 }
4302
4303 static int
4304 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4305 struct regcache *regcache)
4306 {
4307 struct regset_info *regset;
4308 int saw_general_regs = 0;
4309 int pid;
4310 struct iovec iov;
4311
4312 pid = lwpid_of (current_thread);
4313 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4314 {
4315 void *buf, *data;
4316 int nt_type, res;
4317
4318 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4319 continue;
4320
4321 buf = xmalloc (regset->size);
4322
4323 nt_type = regset->nt_type;
4324 if (nt_type)
4325 {
4326 iov.iov_base = buf;
4327 iov.iov_len = regset->size;
4328 data = (void *) &iov;
4329 }
4330 else
4331 data = buf;
4332
4333 #ifndef __sparc__
4334 res = ptrace (regset->get_request, pid,
4335 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4336 #else
4337 res = ptrace (regset->get_request, pid, data, nt_type);
4338 #endif
4339 if (res < 0)
4340 {
4341 if (errno == EIO)
4342 {
4343 /* If we get EIO on a regset, do not try it again for
4344 this process mode. */
4345 disable_regset (regsets_info, regset);
4346 }
4347 else if (errno == ENODATA)
4348 {
4349 /* ENODATA may be returned if the regset is currently
4350 not "active". This can happen in normal operation,
4351 so suppress the warning in this case. */
4352 }
4353 else
4354 {
4355 char s[256];
4356 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4357 pid);
4358 perror (s);
4359 }
4360 }
4361 else
4362 {
4363 if (regset->type == GENERAL_REGS)
4364 saw_general_regs = 1;
4365 regset->store_function (regcache, buf);
4366 }
4367 free (buf);
4368 }
4369 if (saw_general_regs)
4370 return 0;
4371 else
4372 return 1;
4373 }
4374
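/* A minimal sketch (illustrative only) of the regset transfer above,
   reduced to the common non-SPARC case.  PTRACE_GETREGSET takes an
   NT_* note type (here NT_PRSTATUS, from <elf.h>, selecting the
   general-purpose registers) and a struct iovec; on success the
   kernel updates iov_len to the number of bytes it actually wrote.
   example_fetch_gregs is a hypothetical name.  */

static int
example_fetch_gregs (int pid, void *buf, size_t size)
{
  struct iovec iov;

  iov.iov_base = buf;
  iov.iov_len = size;

  if (ptrace (PTRACE_GETREGSET, pid,
              (PTRACE_TYPE_ARG3) (long) NT_PRSTATUS, &iov) < 0)
    return errno;

  return 0;
}
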
4375 static int
4376 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4377 struct regcache *regcache)
4378 {
4379 struct regset_info *regset;
4380 int saw_general_regs = 0;
4381 int pid;
4382 struct iovec iov;
4383
4384 pid = lwpid_of (current_thread);
4385 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4386 {
4387 void *buf, *data;
4388 int nt_type, res;
4389
4390 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4391 || regset->fill_function == NULL)
4392 continue;
4393
4394 buf = xmalloc (regset->size);
4395
4396 /* First fill the buffer with the current register set contents,
4397 in case there are any items in the kernel's regset that are
4398 not in gdbserver's regcache. */
4399
4400 nt_type = regset->nt_type;
4401 if (nt_type)
4402 {
4403 iov.iov_base = buf;
4404 iov.iov_len = regset->size;
4405 data = (void *) &iov;
4406 }
4407 else
4408 data = buf;
4409
4410 #ifndef __sparc__
4411 res = ptrace (regset->get_request, pid,
4412 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4413 #else
4414 res = ptrace (regset->get_request, pid, data, nt_type);
4415 #endif
4416
4417 if (res == 0)
4418 {
4419 /* Then overlay our cached registers on that. */
4420 regset->fill_function (regcache, buf);
4421
4422 /* Only now do we write the register set. */
4423 #ifndef __sparc__
4424 res = ptrace (regset->set_request, pid,
4425 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4426 #else
4427 res = ptrace (regset->set_request, pid, data, nt_type);
4428 #endif
4429 }
4430
4431 if (res < 0)
4432 {
4433 if (errno == EIO)
4434 {
4435 /* If we get EIO on a regset, do not try it again for
4436 this process mode. */
4437 disable_regset (regsets_info, regset);
4438 }
4439 else if (errno == ESRCH)
4440 {
4441 /* At this point, ESRCH should mean the process is
4442 already gone, in which case we simply ignore attempts
4443 to change its registers. See also the related
4444 comment in linux_resume_one_lwp. */
4445 free (buf);
4446 return 0;
4447 }
4448 else
4449 {
4450 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4451 }
4452 }
4453 else if (regset->type == GENERAL_REGS)
4454 saw_general_regs = 1;
4455 free (buf);
4456 }
4457 if (saw_general_regs)
4458 return 0;
4459 else
4460 return 1;
4461 }
4462
4463 #else /* !HAVE_LINUX_REGSETS */
4464
4465 #define use_linux_regsets 0
4466 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4467 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4468
4469 #endif
4470
4471 /* Return 1 if register REGNO is supported by one of the regset ptrace
4472 calls or 0 if it has to be transferred individually. */
4473
4474 static int
4475 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4476 {
4477 unsigned char mask = 1 << (regno % 8);
4478 size_t index = regno / 8;
4479
4480 return (use_linux_regsets
4481 && (regs_info->regset_bitmap == NULL
4482 || (regs_info->regset_bitmap[index] & mask) != 0));
4483 }
4484
4485 #ifdef HAVE_LINUX_USRREGS
4486
4487 int
4488 register_addr (const struct usrregs_info *usrregs, int regnum)
4489 {
4490 int addr;
4491
4492 if (regnum < 0 || regnum >= usrregs->num_regs)
4493 error ("Invalid register number %d.", regnum);
4494
4495 addr = usrregs->regmap[regnum];
4496
4497 return addr;
4498 }
4499
4500 /* Fetch one register. */
4501 static void
4502 fetch_register (const struct usrregs_info *usrregs,
4503 struct regcache *regcache, int regno)
4504 {
4505 CORE_ADDR regaddr;
4506 int i, size;
4507 char *buf;
4508 int pid;
4509
4510 if (regno >= usrregs->num_regs)
4511 return;
4512 if ((*the_low_target.cannot_fetch_register) (regno))
4513 return;
4514
4515 regaddr = register_addr (usrregs, regno);
4516 if (regaddr == -1)
4517 return;
4518
4519 size = ((register_size (regcache->tdesc, regno)
4520 + sizeof (PTRACE_XFER_TYPE) - 1)
4521 & -sizeof (PTRACE_XFER_TYPE));
4522 buf = alloca (size);
4523
4524 pid = lwpid_of (current_thread);
4525 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4526 {
4527 errno = 0;
4528 *(PTRACE_XFER_TYPE *) (buf + i) =
4529 ptrace (PTRACE_PEEKUSER, pid,
4530 /* Coerce to a uintptr_t first to avoid potential gcc warning
4531 about coercing an 8 byte integer to a 4 byte pointer. */
4532 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4533 regaddr += sizeof (PTRACE_XFER_TYPE);
4534 if (errno != 0)
4535 error ("reading register %d: %s", regno, strerror (errno));
4536 }
4537
4538 if (the_low_target.supply_ptrace_register)
4539 the_low_target.supply_ptrace_register (regcache, regno, buf);
4540 else
4541 supply_register (regcache, regno, buf);
4542 }
4543
4544 /* Store one register. */
4545 static void
4546 store_register (const struct usrregs_info *usrregs,
4547 struct regcache *regcache, int regno)
4548 {
4549 CORE_ADDR regaddr;
4550 int i, size;
4551 char *buf;
4552 int pid;
4553
4554 if (regno >= usrregs->num_regs)
4555 return;
4556 if ((*the_low_target.cannot_store_register) (regno))
4557 return;
4558
4559 regaddr = register_addr (usrregs, regno);
4560 if (regaddr == -1)
4561 return;
4562
4563 size = ((register_size (regcache->tdesc, regno)
4564 + sizeof (PTRACE_XFER_TYPE) - 1)
4565 & -sizeof (PTRACE_XFER_TYPE));
4566 buf = alloca (size);
4567 memset (buf, 0, size);
4568
4569 if (the_low_target.collect_ptrace_register)
4570 the_low_target.collect_ptrace_register (regcache, regno, buf);
4571 else
4572 collect_register (regcache, regno, buf);
4573
4574 pid = lwpid_of (current_thread);
4575 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4576 {
4577 errno = 0;
4578 ptrace (PTRACE_POKEUSER, pid,
4579 /* Coerce to a uintptr_t first to avoid potential gcc warning
4580 about coercing an 8 byte integer to a 4 byte pointer. */
4581 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4582 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4583 if (errno != 0)
4584 {
4585 /* At this point, ESRCH should mean the process is
4586 already gone, in which case we simply ignore attempts
4587 to change its registers. See also the related
4588 comment in linux_resume_one_lwp. */
4589 if (errno == ESRCH)
4590 return;
4591
4592 if ((*the_low_target.cannot_store_register) (regno) == 0)
4593 error ("writing register %d: %s", regno, strerror (errno));
4594 }
4595 regaddr += sizeof (PTRACE_XFER_TYPE);
4596 }
4597 }
4598
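/* The core of the USRREGS path in isolation, as a sketch
   (illustrative only).  PTRACE_PEEKUSER returns the fetched word in
   the syscall result, so a -1 return is ambiguous: errno must be
   cleared beforehand and checked afterwards.  example_peek_user_word
   is a hypothetical name; OFFSET is what register_addr returns.  */

static long
example_peek_user_word (int pid, long offset, int *err)
{
  long val;

  errno = 0;
  val = ptrace (PTRACE_PEEKUSER, pid,
                (PTRACE_TYPE_ARG3) (uintptr_t) offset, (PTRACE_TYPE_ARG4) 0);
  *err = errno;
  return val;
}
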
4599 /* Fetch all registers, or just one, from the child process.
4600 If REGNO is -1, do this for all registers, skipping any that are
4601 assumed to have been retrieved by regsets_fetch_inferior_registers,
4602 unless ALL is non-zero.
4603 Otherwise, REGNO specifies which register (so we can save time). */
4604 static void
4605 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4606 struct regcache *regcache, int regno, int all)
4607 {
4608 struct usrregs_info *usr = regs_info->usrregs;
4609
4610 if (regno == -1)
4611 {
4612 for (regno = 0; regno < usr->num_regs; regno++)
4613 if (all || !linux_register_in_regsets (regs_info, regno))
4614 fetch_register (usr, regcache, regno);
4615 }
4616 else
4617 fetch_register (usr, regcache, regno);
4618 }
4619
4620 /* Store our register values back into the inferior.
4621 If REGNO is -1, do this for all registers, skipping any that are
4622 assumed to have been saved by regsets_store_inferior_registers,
4623 unless ALL is non-zero.
4624 Otherwise, REGNO specifies which register (so we can save time). */
4625 static void
4626 usr_store_inferior_registers (const struct regs_info *regs_info,
4627 struct regcache *regcache, int regno, int all)
4628 {
4629 struct usrregs_info *usr = regs_info->usrregs;
4630
4631 if (regno == -1)
4632 {
4633 for (regno = 0; regno < usr->num_regs; regno++)
4634 if (all || !linux_register_in_regsets (regs_info, regno))
4635 store_register (usr, regcache, regno);
4636 }
4637 else
4638 store_register (usr, regcache, regno);
4639 }
4640
4641 #else /* !HAVE_LINUX_USRREGS */
4642
4643 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4644 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4645
4646 #endif
4647
4648
4649 void
4650 linux_fetch_registers (struct regcache *regcache, int regno)
4651 {
4652 int use_regsets;
4653 int all = 0;
4654 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4655
4656 if (regno == -1)
4657 {
4658 if (the_low_target.fetch_register != NULL
4659 && regs_info->usrregs != NULL)
4660 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4661 (*the_low_target.fetch_register) (regcache, regno);
4662
4663 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4664 if (regs_info->usrregs != NULL)
4665 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4666 }
4667 else
4668 {
4669 if (the_low_target.fetch_register != NULL
4670 && (*the_low_target.fetch_register) (regcache, regno))
4671 return;
4672
4673 use_regsets = linux_register_in_regsets (regs_info, regno);
4674 if (use_regsets)
4675 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4676 regcache);
4677 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4678 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4679 }
4680 }
4681
4682 void
4683 linux_store_registers (struct regcache *regcache, int regno)
4684 {
4685 int use_regsets;
4686 int all = 0;
4687 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4688
4689 if (regno == -1)
4690 {
4691 all = regsets_store_inferior_registers (regs_info->regsets_info,
4692 regcache);
4693 if (regs_info->usrregs != NULL)
4694 usr_store_inferior_registers (regs_info, regcache, regno, all);
4695 }
4696 else
4697 {
4698 use_regsets = linux_register_in_regsets (regs_info, regno);
4699 if (use_regsets)
4700 all = regsets_store_inferior_registers (regs_info->regsets_info,
4701 regcache);
4702 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4703 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4704 }
4705 }
4706
4707
4708 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4709 to debugger memory starting at MYADDR. */
4710
4711 static int
4712 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4713 {
4714 int pid = lwpid_of (current_thread);
4715 register PTRACE_XFER_TYPE *buffer;
4716 register CORE_ADDR addr;
4717 register int count;
4718 char filename[64];
4719 register int i;
4720 int ret;
4721 int fd;
4722
4723 /* Try using /proc. Don't bother for one word. */
4724 if (len >= 3 * sizeof (long))
4725 {
4726 int bytes;
4727
4728 /* We could keep this file open and cache it - possibly one per
4729 thread. That requires some juggling, but is even faster. */
4730 sprintf (filename, "/proc/%d/mem", pid);
4731 fd = open (filename, O_RDONLY | O_LARGEFILE);
4732 if (fd == -1)
4733 goto no_proc;
4734
4735 /* If pread64 is available, use it. It's faster if the kernel
4736 supports it (only one syscall), and it's 64-bit safe even on
4737 32-bit platforms (for instance, SPARC debugging a SPARC64
4738 application). */
4739 #ifdef HAVE_PREAD64
4740 bytes = pread64 (fd, myaddr, len, memaddr);
4741 #else
4742 bytes = -1;
4743 if (lseek (fd, memaddr, SEEK_SET) != -1)
4744 bytes = read (fd, myaddr, len);
4745 #endif
4746
4747 close (fd);
4748 if (bytes == len)
4749 return 0;
4750
4751 /* Some data was read; we'll try to get the rest with ptrace.  */
4752 if (bytes > 0)
4753 {
4754 memaddr += bytes;
4755 myaddr += bytes;
4756 len -= bytes;
4757 }
4758 }
4759
4760 no_proc:
4761 /* Round starting address down to longword boundary. */
4762 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4763 /* Round ending address up; get number of longwords that makes. */
4764 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4765 / sizeof (PTRACE_XFER_TYPE));
4766 /* Allocate buffer of that many longwords. */
4767 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4768
4769 /* Read all the longwords.  */
4770 errno = 0;
4771 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4772 {
4773 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4774 about coercing an 8 byte integer to a 4 byte pointer. */
4775 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4776 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4777 (PTRACE_TYPE_ARG4) 0);
4778 if (errno)
4779 break;
4780 }
4781 ret = errno;
4782
4783 /* Copy appropriate bytes out of the buffer. */
4784 if (i > 0)
4785 {
4786 i *= sizeof (PTRACE_XFER_TYPE);
4787 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4788 memcpy (myaddr,
4789 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4790 i < len ? i : len);
4791 }
4792
4793 return ret;
4794 }
4795
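/* The /proc fast path above in isolation, as a sketch (illustrative
   only).  A single pread replaces one ptrace round-trip per word; the
   tracee must be ptrace-stopped or the kernel refuses the access.
   example_read_proc_mem is a hypothetical name.  */

static ssize_t
example_read_proc_mem (int pid, CORE_ADDR memaddr, void *buf, size_t len)
{
  char filename[64];
  ssize_t bytes;
  int fd;

  sprintf (filename, "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

#ifdef HAVE_PREAD64
  bytes = pread64 (fd, buf, len, memaddr);
#else
  bytes = -1;
  if (lseek (fd, memaddr, SEEK_SET) != -1)
    bytes = read (fd, buf, len);
#endif

  close (fd);
  return bytes;
}
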
4796 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4797 memory at MEMADDR. On failure (cannot write to the inferior)
4798 returns the value of errno. Always succeeds if LEN is zero. */
4799
4800 static int
4801 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4802 {
4803 register int i;
4804 /* Round starting address down to longword boundary. */
4805 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4806 /* Round ending address up; get number of longwords that makes. */
4807 register int count
4808 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4809 / sizeof (PTRACE_XFER_TYPE);
4810
4811 /* Allocate buffer of that many longwords. */
4812 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4813 alloca (count * sizeof (PTRACE_XFER_TYPE));
4814
4815 int pid = lwpid_of (current_thread);
4816
4817 if (len == 0)
4818 {
4819 /* Zero length write always succeeds. */
4820 return 0;
4821 }
4822
4823 if (debug_threads)
4824 {
4825 /* Dump up to four bytes. */
4826 unsigned int val = * (unsigned int *) myaddr;
4827 if (len == 1)
4828 val = val & 0xff;
4829 else if (len == 2)
4830 val = val & 0xffff;
4831 else if (len == 3)
4832 val = val & 0xffffff;
4833 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4834 val, (long)memaddr);
4835 }
4836
4837 /* Fill start and end extra bytes of buffer with existing memory data. */
4838
4839 errno = 0;
4840 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4841 about coercing an 8 byte integer to a 4 byte pointer. */
4842 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4843 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4844 (PTRACE_TYPE_ARG4) 0);
4845 if (errno)
4846 return errno;
4847
4848 if (count > 1)
4849 {
4850 errno = 0;
4851 buffer[count - 1]
4852 = ptrace (PTRACE_PEEKTEXT, pid,
4853 /* Coerce to a uintptr_t first to avoid potential gcc warning
4854 about coercing an 8 byte integer to a 4 byte pointer. */
4855 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4856 * sizeof (PTRACE_XFER_TYPE)),
4857 (PTRACE_TYPE_ARG4) 0);
4858 if (errno)
4859 return errno;
4860 }
4861
4862 /* Copy data to be written over corresponding part of buffer. */
4863
4864 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4865 myaddr, len);
4866
4867 /* Write the entire buffer. */
4868
4869 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4870 {
4871 errno = 0;
4872 ptrace (PTRACE_POKETEXT, pid,
4873 /* Coerce to a uintptr_t first to avoid potential gcc warning
4874 about coercing an 8 byte integer to a 4 byte pointer. */
4875 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4876 (PTRACE_TYPE_ARG4) buffer[i]);
4877 if (errno)
4878 return errno;
4879 }
4880
4881 return 0;
4882 }
4883
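/* A sketch (illustrative only) of the read-modify-write done above,
   byte by byte instead of via memcpy, to make the boundary handling
   explicit: only bytes inside [MEMADDR, MEMADDR + LEN) are replaced,
   so the first and last words keep their other bytes.
   example_poke_bytes is a hypothetical name.  */

static int
example_poke_bytes (int pid, CORE_ADDR memaddr,
                    const unsigned char *myaddr, size_t len)
{
  CORE_ADDR start = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  size_t count = ((memaddr + len) - start + sizeof (PTRACE_XFER_TYPE) - 1)
    / sizeof (PTRACE_XFER_TYPE);
  size_t i, b;

  for (i = 0; i < count; i++)
    {
      CORE_ADDR waddr = start + i * sizeof (PTRACE_XFER_TYPE);
      union
      {
        PTRACE_XFER_TYPE word;
        unsigned char bytes[sizeof (PTRACE_XFER_TYPE)];
      } u;

      errno = 0;
      u.word = ptrace (PTRACE_PEEKTEXT, pid,
                       (PTRACE_TYPE_ARG3) (uintptr_t) waddr,
                       (PTRACE_TYPE_ARG4) 0);
      if (errno)
        return errno;

      for (b = 0; b < sizeof (PTRACE_XFER_TYPE); b++)
        if (waddr + b >= memaddr && waddr + b < memaddr + len)
          u.bytes[b] = myaddr[waddr + b - memaddr];

      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
              (PTRACE_TYPE_ARG3) (uintptr_t) waddr,
              (PTRACE_TYPE_ARG4) u.word);
      if (errno)
        return errno;
    }

  return 0;
}
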
4884 static void
4885 linux_look_up_symbols (void)
4886 {
4887 #ifdef USE_THREAD_DB
4888 struct process_info *proc = current_process ();
4889
4890 if (proc->priv->thread_db != NULL)
4891 return;
4892
4893 /* If the kernel supports tracing clones, then we don't need to
4894 use the magic thread event breakpoint to learn about
4895 threads. */
4896 thread_db_init (!linux_supports_traceclone ());
4897 #endif
4898 }
4899
4900 static void
4901 linux_request_interrupt (void)
4902 {
4903 extern unsigned long signal_pid;
4904
4905 /* Send a SIGINT to the process group. This acts just like the user
4906 typed a ^C on the controlling terminal. */
4907 kill (-signal_pid, SIGINT);
4908 }
4909
4910 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4911 to debugger memory starting at MYADDR. */
4912
4913 static int
4914 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4915 {
4916 char filename[PATH_MAX];
4917 int fd, n;
4918 int pid = lwpid_of (current_thread);
4919
4920 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4921
4922 fd = open (filename, O_RDONLY);
4923 if (fd < 0)
4924 return -1;
4925
4926 if (offset != (CORE_ADDR) 0
4927 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4928 n = -1;
4929 else
4930 n = read (fd, myaddr, len);
4931
4932 close (fd);
4933
4934 return n;
4935 }
4936
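/* A sketch (illustrative only) of what a consumer does with the
   buffer linux_read_auxv fills in: the data is an array of
   ElfNN_auxv_t entries terminated by AT_NULL.  This assumes a 64-bit
   inferior; example_find_phdr is a hypothetical name.  */

static CORE_ADDR
example_find_phdr (const unsigned char *auxv, size_t len)
{
  const Elf64_auxv_t *aux = (const Elf64_auxv_t *) auxv;
  const Elf64_auxv_t *end = (const Elf64_auxv_t *) (auxv + len);

  for (; aux < end && aux->a_type != AT_NULL; aux++)
    if (aux->a_type == AT_PHDR)
      return (CORE_ADDR) aux->a_un.a_val;

  return 0;
}
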
4937 /* These breakpoint and watchpoint related wrapper functions simply
4938 pass on the function call if the target has registered a
4939 corresponding function. */
4940
4941 static int
4942 linux_supports_z_point_type (char z_type)
4943 {
4944 return (the_low_target.supports_z_point_type != NULL
4945 && the_low_target.supports_z_point_type (z_type));
4946 }
4947
4948 static int
4949 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4950 int size, struct raw_breakpoint *bp)
4951 {
4952 if (the_low_target.insert_point != NULL)
4953 return the_low_target.insert_point (type, addr, size, bp);
4954 else
4955 /* Unsupported (see target.h). */
4956 return 1;
4957 }
4958
4959 static int
4960 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4961 int size, struct raw_breakpoint *bp)
4962 {
4963 if (the_low_target.remove_point != NULL)
4964 return the_low_target.remove_point (type, addr, size, bp);
4965 else
4966 /* Unsupported (see target.h). */
4967 return 1;
4968 }
4969
4970 /* Implement the to_stopped_by_sw_breakpoint target_ops
4971 method. */
4972
4973 static int
4974 linux_stopped_by_sw_breakpoint (void)
4975 {
4976 struct lwp_info *lwp = get_thread_lwp (current_thread);
4977
4978 return (lwp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT);
4979 }
4980
4981 /* Implement the to_supports_stopped_by_sw_breakpoint target_ops
4982 method. */
4983
4984 static int
4985 linux_supports_stopped_by_sw_breakpoint (void)
4986 {
4987 return USE_SIGTRAP_SIGINFO;
4988 }
4989
4990 /* Implement the to_stopped_by_hw_breakpoint target_ops
4991 method. */
4992
4993 static int
4994 linux_stopped_by_hw_breakpoint (void)
4995 {
4996 struct lwp_info *lwp = get_thread_lwp (current_thread);
4997
4998 return (lwp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT);
4999 }
5000
5001 /* Implement the to_supports_stopped_by_hw_breakpoint target_ops
5002 method. */
5003
5004 static int
5005 linux_supports_stopped_by_hw_breakpoint (void)
5006 {
5007 return USE_SIGTRAP_SIGINFO;
5008 }
5009
5010 static int
5011 linux_stopped_by_watchpoint (void)
5012 {
5013 struct lwp_info *lwp = get_thread_lwp (current_thread);
5014
5015 return lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
5016 }
5017
5018 static CORE_ADDR
5019 linux_stopped_data_address (void)
5020 {
5021 struct lwp_info *lwp = get_thread_lwp (current_thread);
5022
5023 return lwp->stopped_data_address;
5024 }
5025
5026 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
5027 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
5028 && defined(PT_TEXT_END_ADDR)
5029
5030 /* This is only used for targets that define PT_TEXT_ADDR,
5031 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
5032 the target has different ways of acquiring this information, like
5033 loadmaps. */
5034
5035 /* Under uClinux, programs are loaded at non-zero offsets, which we need
5036 to tell gdb about. */
5037
5038 static int
5039 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
5040 {
5041 unsigned long text, text_end, data;
5042 int pid = lwpid_of (get_thread_lwp (current_thread));
5043
5044 errno = 0;
5045
5046 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
5047 (PTRACE_TYPE_ARG4) 0);
5048 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
5049 (PTRACE_TYPE_ARG4) 0);
5050 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
5051 (PTRACE_TYPE_ARG4) 0);
5052
5053 if (errno == 0)
5054 {
5055 /* Both text and data offsets produced at compile-time (and so
5056 used by gdb) are relative to the beginning of the program,
5057 with the data segment immediately following the text segment.
5058 However, the actual runtime layout in memory may put the data
5059 somewhere else, so when we send gdb a data base-address, we
5060 use the real data base address and subtract the compile-time
5061 data base-address from it (which is just the length of the
5062 text segment). BSS immediately follows data in both
5063 cases. */
5064 *text_p = text;
5065 *data_p = data - (text_end - text);
5066
5067 return 1;
5068 }
5069 return 0;
5070 }
5071 #endif
5072
5073 static int
5074 linux_qxfer_osdata (const char *annex,
5075 unsigned char *readbuf, unsigned const char *writebuf,
5076 CORE_ADDR offset, int len)
5077 {
5078 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5079 }
5080
5081 /* Convert a native/host siginfo object, into/from the siginfo in the
5082 layout of the inferiors' architecture. */
5083
5084 static void
5085 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
5086 {
5087 int done = 0;
5088
5089 if (the_low_target.siginfo_fixup != NULL)
5090 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
5091
5092 /* If there was no callback, or the callback didn't do anything,
5093 then just do a straight memcpy. */
5094 if (!done)
5095 {
5096 if (direction == 1)
5097 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5098 else
5099 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5100 }
5101 }
5102
5103 static int
5104 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
5105 unsigned const char *writebuf, CORE_ADDR offset, int len)
5106 {
5107 int pid;
5108 siginfo_t siginfo;
5109 char inf_siginfo[sizeof (siginfo_t)];
5110
5111 if (current_thread == NULL)
5112 return -1;
5113
5114 pid = lwpid_of (current_thread);
5115
5116 if (debug_threads)
5117 debug_printf ("%s siginfo for lwp %d.\n",
5118 readbuf != NULL ? "Reading" : "Writing",
5119 pid);
5120
5121 if (offset >= sizeof (siginfo))
5122 return -1;
5123
5124 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5125 return -1;
5126
5127 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
5128 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
5129 inferior with a 64-bit GDBSERVER should look the same as debugging it
5130 with a 32-bit GDBSERVER, we need to convert it. */
5131 siginfo_fixup (&siginfo, inf_siginfo, 0);
5132
5133 if (offset + len > sizeof (siginfo))
5134 len = sizeof (siginfo) - offset;
5135
5136 if (readbuf != NULL)
5137 memcpy (readbuf, inf_siginfo + offset, len);
5138 else
5139 {
5140 memcpy (inf_siginfo + offset, writebuf, len);
5141
5142 /* Convert back to ptrace layout before flushing it out. */
5143 siginfo_fixup (&siginfo, inf_siginfo, 1);
5144
5145 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5146 return -1;
5147 }
5148
5149 return len;
5150 }
5151
5152 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5153 it lets us notice when children change state; and it acts as the
5154 handler for the sigsuspend in my_waitpid. */
5155
5156 static void
5157 sigchld_handler (int signo)
5158 {
5159 int old_errno = errno;
5160
5161 if (debug_threads)
5162 {
5163 do
5164 {
5165 /* fprintf is not async-signal-safe, so call write
5166 directly. */
5167 if (write (2, "sigchld_handler\n",
5168 sizeof ("sigchld_handler\n") - 1) < 0)
5169 break; /* just ignore */
5170 } while (0);
5171 }
5172
5173 if (target_is_async_p ())
5174 async_file_mark (); /* trigger a linux_wait */
5175
5176 errno = old_errno;
5177 }
5178
5179 static int
5180 linux_supports_non_stop (void)
5181 {
5182 return 1;
5183 }
5184
5185 static int
5186 linux_async (int enable)
5187 {
5188 int previous = target_is_async_p ();
5189
5190 if (debug_threads)
5191 debug_printf ("linux_async (%d), previous=%d\n",
5192 enable, previous);
5193
5194 if (previous != enable)
5195 {
5196 sigset_t mask;
5197 sigemptyset (&mask);
5198 sigaddset (&mask, SIGCHLD);
5199
5200 sigprocmask (SIG_BLOCK, &mask, NULL);
5201
5202 if (enable)
5203 {
5204 if (pipe (linux_event_pipe) == -1)
5205 {
5206 linux_event_pipe[0] = -1;
5207 linux_event_pipe[1] = -1;
5208 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5209
5210 warning ("creating event pipe failed.");
5211 return previous;
5212 }
5213
5214 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5215 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5216
5217 /* Register the event loop handler. */
5218 add_file_handler (linux_event_pipe[0],
5219 handle_target_event, NULL);
5220
5221 /* Always trigger a linux_wait. */
5222 async_file_mark ();
5223 }
5224 else
5225 {
5226 delete_file_handler (linux_event_pipe[0]);
5227
5228 close (linux_event_pipe[0]);
5229 close (linux_event_pipe[1]);
5230 linux_event_pipe[0] = -1;
5231 linux_event_pipe[1] = -1;
5232 }
5233
5234 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5235 }
5236
5237 return previous;
5238 }
5239
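/* The self-pipe pattern linux_async sets up, reduced to a skeleton
   sketch (illustrative only).  The signal handler may only use
   async-signal-safe calls, so it writes one byte into a non-blocking
   pipe whose read end the event loop polls; names prefixed example_
   are hypothetical.  */

static int example_pipe[2];	/* [0] is the read end, [1] the write end.  */

static void
example_sigchld (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe.  A full pipe just means a wakeup is
     already pending, so a failure here can be ignored.  */
  if (write (example_pipe[1], "+", 1) < 0)
    ; /* Ignore.  */

  errno = old_errno;
}

static int
example_setup (void)
{
  if (pipe (example_pipe) == -1)
    return -1;

  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);

  /* The event loop then polls example_pipe[0] and drains it before
     calling linux_wait.  */
  return 0;
}
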
5240 static int
5241 linux_start_non_stop (int nonstop)
5242 {
5243 /* Register or unregister from event-loop accordingly. */
5244 linux_async (nonstop);
5245
5246 if (target_is_async_p () != (nonstop != 0))
5247 return -1;
5248
5249 return 0;
5250 }
5251
5252 static int
5253 linux_supports_multi_process (void)
5254 {
5255 return 1;
5256 }
5257
5258 static int
5259 linux_supports_disable_randomization (void)
5260 {
5261 #ifdef HAVE_PERSONALITY
5262 return 1;
5263 #else
5264 return 0;
5265 #endif
5266 }
5267
5268 static int
5269 linux_supports_agent (void)
5270 {
5271 return 1;
5272 }
5273
5274 static int
5275 linux_supports_range_stepping (void)
5276 {
5277 if (*the_low_target.supports_range_stepping == NULL)
5278 return 0;
5279
5280 return (*the_low_target.supports_range_stepping) ();
5281 }
5282
5283 /* Enumerate spufs IDs for process PID. */
5284 static int
5285 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5286 {
5287 int pos = 0;
5288 int written = 0;
5289 char path[128];
5290 DIR *dir;
5291 struct dirent *entry;
5292
5293 sprintf (path, "/proc/%ld/fd", pid);
5294 dir = opendir (path);
5295 if (!dir)
5296 return -1;
5297
5298 rewinddir (dir);
5299 while ((entry = readdir (dir)) != NULL)
5300 {
5301 struct stat st;
5302 struct statfs stfs;
5303 int fd;
5304
5305 fd = atoi (entry->d_name);
5306 if (!fd)
5307 continue;
5308
5309 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5310 if (stat (path, &st) != 0)
5311 continue;
5312 if (!S_ISDIR (st.st_mode))
5313 continue;
5314
5315 if (statfs (path, &stfs) != 0)
5316 continue;
5317 if (stfs.f_type != SPUFS_MAGIC)
5318 continue;
5319
5320 if (pos >= offset && pos + 4 <= offset + len)
5321 {
5322 *(unsigned int *)(buf + pos - offset) = fd;
5323 written += 4;
5324 }
5325 pos += 4;
5326 }
5327
5328 closedir (dir);
5329 return written;
5330 }
5331
5332 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5333 object type, using the /proc file system. */
5334 static int
5335 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5336 unsigned const char *writebuf,
5337 CORE_ADDR offset, int len)
5338 {
5339 long pid = lwpid_of (current_thread);
5340 char buf[128];
5341 int fd = 0;
5342 int ret = 0;
5343
5344 if (!writebuf && !readbuf)
5345 return -1;
5346
5347 if (!*annex)
5348 {
5349 if (!readbuf)
5350 return -1;
5351 else
5352 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5353 }
5354
5355 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5356 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5357 if (fd <= 0)
5358 return -1;
5359
5360 if (offset != 0
5361 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5362 {
5363 close (fd);
5364 return 0;
5365 }
5366
5367 if (writebuf)
5368 ret = write (fd, writebuf, (size_t) len);
5369 else
5370 ret = read (fd, readbuf, (size_t) len);
5371
5372 close (fd);
5373 return ret;
5374 }
5375
5376 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5377 struct target_loadseg
5378 {
5379 /* Core address to which the segment is mapped. */
5380 Elf32_Addr addr;
5381 /* VMA recorded in the program header. */
5382 Elf32_Addr p_vaddr;
5383 /* Size of this segment in memory. */
5384 Elf32_Word p_memsz;
5385 };
5386
5387 # if defined PT_GETDSBT
5388 struct target_loadmap
5389 {
5390 /* Protocol version number, must be zero. */
5391 Elf32_Word version;
5392 /* Pointer to the DSBT table, its size, and the DSBT index. */
5393 unsigned *dsbt_table;
5394 unsigned dsbt_size, dsbt_index;
5395 /* Number of segments in this map. */
5396 Elf32_Word nsegs;
5397 /* The actual memory map. */
5398 struct target_loadseg segs[/*nsegs*/];
5399 };
5400 # define LINUX_LOADMAP PT_GETDSBT
5401 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5402 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5403 # else
5404 struct target_loadmap
5405 {
5406 /* Protocol version number, must be zero. */
5407 Elf32_Half version;
5408 /* Number of segments in this map. */
5409 Elf32_Half nsegs;
5410 /* The actual memory map. */
5411 struct target_loadseg segs[/*nsegs*/];
5412 };
5413 # define LINUX_LOADMAP PTRACE_GETFDPIC
5414 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5415 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5416 # endif
5417
5418 static int
5419 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5420 unsigned char *myaddr, unsigned int len)
5421 {
5422 int pid = lwpid_of (current_thread);
5423 int addr = -1;
5424 struct target_loadmap *data = NULL;
5425 unsigned int actual_length, copy_length;
5426
5427 if (strcmp (annex, "exec") == 0)
5428 addr = (int) LINUX_LOADMAP_EXEC;
5429 else if (strcmp (annex, "interp") == 0)
5430 addr = (int) LINUX_LOADMAP_INTERP;
5431 else
5432 return -1;
5433
5434 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5435 return -1;
5436
5437 if (data == NULL)
5438 return -1;
5439
5440 actual_length = sizeof (struct target_loadmap)
5441 + sizeof (struct target_loadseg) * data->nsegs;
5442
5443 if (offset < 0 || offset > actual_length)
5444 return -1;
5445
5446 copy_length = actual_length - offset < len ? actual_length - offset : len;
5447 memcpy (myaddr, (char *) data + offset, copy_length);
5448 return copy_length;
5449 }
5450 #else
5451 # define linux_read_loadmap NULL
5452 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5453
5454 static void
5455 linux_process_qsupported (const char *query)
5456 {
5457 if (the_low_target.process_qsupported != NULL)
5458 the_low_target.process_qsupported (query);
5459 }
5460
5461 static int
5462 linux_supports_tracepoints (void)
5463 {
5464 if (*the_low_target.supports_tracepoints == NULL)
5465 return 0;
5466
5467 return (*the_low_target.supports_tracepoints) ();
5468 }
5469
5470 static CORE_ADDR
5471 linux_read_pc (struct regcache *regcache)
5472 {
5473 if (the_low_target.get_pc == NULL)
5474 return 0;
5475
5476 return (*the_low_target.get_pc) (regcache);
5477 }
5478
5479 static void
5480 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5481 {
5482 gdb_assert (the_low_target.set_pc != NULL);
5483
5484 (*the_low_target.set_pc) (regcache, pc);
5485 }
5486
5487 static int
5488 linux_thread_stopped (struct thread_info *thread)
5489 {
5490 return get_thread_lwp (thread)->stopped;
5491 }
5492
5493 /* This exposes stop-all-threads functionality to other modules. */
5494
5495 static void
5496 linux_pause_all (int freeze)
5497 {
5498 stop_all_lwps (freeze, NULL);
5499 }
5500
5501 /* This exposes unstop-all-threads functionality to other gdbserver
5502 modules. */
5503
5504 static void
5505 linux_unpause_all (int unfreeze)
5506 {
5507 unstop_all_lwps (unfreeze, NULL);
5508 }
5509
5510 static int
5511 linux_prepare_to_access_memory (void)
5512 {
5513 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5514 running LWP. */
5515 if (non_stop)
5516 linux_pause_all (1);
5517 return 0;
5518 }
5519
5520 static void
5521 linux_done_accessing_memory (void)
5522 {
5523 /* Neither ptrace nor /proc/PID/mem allow accessing memory through a
5524 running LWP. */
5525 if (non_stop)
5526 linux_unpause_all (1);
5527 }
5528
5529 static int
5530 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5531 CORE_ADDR collector,
5532 CORE_ADDR lockaddr,
5533 ULONGEST orig_size,
5534 CORE_ADDR *jump_entry,
5535 CORE_ADDR *trampoline,
5536 ULONGEST *trampoline_size,
5537 unsigned char *jjump_pad_insn,
5538 ULONGEST *jjump_pad_insn_size,
5539 CORE_ADDR *adjusted_insn_addr,
5540 CORE_ADDR *adjusted_insn_addr_end,
5541 char *err)
5542 {
5543 return (*the_low_target.install_fast_tracepoint_jump_pad)
5544 (tpoint, tpaddr, collector, lockaddr, orig_size,
5545 jump_entry, trampoline, trampoline_size,
5546 jjump_pad_insn, jjump_pad_insn_size,
5547 adjusted_insn_addr, adjusted_insn_addr_end,
5548 err);
5549 }
5550
5551 static struct emit_ops *
5552 linux_emit_ops (void)
5553 {
5554 if (the_low_target.emit_ops != NULL)
5555 return (*the_low_target.emit_ops) ();
5556 else
5557 return NULL;
5558 }
5559
5560 static int
5561 linux_get_min_fast_tracepoint_insn_len (void)
5562 {
5563 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5564 }
5565
5566 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5567
5568 static int
5569 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5570 CORE_ADDR *phdr_memaddr, int *num_phdr)
5571 {
5572 char filename[PATH_MAX];
5573 int fd;
5574 const int auxv_size = is_elf64
5575 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5576 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5577
5578 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5579
5580 fd = open (filename, O_RDONLY);
5581 if (fd < 0)
5582 return 1;
5583
5584 *phdr_memaddr = 0;
5585 *num_phdr = 0;
5586 while (read (fd, buf, auxv_size) == auxv_size
5587 && (*phdr_memaddr == 0 || *num_phdr == 0))
5588 {
5589 if (is_elf64)
5590 {
5591 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5592
5593 switch (aux->a_type)
5594 {
5595 case AT_PHDR:
5596 *phdr_memaddr = aux->a_un.a_val;
5597 break;
5598 case AT_PHNUM:
5599 *num_phdr = aux->a_un.a_val;
5600 break;
5601 }
5602 }
5603 else
5604 {
5605 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5606
5607 switch (aux->a_type)
5608 {
5609 case AT_PHDR:
5610 *phdr_memaddr = aux->a_un.a_val;
5611 break;
5612 case AT_PHNUM:
5613 *num_phdr = aux->a_un.a_val;
5614 break;
5615 }
5616 }
5617 }
5618
5619 close (fd);
5620
5621 if (*phdr_memaddr == 0 || *num_phdr == 0)
5622 {
5623 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5624 "phdr_memaddr = %ld, phdr_num = %d",
5625 (long) *phdr_memaddr, *num_phdr);
5626 return 2;
5627 }
5628
5629 return 0;
5630 }
5631
5632 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5633
5634 static CORE_ADDR
5635 get_dynamic (const int pid, const int is_elf64)
5636 {
5637 CORE_ADDR phdr_memaddr, relocation;
5638 int num_phdr, i;
5639 unsigned char *phdr_buf;
5640 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5641
5642 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5643 return 0;
5644
5645 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5646 phdr_buf = alloca (num_phdr * phdr_size);
5647
5648 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5649 return 0;
5650
5651 /* Compute relocation: it is expected to be 0 for "regular" executables,
5652 non-zero for PIE ones. */
5653 relocation = -1;
5654 for (i = 0; relocation == -1 && i < num_phdr; i++)
5655 if (is_elf64)
5656 {
5657 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5658
5659 if (p->p_type == PT_PHDR)
5660 relocation = phdr_memaddr - p->p_vaddr;
5661 }
5662 else
5663 {
5664 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5665
5666 if (p->p_type == PT_PHDR)
5667 relocation = phdr_memaddr - p->p_vaddr;
5668 }
5669
5670 if (relocation == -1)
5671 {
5672      /* PT_PHDR is optional, but in general it is required for PIE.
5673	 Fortunately, real-world executables, including PIE ones, always
5674	 have PT_PHDR present.  PT_PHDR is absent from some shared libraries
5675	 and from fpc (Free Pascal 2.4) binaries, but neither of those needs
5676	 or provides DT_DEBUG anyway (fpc binaries are statically linked).
5677
5678	 Therefore, wherever DT_DEBUG exists, PT_PHDR exists as well.
5679
5680	 GDB could also derive RELOCATION from AT_ENTRY - e_entry.  */
5681
5682 return 0;
5683 }
5684
5685 for (i = 0; i < num_phdr; i++)
5686 {
5687 if (is_elf64)
5688 {
5689 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5690
5691 if (p->p_type == PT_DYNAMIC)
5692 return p->p_vaddr + relocation;
5693 }
5694 else
5695 {
5696 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5697
5698 if (p->p_type == PT_DYNAMIC)
5699 return p->p_vaddr + relocation;
5700 }
5701 }
5702
5703 return 0;
5704 }
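
/* Editorial sketch (not compiled), with made-up addresses: for a PIE
   whose PT_PHDR records p_vaddr 0x40 but whose auxv reports
   AT_PHDR = 0x555555554040, the relocation is their difference, and
   every p_vaddr in the file is adjusted by it.  For a non-PIE
   executable the two values coincide and the relocation is 0.  */
#if 0
static void
example_relocation_math (void)
{
  CORE_ADDR at_phdr = 0x555555554040;    /* AT_PHDR from the auxv.  */
  CORE_ADDR phdr_vaddr = 0x40;           /* p_vaddr of PT_PHDR in the file.  */
  CORE_ADDR dynamic_vaddr = 0x2d80;      /* p_vaddr of PT_DYNAMIC in the file.  */

  CORE_ADDR relocation = at_phdr - phdr_vaddr;          /* 0x555555554000.  */
  CORE_ADDR dynamic_addr = dynamic_vaddr + relocation;  /* 0x555555556d80.  */
}
#endif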
5705
5706 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5707 can be 0 if the inferior does not yet have the library list initialized.
5708 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5709 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5710
5711 static CORE_ADDR
5712 get_r_debug (const int pid, const int is_elf64)
5713 {
5714 CORE_ADDR dynamic_memaddr;
5715 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5716 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5717 CORE_ADDR map = -1;
5718
5719 dynamic_memaddr = get_dynamic (pid, is_elf64);
5720 if (dynamic_memaddr == 0)
5721 return map;
5722
5723 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5724 {
5725 if (is_elf64)
5726 {
5727 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5728 #ifdef DT_MIPS_RLD_MAP
5729 union
5730 {
5731 Elf64_Xword map;
5732 unsigned char buf[sizeof (Elf64_Xword)];
5733 }
5734 rld_map;
5735
5736 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5737 {
5738 if (linux_read_memory (dyn->d_un.d_val,
5739 rld_map.buf, sizeof (rld_map.buf)) == 0)
5740 return rld_map.map;
5741 else
5742 break;
5743 }
5744 #endif /* DT_MIPS_RLD_MAP */
5745
5746 if (dyn->d_tag == DT_DEBUG && map == -1)
5747 map = dyn->d_un.d_val;
5748
5749 if (dyn->d_tag == DT_NULL)
5750 break;
5751 }
5752 else
5753 {
5754 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5755 #ifdef DT_MIPS_RLD_MAP
5756 union
5757 {
5758 Elf32_Word map;
5759 unsigned char buf[sizeof (Elf32_Word)];
5760 }
5761 rld_map;
5762
5763 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5764 {
5765 if (linux_read_memory (dyn->d_un.d_val,
5766 rld_map.buf, sizeof (rld_map.buf)) == 0)
5767 return rld_map.map;
5768 else
5769 break;
5770 }
5771 #endif /* DT_MIPS_RLD_MAP */
5772
5773 if (dyn->d_tag == DT_DEBUG && map == -1)
5774 map = dyn->d_un.d_val;
5775
5776 if (dyn->d_tag == DT_NULL)
5777 break;
5778 }
5779
5780 dynamic_memaddr += dyn_size;
5781 }
5782
5783 return map;
5784 }
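
/* Editorial sketch (not compiled): the same DT_DEBUG scan run over the
   current process's own dynamic section, using the _DYNAMIC array that
   <link.h> exposes in dynamically linked programs.  The d_ptr found
   this way is the address of the process's struct r_debug.  */
#if 0
#include <link.h>

static struct r_debug *
example_own_r_debug (void)
{
  const ElfW(Dyn) *dyn;

  for (dyn = _DYNAMIC; dyn->d_tag != DT_NULL; dyn++)
    if (dyn->d_tag == DT_DEBUG)
      return (struct r_debug *) dyn->d_un.d_ptr;
  return NULL;
}
#endif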
5785
5786 /* Read one pointer from MEMADDR in the inferior. */
5787
5788 static int
5789 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5790 {
5791 int ret;
5792
5793   /* Go through a union so this works on either big- or little-endian
5794      hosts, when the inferior's pointer size is smaller than the size
5795      of CORE_ADDR.  It is assumed that the inferior's endianness is the
5796      same as the superior's.  */
5797 union
5798 {
5799 CORE_ADDR core_addr;
5800 unsigned int ui;
5801 unsigned char uc;
5802 } addr;
5803
5804 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5805 if (ret == 0)
5806 {
5807 if (ptr_size == sizeof (CORE_ADDR))
5808 *ptr = addr.core_addr;
5809 else if (ptr_size == sizeof (unsigned int))
5810 *ptr = addr.ui;
5811 else
5812 gdb_assert_not_reached ("unhandled pointer size");
5813 }
5814 return ret;
5815 }
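
/* Editorial sketch (not compiled): the union trick above in isolation.
   Reading N bytes into the start of the union and then picking the
   member whose size is N yields the right value on hosts of either
   endianness, provided inferior and host agree; reading into a 64-bit
   CORE_ADDR directly would leave a 4-byte value in the wrong half on a
   big-endian host.  */
#if 0
#include <string.h>

static unsigned int
example_extract_u32 (const unsigned char bytes[4])
{
  union
  {
    unsigned long ul;  /* Stands in for CORE_ADDR.  */
    unsigned int ui;
    unsigned char uc;
  } u;

  memcpy (&u.uc, bytes, 4);  /* Fill only the first four bytes.  */
  return u.ui;               /* Read back through the matching member.  */
}
#endif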
5816
5817 struct link_map_offsets
5818 {
5819 /* Offset and size of r_debug.r_version. */
5820 int r_version_offset;
5821
5822 /* Offset and size of r_debug.r_map. */
5823 int r_map_offset;
5824
5825 /* Offset to l_addr field in struct link_map. */
5826 int l_addr_offset;
5827
5828 /* Offset to l_name field in struct link_map. */
5829 int l_name_offset;
5830
5831 /* Offset to l_ld field in struct link_map. */
5832 int l_ld_offset;
5833
5834 /* Offset to l_next field in struct link_map. */
5835 int l_next_offset;
5836
5837 /* Offset to l_prev field in struct link_map. */
5838 int l_prev_offset;
5839 };
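
/* Editorial sketch (not compiled): on a host whose <link.h> matches
   the inferior's ABI, the hard-coded offset tables used below can be
   cross-checked against the real structures with offsetof.  gdbserver
   cannot rely on this in general, since it must also handle the other
   word size, whose layout the host headers do not describe.  */
#if 0
#include <assert.h>
#include <stddef.h>
#include <link.h>

static void
example_check_native_offsets (void)
{
  /* On LP64, r_version is a 4-byte int followed by padding, so r_map
     lands at offset 8; on ILP32 it lands at offset 4.  */
  assert (offsetof (struct r_debug, r_map) == (sizeof (void *) == 8 ? 8 : 4));
  assert (offsetof (struct link_map, l_addr) == 0);
  assert (offsetof (struct link_map, l_next) == (sizeof (void *) == 8 ? 24 : 12));
}
#endif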
5840
5841 /* Construct qXfer:libraries-svr4:read reply. */
5842
5843 static int
5844 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5845 unsigned const char *writebuf,
5846 CORE_ADDR offset, int len)
5847 {
5848 char *document;
5849 unsigned document_len;
5850 struct process_info_private *const priv = current_process ()->priv;
5851 char filename[PATH_MAX];
5852 int pid, is_elf64;
5853
5854 static const struct link_map_offsets lmo_32bit_offsets =
5855 {
5856 0, /* r_version offset. */
5857 4, /* r_debug.r_map offset. */
5858 0, /* l_addr offset in link_map. */
5859 4, /* l_name offset in link_map. */
5860 8, /* l_ld offset in link_map. */
5861 12, /* l_next offset in link_map. */
5862 16 /* l_prev offset in link_map. */
5863 };
5864
5865 static const struct link_map_offsets lmo_64bit_offsets =
5866 {
5867 0, /* r_version offset. */
5868 8, /* r_debug.r_map offset. */
5869 0, /* l_addr offset in link_map. */
5870 8, /* l_name offset in link_map. */
5871 16, /* l_ld offset in link_map. */
5872 24, /* l_next offset in link_map. */
5873 32 /* l_prev offset in link_map. */
5874 };
5875 const struct link_map_offsets *lmo;
5876 unsigned int machine;
5877 int ptr_size;
5878 CORE_ADDR lm_addr = 0, lm_prev = 0;
5879 int allocated = 1024;
5880 char *p;
5881 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5882 int header_done = 0;
5883
5884 if (writebuf != NULL)
5885 return -2;
5886 if (readbuf == NULL)
5887 return -1;
5888
5889 pid = lwpid_of (current_thread);
5890 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5891 is_elf64 = elf_64_file_p (filename, &machine);
5892 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5893 ptr_size = is_elf64 ? 8 : 4;
5894
5895 while (annex[0] != '\0')
5896 {
5897 const char *sep;
5898 CORE_ADDR *addrp;
5899 int len;
5900
5901 sep = strchr (annex, '=');
5902 if (sep == NULL)
5903 break;
5904
5905 len = sep - annex;
5906 if (len == 5 && startswith (annex, "start"))
5907 addrp = &lm_addr;
5908 else if (len == 4 && startswith (annex, "prev"))
5909 addrp = &lm_prev;
5910 else
5911 {
5912 annex = strchr (sep, ';');
5913 if (annex == NULL)
5914 break;
5915 annex++;
5916 continue;
5917 }
5918
5919 annex = decode_address_to_semicolon (addrp, sep + 1);
5920 }
5921
5922 if (lm_addr == 0)
5923 {
5924 int r_version = 0;
5925
5926 if (priv->r_debug == 0)
5927 priv->r_debug = get_r_debug (pid, is_elf64);
5928
5929       /* We failed to find DT_DEBUG.  This situation will not change
5930	  for this inferior, so do not retry it.  Report it to GDB as
5931	  E01; see GDB's solib-svr4.c for the reasons.  */
5932 if (priv->r_debug == (CORE_ADDR) -1)
5933 return -1;
5934
5935 if (priv->r_debug != 0)
5936 {
5937 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5938 (unsigned char *) &r_version,
5939 sizeof (r_version)) != 0
5940 || r_version != 1)
5941 {
5942 warning ("unexpected r_debug version %d", r_version);
5943 }
5944 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5945 &lm_addr, ptr_size) != 0)
5946 {
5947 warning ("unable to read r_map from 0x%lx",
5948 (long) priv->r_debug + lmo->r_map_offset);
5949 }
5950 }
5951 }
5952
5953 document = xmalloc (allocated);
5954 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5955 p = document + strlen (document);
5956
5957 while (lm_addr
5958 && read_one_ptr (lm_addr + lmo->l_name_offset,
5959 &l_name, ptr_size) == 0
5960 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5961 &l_addr, ptr_size) == 0
5962 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5963 &l_ld, ptr_size) == 0
5964 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5965 &l_prev, ptr_size) == 0
5966 && read_one_ptr (lm_addr + lmo->l_next_offset,
5967 &l_next, ptr_size) == 0)
5968 {
5969 unsigned char libname[PATH_MAX];
5970
5971 if (lm_prev != l_prev)
5972 {
5973 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5974 (long) lm_prev, (long) l_prev);
5975 break;
5976 }
5977
5978       /* Ignore the first entry even if it has a valid name, as it
5979	  corresponds to the main executable.  The first entry should not be
5980	  skipped if the dynamic loader was loaded late by a static executable
5981	  (see the solib-svr4.c parameter ignore_first), but in that case the
5982	  main executable has no PT_DYNAMIC present, and this function has
5983	  already returned above because get_r_debug failed.  */
5984 if (lm_prev == 0)
5985 {
5986 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5987 p = p + strlen (p);
5988 }
5989 else
5990 {
5991 /* Not checking for error because reading may stop before
5992 we've got PATH_MAX worth of characters. */
5993 libname[0] = '\0';
5994 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5995 libname[sizeof (libname) - 1] = '\0';
5996 if (libname[0] != '\0')
5997 {
5998 /* 6x the size for xml_escape_text below. */
5999 size_t len = 6 * strlen ((char *) libname);
6000 char *name;
6001
6002 if (!header_done)
6003 {
6004 /* Terminate `<library-list-svr4'. */
6005 *p++ = '>';
6006 header_done = 1;
6007 }
6008
6009 while (allocated < p - document + len + 200)
6010 {
6011 /* Expand to guarantee sufficient storage. */
6012 uintptr_t document_len = p - document;
6013
6014 document = xrealloc (document, 2 * allocated);
6015 allocated *= 2;
6016 p = document + document_len;
6017 }
6018
6019 name = xml_escape_text ((char *) libname);
6020 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
6021 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
6022 name, (unsigned long) lm_addr,
6023 (unsigned long) l_addr, (unsigned long) l_ld);
6024 free (name);
6025 }
6026 }
6027
6028 lm_prev = lm_addr;
6029 lm_addr = l_next;
6030 }
6031
6032 if (!header_done)
6033 {
6034 /* Empty list; terminate `<library-list-svr4'. */
6035 strcpy (p, "/>");
6036 }
6037 else
6038 strcpy (p, "</library-list-svr4>");
6039
6040 document_len = strlen (document);
6041 if (offset < document_len)
6042 document_len -= offset;
6043 else
6044 document_len = 0;
6045 if (len > document_len)
6046 len = document_len;
6047
6048 memcpy (readbuf, document + offset, len);
6049 xfree (document);
6050
6051 return len;
6052 }
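
/* Editorial sketch (not compiled): the annex decoded above is a
   sequence of NAME=HEXADDR; pairs, e.g. "start=7ffff7ffd9f0;prev=0;".
   Below is a stand-alone, simplified reproduction of the scan using
   only libc (gdbserver itself uses decode_address_to_semicolon); it is
   stricter about malformed input than the code above.  */
#if 0
#include <stdlib.h>
#include <string.h>

static int
example_parse_annex (const char *annex, unsigned long *start,
		     unsigned long *prev)
{
  while (*annex != '\0')
    {
      const char *sep = strchr (annex, '=');
      const char *end;

      if (sep == NULL)
	return -1;
      end = strchr (sep + 1, ';');
      if (end == NULL)
	return -1;
      if (sep - annex == 5 && strncmp (annex, "start", 5) == 0)
	*start = strtoul (sep + 1, NULL, 16);
      else if (sep - annex == 4 && strncmp (annex, "prev", 4) == 0)
	*prev = strtoul (sep + 1, NULL, 16);
      /* Unknown names are skipped, as in the loop above.  */
      annex = end + 1;
    }
  return 0;
}
#endif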
6053
6054 #ifdef HAVE_LINUX_BTRACE
6055
6056 /* See to_enable_btrace target method. */
6057
6058 static struct btrace_target_info *
6059 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
6060 {
6061 struct btrace_target_info *tinfo;
6062
6063 tinfo = linux_enable_btrace (ptid, conf);
6064
6065 if (tinfo != NULL && tinfo->ptr_bits == 0)
6066 {
6067 struct thread_info *thread = find_thread_ptid (ptid);
6068 struct regcache *regcache = get_thread_regcache (thread, 0);
6069
6070 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
6071 }
6072
6073 return tinfo;
6074 }
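
/* Editorial note: the fallback above derives the inferior's pointer
   width from register 0 of the target description: an 8-byte
   register 0 yields ptr_bits = 64, a 4-byte one yields 32.  This
   presumes register 0 is a full-width general register, which appears
   to be what the code relies on.  */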
6075
6076 /* See to_disable_btrace target method. */
6077
6078 static int
6079 linux_low_disable_btrace (struct btrace_target_info *tinfo)
6080 {
6081 enum btrace_error err;
6082
6083 err = linux_disable_btrace (tinfo);
6084 return (err == BTRACE_ERR_NONE ? 0 : -1);
6085 }
6086
6087 /* See to_read_btrace target method. */
6088
6089 static int
6090 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
6091 int type)
6092 {
6093 struct btrace_data btrace;
6094 struct btrace_block *block;
6095 enum btrace_error err;
6096 int i;
6097
6098 btrace_data_init (&btrace);
6099
6100 err = linux_read_btrace (&btrace, tinfo, type);
6101 if (err != BTRACE_ERR_NONE)
6102 {
6103 if (err == BTRACE_ERR_OVERFLOW)
6104 buffer_grow_str0 (buffer, "E.Overflow.");
6105 else
6106 buffer_grow_str0 (buffer, "E.Generic Error.");
6107
6108 btrace_data_fini (&btrace);
6109 return -1;
6110 }
6111
6112 switch (btrace.format)
6113 {
6114 case BTRACE_FORMAT_NONE:
6115 buffer_grow_str0 (buffer, "E.No Trace.");
6116 break;
6117
6118 case BTRACE_FORMAT_BTS:
6119 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
6120 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
6121
6122 for (i = 0;
6123 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
6124 i++)
6125 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
6126 paddress (block->begin), paddress (block->end));
6127
6128 buffer_grow_str0 (buffer, "</btrace>\n");
6129 break;
6130
6131 default:
6132 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6133
6134 btrace_data_fini (&btrace);
6135 return -1;
6136 }
6137
6138 btrace_data_fini (&btrace);
6139 return 0;
6140 }
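
/* Editorial sketch: for the BTS format, the reply assembled above has
   this shape (the block addresses are made up):

     <!DOCTYPE btrace SYSTEM "btrace.dtd">
     <btrace version="1.0">
     <block begin="0x400530" end="0x400546"/>
     <block begin="0x400400" end="0x400416"/>
     </btrace>
*/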
6141
6142 /* See to_btrace_conf target method. */
6143
6144 static int
6145 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6146 struct buffer *buffer)
6147 {
6148 const struct btrace_config *conf;
6149
6150 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6151 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6152
6153 conf = linux_btrace_conf (tinfo);
6154 if (conf != NULL)
6155 {
6156 switch (conf->format)
6157 {
6158 case BTRACE_FORMAT_NONE:
6159 break;
6160
6161 case BTRACE_FORMAT_BTS:
6162 buffer_xml_printf (buffer, "<bts");
6163 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6164 buffer_xml_printf (buffer, " />\n");
6165 break;
6166 }
6167 }
6168
6169 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6170 return 0;
6171 }
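
/* Editorial sketch: a typical reply assembled above, assuming a BTS
   configuration with a hypothetical 64 KiB ring buffer:

     <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
     <btrace-conf version="1.0">
     <bts size="0x10000" />
     </btrace-conf>
*/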
6172 #endif /* HAVE_LINUX_BTRACE */
6173
6174 static struct target_ops linux_target_ops = {
6175 linux_create_inferior,
6176 linux_attach,
6177 linux_kill,
6178 linux_detach,
6179 linux_mourn,
6180 linux_join,
6181 linux_thread_alive,
6182 linux_resume,
6183 linux_wait,
6184 linux_fetch_registers,
6185 linux_store_registers,
6186 linux_prepare_to_access_memory,
6187 linux_done_accessing_memory,
6188 linux_read_memory,
6189 linux_write_memory,
6190 linux_look_up_symbols,
6191 linux_request_interrupt,
6192 linux_read_auxv,
6193 linux_supports_z_point_type,
6194 linux_insert_point,
6195 linux_remove_point,
6196 linux_stopped_by_sw_breakpoint,
6197 linux_supports_stopped_by_sw_breakpoint,
6198 linux_stopped_by_hw_breakpoint,
6199 linux_supports_stopped_by_hw_breakpoint,
6200 linux_stopped_by_watchpoint,
6201 linux_stopped_data_address,
6202 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6203 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6204 && defined(PT_TEXT_END_ADDR)
6205 linux_read_offsets,
6206 #else
6207 NULL,
6208 #endif
6209 #ifdef USE_THREAD_DB
6210 thread_db_get_tls_address,
6211 #else
6212 NULL,
6213 #endif
6214 linux_qxfer_spu,
6215 hostio_last_error_from_errno,
6216 linux_qxfer_osdata,
6217 linux_xfer_siginfo,
6218 linux_supports_non_stop,
6219 linux_async,
6220 linux_start_non_stop,
6221 linux_supports_multi_process,
6222 #ifdef USE_THREAD_DB
6223 thread_db_handle_monitor_command,
6224 #else
6225 NULL,
6226 #endif
6227 linux_common_core_of_thread,
6228 linux_read_loadmap,
6229 linux_process_qsupported,
6230 linux_supports_tracepoints,
6231 linux_read_pc,
6232 linux_write_pc,
6233 linux_thread_stopped,
6234 NULL,
6235 linux_pause_all,
6236 linux_unpause_all,
6237 linux_stabilize_threads,
6238 linux_install_fast_tracepoint_jump_pad,
6239 linux_emit_ops,
6240 linux_supports_disable_randomization,
6241 linux_get_min_fast_tracepoint_insn_len,
6242 linux_qxfer_libraries_svr4,
6243 linux_supports_agent,
6244 #ifdef HAVE_LINUX_BTRACE
6245 linux_supports_btrace,
6246 linux_low_enable_btrace,
6247 linux_low_disable_btrace,
6248 linux_low_read_btrace,
6249 linux_low_btrace_conf,
6250 #else
6251 NULL,
6252 NULL,
6253 NULL,
6254 NULL,
6255 NULL,
6256 #endif
6257 linux_supports_range_stepping,
6258 };
6259
6260 static void
6261 linux_init_signals (void)
6262 {
6263 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6264 to find what the cancel signal actually is. */
6265 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6266 signal (__SIGRTMIN+1, SIG_IGN);
6267 #endif
6268 }
6269
6270 #ifdef HAVE_LINUX_REGSETS
6271 void
6272 initialize_regsets_info (struct regsets_info *info)
6273 {
6274 for (info->num_regsets = 0;
6275 info->regsets[info->num_regsets].size >= 0;
6276 info->num_regsets++)
6277 ;
6278 }
6279 #endif
6280
6281 void
6282 initialize_low (void)
6283 {
6284 struct sigaction sigchld_action;
6285 memset (&sigchld_action, 0, sizeof (sigchld_action));
6286 set_target_ops (&linux_target_ops);
6287 set_breakpoint_data (the_low_target.breakpoint,
6288 the_low_target.breakpoint_len);
6289 linux_init_signals ();
6290 linux_ptrace_init_warnings ();
6291
6292 sigchld_action.sa_handler = sigchld_handler;
6293 sigemptyset (&sigchld_action.sa_mask);
6294 sigchld_action.sa_flags = SA_RESTART;
6295 sigaction (SIGCHLD, &sigchld_action, NULL);
6296
6297 initialize_low_arch ();
6298 }