fix gdbserver/linux-low.c's pending status handling
1 /* Low level interface to ptrace, for the remote server for GDB.
2 Copyright (C) 1995-2015 Free Software Foundation, Inc.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "server.h"
20 #include "linux-low.h"
21 #include "nat/linux-osdata.h"
22 #include "agent.h"
23
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #include <sys/ptrace.h>
28 #include "nat/linux-ptrace.h"
29 #include "nat/linux-procfs.h"
30 #include "nat/linux-personality.h"
31 #include <signal.h>
32 #include <sys/ioctl.h>
33 #include <fcntl.h>
34 #include <unistd.h>
35 #include <sys/syscall.h>
36 #include <sched.h>
37 #include <ctype.h>
38 #include <pwd.h>
39 #include <sys/types.h>
40 #include <dirent.h>
41 #include <sys/stat.h>
42 #include <sys/vfs.h>
43 #include <sys/uio.h>
44 #include "filestuff.h"
45 #include "tracepoint.h"
46 #include "hostio.h"
47 #ifndef ELFMAG0
48 /* Don't include <linux/elf.h> here. If it got included by gdb_proc_service.h
49 then ELFMAG0 will have been defined. If it didn't get included by
50 gdb_proc_service.h then including it will likely introduce a duplicate
51 definition of elf_fpregset_t. */
52 #include <elf.h>
53 #endif
54
55 #ifndef SPUFS_MAGIC
56 #define SPUFS_MAGIC 0x23c9b64e
57 #endif
58
59 #ifdef HAVE_PERSONALITY
60 # include <sys/personality.h>
61 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
62 # define ADDR_NO_RANDOMIZE 0x0040000
63 # endif
64 #endif
65
66 #ifndef O_LARGEFILE
67 #define O_LARGEFILE 0
68 #endif
69
70 #ifndef W_STOPCODE
71 #define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
72 #endif
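/* (For illustration: W_STOPCODE (SIGTRAP) is (5 << 8) | 0x7f == 0x57f,
   SIGTRAP being 5 on Linux; WIFSTOPPED () is true for that status and
   WSTOPSIG () yields SIGTRAP again.  dequeue_one_deferred_signal below
   relies on this to rebuild a wait status for a deferred signal.)  */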
73
74 /* This is the kernel's hard limit. Not to be confused with
75 SIGRTMIN. */
76 #ifndef __SIGRTMIN
77 #define __SIGRTMIN 32
78 #endif
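/* (Note: glibc reserves the first few kernel RT signals above
   __SIGRTMIN for its own NPTL threading use, which is why the
   user-visible SIGRTMIN is typically 34 or 35 rather than 32.)  */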
79
80 /* Some targets did not define these ptrace constants from the start,
81 so gdbserver defines them locally here. In the future, these may
82 be removed after they are added to asm/ptrace.h. */
83 #if !(defined(PT_TEXT_ADDR) \
84 || defined(PT_DATA_ADDR) \
85 || defined(PT_TEXT_END_ADDR))
86 #if defined(__mcoldfire__)
87 /* These are still undefined in 3.10 kernels. */
88 #define PT_TEXT_ADDR 49*4
89 #define PT_DATA_ADDR 50*4
90 #define PT_TEXT_END_ADDR 51*4
91 /* BFIN already defines these since at least 2.6.32 kernels. */
92 #elif defined(BFIN)
93 #define PT_TEXT_ADDR 220
94 #define PT_TEXT_END_ADDR 224
95 #define PT_DATA_ADDR 228
96 /* These are still undefined in 3.10 kernels. */
97 #elif defined(__TMS320C6X__)
98 #define PT_TEXT_ADDR (0x10000*4)
99 #define PT_DATA_ADDR (0x10004*4)
100 #define PT_TEXT_END_ADDR (0x10008*4)
101 #endif
102 #endif
103
104 #ifdef HAVE_LINUX_BTRACE
105 # include "nat/linux-btrace.h"
106 # include "btrace-common.h"
107 #endif
108
109 #ifndef HAVE_ELF32_AUXV_T
110 /* Copied from glibc's elf.h. */
111 typedef struct
112 {
113 uint32_t a_type; /* Entry type */
114 union
115 {
116 uint32_t a_val; /* Integer value */
117       /* We used to have pointer elements added here.  We cannot do that,
118 though, since it does not work when using 32-bit definitions
119 on 64-bit platforms and vice versa. */
120 } a_un;
121 } Elf32_auxv_t;
122 #endif
123
124 #ifndef HAVE_ELF64_AUXV_T
125 /* Copied from glibc's elf.h. */
126 typedef struct
127 {
128 uint64_t a_type; /* Entry type */
129 union
130 {
131 uint64_t a_val; /* Integer value */
132       /* We used to have pointer elements added here.  We cannot do that,
133 though, since it does not work when using 32-bit definitions
134 on 64-bit platforms and vice versa. */
135 } a_un;
136 } Elf64_auxv_t;
137 #endif
138
139 /* A list of all unknown processes which receive stop signals. Some
140 other process will presumably claim each of these as forked
141 children momentarily. */
142
143 struct simple_pid_list
144 {
145 /* The process ID. */
146 int pid;
147
148 /* The status as reported by waitpid. */
149 int status;
150
151 /* Next in chain. */
152 struct simple_pid_list *next;
153 };
154 struct simple_pid_list *stopped_pids;
155
156 /* Trivial list manipulation functions to keep track of a list of new
157 stopped processes. */
158
159 static void
160 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
161 {
162 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
163
164 new_pid->pid = pid;
165 new_pid->status = status;
166 new_pid->next = *listp;
167 *listp = new_pid;
168 }
169
170 static int
171 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
172 {
173 struct simple_pid_list **p;
174
175 for (p = listp; *p != NULL; p = &(*p)->next)
176 if ((*p)->pid == pid)
177 {
178 struct simple_pid_list *next = (*p)->next;
179
180 *statusp = (*p)->status;
181 xfree (*p);
182 *p = next;
183 return 1;
184 }
185 return 0;
186 }
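/* (Usage sketch: linux_low_filter_event calls add_to_pid_list when a
   stop arrives for an lwp we don't know about yet -- a not-yet-claimed
   clone child -- and handle_extended_wait later claims that status via
   pull_pid_from_list when it processes the parent's
   PTRACE_EVENT_CLONE event.)  */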
187
188 enum stopping_threads_kind
189 {
190 /* Not stopping threads presently. */
191 NOT_STOPPING_THREADS,
192
193 /* Stopping threads. */
194 STOPPING_THREADS,
195
196 /* Stopping and suspending threads. */
197 STOPPING_AND_SUSPENDING_THREADS
198 };
199
200 /* This is set while stop_all_lwps is in effect. */
201 enum stopping_threads_kind stopping_threads = NOT_STOPPING_THREADS;
202
203 /* FIXME make into a target method? */
204 int using_threads = 1;
205
206 /* True if we're presently stabilizing threads (moving them out of
207 jump pads). */
208 static int stabilizing_threads;
209
210 static void linux_resume_one_lwp (struct lwp_info *lwp,
211 int step, int signal, siginfo_t *info);
212 static void linux_resume (struct thread_resume *resume_info, size_t n);
213 static void stop_all_lwps (int suspend, struct lwp_info *except);
214 static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
215 static int linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
216 int *wstat, int options);
217 static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
218 static struct lwp_info *add_lwp (ptid_t ptid);
219 static int linux_stopped_by_watchpoint (void);
220 static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
221 static void proceed_all_lwps (void);
222 static int finish_step_over (struct lwp_info *lwp);
223 static int kill_lwp (unsigned long lwpid, int signo);
224
225 /* When the event-loop is doing a step-over, this points at the thread
226 being stepped. */
227 ptid_t step_over_bkpt;
228
229 /* True if the low target can hardware single-step. Such targets
230 don't need a BREAKPOINT_REINSERT_ADDR callback. */
231
232 static int
233 can_hardware_single_step (void)
234 {
235 return (the_low_target.breakpoint_reinsert_addr == NULL);
236 }
237
238 /* True if the low target supports memory breakpoints. If so, we'll
239 have a GET_PC implementation. */
240
241 static int
242 supports_breakpoints (void)
243 {
244 return (the_low_target.get_pc != NULL);
245 }
246
247 /* Returns true if this target can support fast tracepoints. This
248 does not mean that the in-process agent has been loaded in the
249 inferior. */
250
251 static int
252 supports_fast_tracepoints (void)
253 {
254 return the_low_target.install_fast_tracepoint_jump_pad != NULL;
255 }
256
257 /* True if LWP is stopped in its stepping range. */
258
259 static int
260 lwp_in_step_range (struct lwp_info *lwp)
261 {
262 CORE_ADDR pc = lwp->stop_pc;
263
264 return (pc >= lwp->step_range_start && pc < lwp->step_range_end);
265 }
266
267 struct pending_signals
268 {
269 int signal;
270 siginfo_t info;
271 struct pending_signals *prev;
272 };
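/* (The list is linked through PREV with new entries pushed at the
   head, so the oldest pending signal sits at the tail;
   dequeue_one_deferred_signal below walks to the tail so deferred
   signals are re-reported in arrival order.)  */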
273
274 /* The read/write ends of the pipe registered as a waitable file in the
275 event loop. */
276 static int linux_event_pipe[2] = { -1, -1 };
277
278 /* True if we're currently in async mode. */
279 #define target_is_async_p() (linux_event_pipe[0] != -1)
280
281 static void send_sigstop (struct lwp_info *lwp);
282 static void wait_for_sigstop (void);
283
284 /* Return 1 if HEADER is a 64-bit ELF file, 0 if 32-bit, -1 if not ELF.  */
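/* (For example: an x86-64 executable yields 1 with *MACHINE set to
   EM_X86_64, an i386 executable yields 0 with *MACHINE set to EM_386,
   and a non-ELF file yields -1 with *MACHINE set to EM_NONE.)  */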
285
286 static int
287 elf_64_header_p (const Elf64_Ehdr *header, unsigned int *machine)
288 {
289 if (header->e_ident[EI_MAG0] == ELFMAG0
290 && header->e_ident[EI_MAG1] == ELFMAG1
291 && header->e_ident[EI_MAG2] == ELFMAG2
292 && header->e_ident[EI_MAG3] == ELFMAG3)
293 {
294 *machine = header->e_machine;
295 return header->e_ident[EI_CLASS] == ELFCLASS64;
296
297 }
298 *machine = EM_NONE;
299 return -1;
300 }
301
302 /* Return non-zero if FILE is a 64-bit ELF file,
303 zero if the file is not a 64-bit ELF file,
304 and -1 if the file is not accessible or doesn't exist. */
305
306 static int
307 elf_64_file_p (const char *file, unsigned int *machine)
308 {
309 Elf64_Ehdr header;
310 int fd;
311
312 fd = open (file, O_RDONLY);
313 if (fd < 0)
314 return -1;
315
316 if (read (fd, &header, sizeof (header)) != sizeof (header))
317 {
318 close (fd);
319 return 0;
320 }
321 close (fd);
322
323 return elf_64_header_p (&header, machine);
324 }
325
326 /* Accepts an integer PID; returns true if the executable that the
327    process PID is running is a 64-bit ELF file.  */
328
329 int
330 linux_pid_exe_is_elf_64_file (int pid, unsigned int *machine)
331 {
332 char file[PATH_MAX];
333
334 sprintf (file, "/proc/%d/exe", pid);
335 return elf_64_file_p (file, machine);
336 }
337
338 static void
339 delete_lwp (struct lwp_info *lwp)
340 {
341 struct thread_info *thr = get_lwp_thread (lwp);
342
343 if (debug_threads)
344 debug_printf ("deleting %ld\n", lwpid_of (thr));
345
346 remove_thread (thr);
347 free (lwp->arch_private);
348 free (lwp);
349 }
350
351 /* Add a process to the common process list, and set its private
352 data. */
353
354 static struct process_info *
355 linux_add_process (int pid, int attached)
356 {
357 struct process_info *proc;
358
359 proc = add_process (pid, attached);
360 proc->private = xcalloc (1, sizeof (*proc->private));
361
362 /* Set the arch when the first LWP stops. */
363 proc->private->new_inferior = 1;
364
365 if (the_low_target.new_process != NULL)
366 proc->private->arch_private = the_low_target.new_process ();
367
368 return proc;
369 }
370
371 static CORE_ADDR get_pc (struct lwp_info *lwp);
372
373 /* Handle a GNU/Linux extended wait response. If we see a clone
374 event, we need to add the new LWP to our list (and not report the
375 trap to higher layers). */
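/* (Protocol sketch: the PTRACE_EVENT_CLONE stop is reported on the
   parent, PTRACE_GETEVENTMSG then yields the new child's tid, and the
   child separately reports an initial SIGSTOP, which can arrive either
   before or after the parent's event; both orders are handled
   below.)  */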
376
377 static void
378 handle_extended_wait (struct lwp_info *event_child, int wstat)
379 {
380 int event = linux_ptrace_get_extended_event (wstat);
381 struct thread_info *event_thr = get_lwp_thread (event_child);
382 struct lwp_info *new_lwp;
383
384 if (event == PTRACE_EVENT_CLONE)
385 {
386 ptid_t ptid;
387 unsigned long new_pid;
388 int ret, status;
389
390 ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_thr), (PTRACE_TYPE_ARG3) 0,
391 &new_pid);
392
393 /* If we haven't already seen the new PID stop, wait for it now. */
394 if (!pull_pid_from_list (&stopped_pids, new_pid, &status))
395 {
396 /* The new child has a pending SIGSTOP. We can't affect it until it
397 hits the SIGSTOP, but we're already attached. */
398
399 ret = my_waitpid (new_pid, &status, __WALL);
400
401 if (ret == -1)
402 perror_with_name ("waiting for new child");
403 else if (ret != new_pid)
404 warning ("wait returned unexpected PID %d", ret);
405 else if (!WIFSTOPPED (status))
406 warning ("wait returned unexpected status 0x%x", status);
407 }
408
409 if (debug_threads)
410 debug_printf ("HEW: Got clone event "
411 "from LWP %ld, new child is LWP %ld\n",
412 lwpid_of (event_thr), new_pid);
413
414 ptid = ptid_build (pid_of (event_thr), new_pid, 0);
415 new_lwp = add_lwp (ptid);
416
417 /* Either we're going to immediately resume the new thread
418 or leave it stopped. linux_resume_one_lwp is a nop if it
419 thinks the thread is currently running, so set this first
420 before calling linux_resume_one_lwp. */
421 new_lwp->stopped = 1;
422
423 /* If we're suspending all threads, leave this one suspended
424 too. */
425 if (stopping_threads == STOPPING_AND_SUSPENDING_THREADS)
426 new_lwp->suspended = 1;
427
428 /* Normally we will get the pending SIGSTOP. But in some cases
429 we might get another signal delivered to the group first.
430 If we do get another signal, be sure not to lose it. */
431 if (WSTOPSIG (status) != SIGSTOP)
432 {
433 new_lwp->stop_expected = 1;
434 new_lwp->status_pending_p = 1;
435 new_lwp->status_pending = status;
436 }
437 }
438 }
439
440 /* Return the PC as read from the regcache of LWP, without any
441 adjustment. */
442
443 static CORE_ADDR
444 get_pc (struct lwp_info *lwp)
445 {
446 struct thread_info *saved_thread;
447 struct regcache *regcache;
448 CORE_ADDR pc;
449
450 if (the_low_target.get_pc == NULL)
451 return 0;
452
453 saved_thread = current_thread;
454 current_thread = get_lwp_thread (lwp);
455
456 regcache = get_thread_regcache (current_thread, 1);
457 pc = (*the_low_target.get_pc) (regcache);
458
459 if (debug_threads)
460 debug_printf ("pc is 0x%lx\n", (long) pc);
461
462 current_thread = saved_thread;
463 return pc;
464 }
465
466 /* This function should only be called if LWP got a SIGTRAP.
467 The SIGTRAP could mean several things.
468
469 On i386, where decr_pc_after_break is non-zero:
470
471 If we were single-stepping this process using PTRACE_SINGLESTEP, we
472 will get only the one SIGTRAP. The value of $eip will be the next
473 instruction. If the instruction we stepped over was a breakpoint,
474 we need to decrement the PC.
475
476 If we continue the process using PTRACE_CONT, we will get a
477 SIGTRAP when we hit a breakpoint. The value of $eip will be
478 the instruction after the breakpoint (i.e. needs to be
479 decremented). If we report the SIGTRAP to GDB, we must also
480 report the undecremented PC. If the breakpoint is removed, we
481 must resume at the decremented PC.
482
483 On a non-decr_pc_after_break machine with hardware or kernel
484 single-step:
485
486 If we either single-step a breakpoint instruction, or continue and
487 hit a breakpoint instruction, our PC will point at the breakpoint
488 instruction. */
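/* (A concrete example with made-up addresses: on i386,
   decr_pc_after_break is 1, the size of the int3 breakpoint
   instruction.  If a breakpoint is planted at 0x1000 and the inferior
   traps, the kernel reports $eip == 0x1001; we report PC 0x1000 to
   GDB, and must resume at 0x1000 once the breakpoint is removed.)  */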
489
490 static int
491 check_stopped_by_breakpoint (struct lwp_info *lwp)
492 {
493 CORE_ADDR pc;
494 CORE_ADDR sw_breakpoint_pc;
495 struct thread_info *saved_thread;
496
497 if (the_low_target.get_pc == NULL)
498 return 0;
499
500 pc = get_pc (lwp);
501 sw_breakpoint_pc = pc - the_low_target.decr_pc_after_break;
502
503 /* breakpoint_at reads from the current thread. */
504 saved_thread = current_thread;
505 current_thread = get_lwp_thread (lwp);
506
507 /* We may have just stepped a breakpoint instruction. E.g., in
508      non-stop mode, GDB first tells thread A to step a range, and
509 then the user inserts a breakpoint inside the range. In that
510 case, we need to report the breakpoint PC. But, when we're
511      trying to step past one of our own breakpoints that happens to
512 have been placed on top of a permanent breakpoint instruction, we
513 shouldn't adjust the PC, otherwise the program would keep
514 trapping the permanent breakpoint forever. */
515 if ((!lwp->stepping
516 || (!ptid_equal (ptid_of (current_thread), step_over_bkpt)
517 && lwp->stop_pc == sw_breakpoint_pc))
518 && (*the_low_target.breakpoint_at) (sw_breakpoint_pc))
519 {
520 if (debug_threads)
521 {
522 struct thread_info *thr = get_lwp_thread (lwp);
523
524 debug_printf ("CSBB: %s stopped by software breakpoint\n",
525 target_pid_to_str (ptid_of (thr)));
526 }
527
528 /* Back up the PC if necessary. */
529 if (pc != sw_breakpoint_pc)
530 {
531 struct regcache *regcache
532 = get_thread_regcache (current_thread, 1);
533 (*the_low_target.set_pc) (regcache, sw_breakpoint_pc);
534 }
535
536 lwp->stop_pc = sw_breakpoint_pc;
537 lwp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
538 current_thread = saved_thread;
539 return 1;
540 }
541
542 if (hardware_breakpoint_inserted_here (pc))
543 {
544 if (debug_threads)
545 {
546 struct thread_info *thr = get_lwp_thread (lwp);
547
548 debug_printf ("CSBB: %s stopped by hardware breakpoint\n",
549 target_pid_to_str (ptid_of (thr)));
550 }
551
552 lwp->stop_pc = pc;
553 lwp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
554 current_thread = saved_thread;
555 return 1;
556 }
557
558 current_thread = saved_thread;
559 return 0;
560 }
561
562 static struct lwp_info *
563 add_lwp (ptid_t ptid)
564 {
565 struct lwp_info *lwp;
566
567 lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
568 memset (lwp, 0, sizeof (*lwp));
569
570 if (the_low_target.new_thread != NULL)
571 lwp->arch_private = the_low_target.new_thread ();
572
573 lwp->thread = add_thread (ptid, lwp);
574
575 return lwp;
576 }
577
578 /* Start an inferior process and return its pid.
579 ALLARGS is a vector of program-name and args. */
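/* (The child side below follows the classic ptrace handshake: fork,
   PTRACE_TRACEME in the child, then exec; the kernel stops the child
   with a SIGTRAP at the exec, which the caller collects on its first
   wait.)  */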
580
581 static int
582 linux_create_inferior (char *program, char **allargs)
583 {
584 struct lwp_info *new_lwp;
585 int pid;
586 ptid_t ptid;
587 struct cleanup *restore_personality
588 = maybe_disable_address_space_randomization (disable_randomization);
589
590 #if defined(__UCLIBC__) && defined(HAS_NOMMU)
591 pid = vfork ();
592 #else
593 pid = fork ();
594 #endif
595 if (pid < 0)
596 perror_with_name ("fork");
597
598 if (pid == 0)
599 {
600 close_most_fds ();
601 ptrace (PTRACE_TRACEME, 0, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
602
603 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
604 signal (__SIGRTMIN + 1, SIG_DFL);
605 #endif
606
607 setpgid (0, 0);
608
609 /* If gdbserver is connected to gdb via stdio, redirect the inferior's
610 stdout to stderr so that inferior i/o doesn't corrupt the connection.
611 Also, redirect stdin to /dev/null. */
612 if (remote_connection_is_stdio ())
613 {
614 close (0);
615 open ("/dev/null", O_RDONLY);
616 dup2 (2, 1);
617 if (write (2, "stdin/stdout redirected\n",
618 sizeof ("stdin/stdout redirected\n") - 1) < 0)
619 {
620 /* Errors ignored. */;
621 }
622 }
623
624 execv (program, allargs);
625 if (errno == ENOENT)
626 execvp (program, allargs);
627
628 fprintf (stderr, "Cannot exec %s: %s.\n", program,
629 strerror (errno));
630 fflush (stderr);
631 _exit (0177);
632 }
633
634 do_cleanups (restore_personality);
635
636 linux_add_process (pid, 0);
637
638 ptid = ptid_build (pid, pid, 0);
639 new_lwp = add_lwp (ptid);
640 new_lwp->must_set_ptrace_flags = 1;
641
642 return pid;
643 }
644
645 /* Attach to an inferior process. Returns 0 on success, ERRNO on
646 error. */
647
648 int
649 linux_attach_lwp (ptid_t ptid)
650 {
651 struct lwp_info *new_lwp;
652 int lwpid = ptid_get_lwp (ptid);
653
654 if (ptrace (PTRACE_ATTACH, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0)
655 != 0)
656 return errno;
657
658 new_lwp = add_lwp (ptid);
659
660 /* We need to wait for SIGSTOP before being able to make the next
661 ptrace call on this LWP. */
662 new_lwp->must_set_ptrace_flags = 1;
663
664 if (linux_proc_pid_is_stopped (lwpid))
665 {
666 if (debug_threads)
667 debug_printf ("Attached to a stopped process\n");
668
669 /* The process is definitely stopped. It is in a job control
670 stop, unless the kernel predates the TASK_STOPPED /
671 TASK_TRACED distinction, in which case it might be in a
672 ptrace stop. Make sure it is in a ptrace stop; from there we
673 can kill it, signal it, et cetera.
674
675 First make sure there is a pending SIGSTOP. Since we are
676 already attached, the process can not transition from stopped
677 to running without a PTRACE_CONT; so we know this signal will
678 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
679 probably already in the queue (unless this kernel is old
680 enough to use TASK_STOPPED for ptrace stops); but since
681 SIGSTOP is not an RT signal, it can only be queued once. */
682 kill_lwp (lwpid, SIGSTOP);
683
684 /* Finally, resume the stopped process. This will deliver the
685 SIGSTOP (or a higher priority signal, just like normal
686 PTRACE_ATTACH), which we'll catch later on. */
687 ptrace (PTRACE_CONT, lwpid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
688 }
689
690 /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
691 brings it to a halt.
692
693 There are several cases to consider here:
694
695 1) gdbserver has already attached to the process and is being notified
696 of a new thread that is being created.
697 In this case we should ignore that SIGSTOP and resume the
698 process. This is handled below by setting stop_expected = 1,
699 and the fact that add_thread sets last_resume_kind ==
700 resume_continue.
701
702 2) This is the first thread (the process thread), and we're attaching
703 to it via attach_inferior.
704 In this case we want the process thread to stop.
705 This is handled by having linux_attach set last_resume_kind ==
706 resume_stop after we return.
707
708 If the pid we are attaching to is also the tgid, we attach to and
709 stop all the existing threads. Otherwise, we attach to pid and
710 ignore any other threads in the same group as this pid.
711
712 3) GDB is connecting to gdbserver and is requesting an enumeration of all
713 existing threads.
714 In this case we want the thread to stop.
715 FIXME: This case is currently not properly handled.
716    We should wait for the SIGSTOP but don't.  Things apparently work
717 because enough time passes between when we ptrace (ATTACH) and when
718 gdb makes the next ptrace call on the thread.
719
720 On the other hand, if we are currently trying to stop all threads, we
721 should treat the new thread as if we had sent it a SIGSTOP. This works
722 because we are guaranteed that the add_lwp call above added us to the
723 end of the list, and so the new thread has not yet reached
724 wait_for_sigstop (but will). */
725 new_lwp->stop_expected = 1;
726
727 return 0;
728 }
729
730 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
731 already attached. Returns true if a new LWP is found, false
732 otherwise. */
733
734 static int
735 attach_proc_task_lwp_callback (ptid_t ptid)
736 {
737 /* Is this a new thread? */
738 if (find_thread_ptid (ptid) == NULL)
739 {
740 int lwpid = ptid_get_lwp (ptid);
741 int err;
742
743 if (debug_threads)
744 debug_printf ("Found new lwp %d\n", lwpid);
745
746 err = linux_attach_lwp (ptid);
747
748 /* Be quiet if we simply raced with the thread exiting. EPERM
749 is returned if the thread's task still exists, and is marked
750 as exited or zombie, as well as other conditions, so in that
751 case, confirm the status in /proc/PID/status. */
752 if (err == ESRCH
753 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
754 {
755 if (debug_threads)
756 {
757 debug_printf ("Cannot attach to lwp %d: "
758 "thread is gone (%d: %s)\n",
759 lwpid, err, strerror (err));
760 }
761 }
762 else if (err != 0)
763 {
764 warning (_("Cannot attach to lwp %d: %s"),
765 lwpid,
766 linux_ptrace_attach_fail_reason_string (ptid, err));
767 }
768
769 return 1;
770 }
771 return 0;
772 }
773
774 /* Attach to PID. If PID is the tgid, attach to it and all
775 of its threads. */
776
777 static int
778 linux_attach (unsigned long pid)
779 {
780 ptid_t ptid = ptid_build (pid, pid, 0);
781 int err;
782
783 /* Attach to PID. We will check for other threads
784 soon. */
785 err = linux_attach_lwp (ptid);
786 if (err != 0)
787 error ("Cannot attach to process %ld: %s",
788 pid, linux_ptrace_attach_fail_reason_string (ptid, err));
789
790 linux_add_process (pid, 1);
791
792 if (!non_stop)
793 {
794 struct thread_info *thread;
795
796 /* Don't ignore the initial SIGSTOP if we just attached to this
797 process. It will be collected by wait shortly. */
798 thread = find_thread_ptid (ptid_build (pid, pid, 0));
799 thread->last_resume_kind = resume_stop;
800 }
801
802 /* We must attach to every LWP. If /proc is mounted, use that to
803 find them now. On the one hand, the inferior may be using raw
804 clone instead of using pthreads. On the other hand, even if it
805 is using pthreads, GDB may not be connected yet (thread_db needs
806 to do symbol lookups, through qSymbol). Also, thread_db walks
807 structures in the inferior's address space to find the list of
808 threads/LWPs, and those structures may well be corrupted. Note
809 that once thread_db is loaded, we'll still use it to list threads
810 and associate pthread info with each LWP. */
811 linux_proc_attach_tgid_threads (pid, attach_proc_task_lwp_callback);
812 return 0;
813 }
814
815 struct counter
816 {
817 int pid;
818 int count;
819 };
820
821 static int
822 second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
823 {
824 struct counter *counter = args;
825
826 if (ptid_get_pid (entry->id) == counter->pid)
827 {
828 if (++counter->count > 1)
829 return 1;
830 }
831
832 return 0;
833 }
834
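/* (find_inferior returns the first entry for which the callback
   returns non-zero, so a NULL result below means no second thread of
   PID was ever seen -- i.e., PID has at most one thread left.)  */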
835 static int
836 last_thread_of_process_p (int pid)
837 {
838   struct counter counter = { pid, 0 };
839
840 return (find_inferior (&all_threads,
841 second_thread_of_pid_p, &counter) == NULL);
842 }
843
844 /* Kill LWP. */
845
846 static void
847 linux_kill_one_lwp (struct lwp_info *lwp)
848 {
849 struct thread_info *thr = get_lwp_thread (lwp);
850 int pid = lwpid_of (thr);
851
852 /* PTRACE_KILL is unreliable. After stepping into a signal handler,
853 there is no signal context, and ptrace(PTRACE_KILL) (or
854 ptrace(PTRACE_CONT, SIGKILL), pretty much the same) acts like
855 ptrace(CONT, pid, 0,0) and just resumes the tracee. A better
856 alternative is to kill with SIGKILL. We only need one SIGKILL
857 per process, not one for each thread. But since we still support
858 linuxthreads, and we also support debugging programs using raw
859 clone without CLONE_THREAD, we send one for each thread. For
860 years, we used PTRACE_KILL only, so we're being a bit paranoid
861 about some old kernels where PTRACE_KILL might work better
862 (dubious if there are any such, but that's why it's paranoia), so
863 we try SIGKILL first, PTRACE_KILL second, and so we're fine
864 everywhere. */
865
866 errno = 0;
867 kill_lwp (pid, SIGKILL);
868 if (debug_threads)
869 {
870 int save_errno = errno;
871
872 debug_printf ("LKL: kill_lwp (SIGKILL) %s, 0, 0 (%s)\n",
873 target_pid_to_str (ptid_of (thr)),
874 save_errno ? strerror (save_errno) : "OK");
875 }
876
877 errno = 0;
878 ptrace (PTRACE_KILL, pid, (PTRACE_TYPE_ARG3) 0, (PTRACE_TYPE_ARG4) 0);
879 if (debug_threads)
880 {
881 int save_errno = errno;
882
883 debug_printf ("LKL: PTRACE_KILL %s, 0, 0 (%s)\n",
884 target_pid_to_str (ptid_of (thr)),
885 save_errno ? strerror (save_errno) : "OK");
886 }
887 }
888
889 /* Kill LWP and wait for it to die. */
890
891 static void
892 kill_wait_lwp (struct lwp_info *lwp)
893 {
894 struct thread_info *thr = get_lwp_thread (lwp);
895 int pid = ptid_get_pid (ptid_of (thr));
896 int lwpid = ptid_get_lwp (ptid_of (thr));
897 int wstat;
898 int res;
899
900 if (debug_threads)
901 debug_printf ("kwl: killing lwp %d, for pid: %d\n", lwpid, pid);
902
903 do
904 {
905 linux_kill_one_lwp (lwp);
906
907 /* Make sure it died. Notes:
908
909 - The loop is most likely unnecessary.
910
911 - We don't use linux_wait_for_event as that could delete lwps
912 while we're iterating over them. We're not interested in
913 any pending status at this point, only in making sure all
914 wait status on the kernel side are collected until the
915 process is reaped.
916
917 - We don't use __WALL here as the __WALL emulation relies on
918 SIGCHLD, and killing a stopped process doesn't generate
919 one, nor an exit status.
920 */
921 res = my_waitpid (lwpid, &wstat, 0);
922 if (res == -1 && errno == ECHILD)
923 res = my_waitpid (lwpid, &wstat, __WCLONE);
924 } while (res > 0 && WIFSTOPPED (wstat));
925
926 gdb_assert (res > 0);
927 }
928
929 /* Callback for `find_inferior'. Kills an lwp of a given process,
930 except the leader. */
931
932 static int
933 kill_one_lwp_callback (struct inferior_list_entry *entry, void *args)
934 {
935 struct thread_info *thread = (struct thread_info *) entry;
936 struct lwp_info *lwp = get_thread_lwp (thread);
937 int pid = * (int *) args;
938
939 if (ptid_get_pid (entry->id) != pid)
940 return 0;
941
942 /* We avoid killing the first thread here, because of a Linux kernel (at
943 least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
944 the children get a chance to be reaped, it will remain a zombie
945 forever. */
946
947 if (lwpid_of (thread) == pid)
948 {
949 if (debug_threads)
950 debug_printf ("lkop: is last of process %s\n",
951 target_pid_to_str (entry->id));
952 return 0;
953 }
954
955 kill_wait_lwp (lwp);
956 return 0;
957 }
958
959 static int
960 linux_kill (int pid)
961 {
962 struct process_info *process;
963 struct lwp_info *lwp;
964
965 process = find_process_pid (pid);
966 if (process == NULL)
967 return -1;
968
969 /* If we're killing a running inferior, make sure it is stopped
970 first, as PTRACE_KILL will not work otherwise. */
971 stop_all_lwps (0, NULL);
972
973   find_inferior (&all_threads, kill_one_lwp_callback, &pid);
974
975 /* See the comment in linux_kill_one_lwp. We did not kill the first
976 thread in the list, so do so now. */
977 lwp = find_lwp_pid (pid_to_ptid (pid));
978
979 if (lwp == NULL)
980 {
981 if (debug_threads)
982 debug_printf ("lk_1: cannot find lwp for pid: %d\n",
983 pid);
984 }
985 else
986 kill_wait_lwp (lwp);
987
988 the_target->mourn (process);
989
990 /* Since we presently can only stop all lwps of all processes, we
991 need to unstop lwps of other processes. */
992 unstop_all_lwps (0, NULL);
993 return 0;
994 }
995
996 /* Get pending signal of THREAD, for detaching purposes. This is the
997 signal the thread last stopped for, which we need to deliver to the
998    thread when detaching; otherwise, it'd be suppressed/lost.  */
999
1000 static int
1001 get_detach_signal (struct thread_info *thread)
1002 {
1003 enum gdb_signal signo = GDB_SIGNAL_0;
1004 int status;
1005 struct lwp_info *lp = get_thread_lwp (thread);
1006
1007 if (lp->status_pending_p)
1008 status = lp->status_pending;
1009 else
1010 {
1011 /* If the thread had been suspended by gdbserver, and it stopped
1012 cleanly, then it'll have stopped with SIGSTOP. But we don't
1013 want to deliver that SIGSTOP. */
1014 if (thread->last_status.kind != TARGET_WAITKIND_STOPPED
1015 || thread->last_status.value.sig == GDB_SIGNAL_0)
1016 return 0;
1017
1018 /* Otherwise, we may need to deliver the signal we
1019 intercepted. */
1020 status = lp->last_status;
1021 }
1022
1023 if (!WIFSTOPPED (status))
1024 {
1025 if (debug_threads)
1026 debug_printf ("GPS: lwp %s hasn't stopped: no pending signal\n",
1027 target_pid_to_str (ptid_of (thread)));
1028 return 0;
1029 }
1030
1031 /* Extended wait statuses aren't real SIGTRAPs. */
1032 if (WSTOPSIG (status) == SIGTRAP && linux_is_extended_waitstatus (status))
1033 {
1034 if (debug_threads)
1035 debug_printf ("GPS: lwp %s had stopped with extended "
1036 "status: no pending signal\n",
1037 target_pid_to_str (ptid_of (thread)));
1038 return 0;
1039 }
1040
1041 signo = gdb_signal_from_host (WSTOPSIG (status));
1042
1043 if (program_signals_p && !program_signals[signo])
1044 {
1045 if (debug_threads)
1046 debug_printf ("GPS: lwp %s had signal %s, but it is in nopass state\n",
1047 target_pid_to_str (ptid_of (thread)),
1048 gdb_signal_to_string (signo));
1049 return 0;
1050 }
1051 else if (!program_signals_p
1052 /* If we have no way to know which signals GDB does not
1053 want to have passed to the program, assume
1054 SIGTRAP/SIGINT, which is GDB's default. */
1055 && (signo == GDB_SIGNAL_TRAP || signo == GDB_SIGNAL_INT))
1056 {
1057 if (debug_threads)
1058 debug_printf ("GPS: lwp %s had signal %s, "
1059 "but we don't know if we should pass it. "
1060 "Default to not.\n",
1061 target_pid_to_str (ptid_of (thread)),
1062 gdb_signal_to_string (signo));
1063 return 0;
1064 }
1065 else
1066 {
1067 if (debug_threads)
1068 debug_printf ("GPS: lwp %s has pending signal %s: delivering it.\n",
1069 target_pid_to_str (ptid_of (thread)),
1070 gdb_signal_to_string (signo));
1071
1072 return WSTOPSIG (status);
1073 }
1074 }
1075
1076 static int
1077 linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
1078 {
1079 struct thread_info *thread = (struct thread_info *) entry;
1080 struct lwp_info *lwp = get_thread_lwp (thread);
1081 int pid = * (int *) args;
1082 int sig;
1083
1084 if (ptid_get_pid (entry->id) != pid)
1085 return 0;
1086
1087 /* If there is a pending SIGSTOP, get rid of it. */
1088 if (lwp->stop_expected)
1089 {
1090 if (debug_threads)
1091 debug_printf ("Sending SIGCONT to %s\n",
1092 target_pid_to_str (ptid_of (thread)));
1093
1094 kill_lwp (lwpid_of (thread), SIGCONT);
1095 lwp->stop_expected = 0;
1096 }
1097
1098 /* Flush any pending changes to the process's registers. */
1099 regcache_invalidate_thread (thread);
1100
1101 /* Pass on any pending signal for this thread. */
1102 sig = get_detach_signal (thread);
1103
1104 /* Finally, let it resume. */
1105 if (the_low_target.prepare_to_resume != NULL)
1106 the_low_target.prepare_to_resume (lwp);
1107 if (ptrace (PTRACE_DETACH, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1108 (PTRACE_TYPE_ARG4) (long) sig) < 0)
1109 error (_("Can't detach %s: %s"),
1110 target_pid_to_str (ptid_of (thread)),
1111 strerror (errno));
1112
1113 delete_lwp (lwp);
1114 return 0;
1115 }
1116
1117 static int
1118 linux_detach (int pid)
1119 {
1120 struct process_info *process;
1121
1122 process = find_process_pid (pid);
1123 if (process == NULL)
1124 return -1;
1125
1126 /* Stop all threads before detaching. First, ptrace requires that
1127      the thread is stopped to successfully detach.  Second, thread_db
1128 may need to uninstall thread event breakpoints from memory, which
1129 only works with a stopped process anyway. */
1130 stop_all_lwps (0, NULL);
1131
1132 #ifdef USE_THREAD_DB
1133 thread_db_detach (process);
1134 #endif
1135
1136 /* Stabilize threads (move out of jump pads). */
1137 stabilize_threads ();
1138
1139 find_inferior (&all_threads, linux_detach_one_lwp, &pid);
1140
1141 the_target->mourn (process);
1142
1143 /* Since we presently can only stop all lwps of all processes, we
1144 need to unstop lwps of other processes. */
1145 unstop_all_lwps (0, NULL);
1146 return 0;
1147 }
1148
1149 /* Remove all LWPs that belong to process PROC from the lwp list. */
1150
1151 static int
1152 delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
1153 {
1154 struct thread_info *thread = (struct thread_info *) entry;
1155 struct lwp_info *lwp = get_thread_lwp (thread);
1156 struct process_info *process = proc;
1157
1158 if (pid_of (thread) == pid_of (process))
1159 delete_lwp (lwp);
1160
1161 return 0;
1162 }
1163
1164 static void
1165 linux_mourn (struct process_info *process)
1166 {
1167 struct process_info_private *priv;
1168
1169 #ifdef USE_THREAD_DB
1170 thread_db_mourn (process);
1171 #endif
1172
1173 find_inferior (&all_threads, delete_lwp_callback, process);
1174
1175   /* Free all private data.  */
1176 priv = process->private;
1177 free (priv->arch_private);
1178 free (priv);
1179 process->private = NULL;
1180
1181 remove_process (process);
1182 }
1183
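/* (linux_join blocks until process PID is fully reaped: it loops
   my_waitpid until an exit or termination-by-signal status comes back,
   or until waitpid fails with ECHILD, meaning nothing is left to wait
   for.)  */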
1184 static void
1185 linux_join (int pid)
1186 {
1187 int status, ret;
1188
1189 do {
1190 ret = my_waitpid (pid, &status, 0);
1191 if (WIFEXITED (status) || WIFSIGNALED (status))
1192 break;
1193 } while (ret != -1 || errno != ECHILD);
1194 }
1195
1196 /* Return nonzero if the given thread is still alive. */
1197 static int
1198 linux_thread_alive (ptid_t ptid)
1199 {
1200 struct lwp_info *lwp = find_lwp_pid (ptid);
1201
1202 /* We assume we always know if a thread exits. If a whole process
1203 exited but we still haven't been able to report it to GDB, we'll
1204 hold on to the last lwp of the dead process. */
1205 if (lwp != NULL)
1206 return !lwp->dead;
1207 else
1208 return 0;
1209 }
1210
1211 /* Return 1 if this lwp still has an interesting status pending. If
1212 not (e.g., it had stopped for a breakpoint that is gone), return
1213 false. */
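/* (The idea, in brief: a stop reason recorded earlier may have been
   invalidated since -- the thread's PC may have moved, or the
   breakpoint that caused the stop may have been removed -- so before
   reporting a pending status we re-check it against current state and
   discard it if stale.)  */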
1214
1215 static int
1216 thread_still_has_status_pending_p (struct thread_info *thread)
1217 {
1218 struct lwp_info *lp = get_thread_lwp (thread);
1219
1220 if (!lp->status_pending_p)
1221 return 0;
1222
1223 /* If we got a `vCont;t', but we haven't reported a stop yet, do
1224 report any status pending the LWP may have. */
1225 if (thread->last_resume_kind == resume_stop
1226 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
1227 return 0;
1228
1229 if (thread->last_resume_kind != resume_stop
1230 && (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
1231 || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT))
1232 {
1233 struct thread_info *saved_thread;
1234 CORE_ADDR pc;
1235 int discard = 0;
1236
1237 gdb_assert (lp->last_status != 0);
1238
1239 pc = get_pc (lp);
1240
1241 saved_thread = current_thread;
1242 current_thread = thread;
1243
1244 if (pc != lp->stop_pc)
1245 {
1246 if (debug_threads)
1247 debug_printf ("PC of %ld changed\n",
1248 lwpid_of (thread));
1249 discard = 1;
1250 }
1251 else if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
1252 && !(*the_low_target.breakpoint_at) (pc))
1253 {
1254 if (debug_threads)
1255 debug_printf ("previous SW breakpoint of %ld gone\n",
1256 lwpid_of (thread));
1257 discard = 1;
1258 }
1259 else if (lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT
1260 && !hardware_breakpoint_inserted_here (pc))
1261 {
1262 if (debug_threads)
1263 debug_printf ("previous HW breakpoint of %ld gone\n",
1264 lwpid_of (thread));
1265 discard = 1;
1266 }
1267
1268 current_thread = saved_thread;
1269
1270 if (discard)
1271 {
1272 if (debug_threads)
1273 debug_printf ("discarding pending breakpoint status\n");
1274 lp->status_pending_p = 0;
1275 return 0;
1276 }
1277 }
1278
1279 return 1;
1280 }
1281
1282 /* Return 1 if this lwp has an interesting status pending. */
1283 static int
1284 status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
1285 {
1286 struct thread_info *thread = (struct thread_info *) entry;
1287 struct lwp_info *lp = get_thread_lwp (thread);
1288 ptid_t ptid = * (ptid_t *) arg;
1289
1290 /* Check if we're only interested in events from a specific process
1291 or a specific LWP. */
1292 if (!ptid_match (ptid_of (thread), ptid))
1293 return 0;
1294
1295 if (lp->status_pending_p
1296 && !thread_still_has_status_pending_p (thread))
1297 {
1298 linux_resume_one_lwp (lp, lp->stepping, GDB_SIGNAL_0, NULL);
1299 return 0;
1300 }
1301
1302 return lp->status_pending_p;
1303 }
1304
1305 static int
1306 same_lwp (struct inferior_list_entry *entry, void *data)
1307 {
1308 ptid_t ptid = *(ptid_t *) data;
1309 int lwp;
1310
1311 if (ptid_get_lwp (ptid) != 0)
1312 lwp = ptid_get_lwp (ptid);
1313 else
1314 lwp = ptid_get_pid (ptid);
1315
1316 if (ptid_get_lwp (entry->id) == lwp)
1317 return 1;
1318
1319 return 0;
1320 }
1321
1322 struct lwp_info *
1323 find_lwp_pid (ptid_t ptid)
1324 {
1325 struct inferior_list_entry *thread
1326 = find_inferior (&all_threads, same_lwp, &ptid);
1327
1328 if (thread == NULL)
1329 return NULL;
1330
1331 return get_thread_lwp ((struct thread_info *) thread);
1332 }
1333
1334 /* Return the number of known LWPs in the tgid given by PID. */
1335
1336 static int
1337 num_lwps (int pid)
1338 {
1339 struct inferior_list_entry *inf, *tmp;
1340 int count = 0;
1341
1342 ALL_INFERIORS (&all_threads, inf, tmp)
1343 {
1344 if (ptid_get_pid (inf->id) == pid)
1345 count++;
1346 }
1347
1348 return count;
1349 }
1350
1351 /* Detect zombie thread group leaders, and "exit" them. We can't reap
1352 their exits until all other threads in the group have exited. */
1353
1354 static void
1355 check_zombie_leaders (void)
1356 {
1357 struct process_info *proc, *tmp;
1358
1359 ALL_PROCESSES (proc, tmp)
1360 {
1361 pid_t leader_pid = pid_of (proc);
1362 struct lwp_info *leader_lp;
1363
1364 leader_lp = find_lwp_pid (pid_to_ptid (leader_pid));
1365
1366 if (debug_threads)
1367 debug_printf ("leader_pid=%d, leader_lp!=NULL=%d, "
1368 "num_lwps=%d, zombie=%d\n",
1369 		      leader_pid, leader_lp != NULL, num_lwps (leader_pid),
1370 linux_proc_pid_is_zombie (leader_pid));
1371
1372 if (leader_lp != NULL
1373 /* Check if there are other threads in the group, as we may
1374 have raced with the inferior simply exiting. */
1375 && !last_thread_of_process_p (leader_pid)
1376 && linux_proc_pid_is_zombie (leader_pid))
1377 {
1378 /* A leader zombie can mean one of two things:
1379
1380 - It exited, and there's an exit status pending
1381 available, or only the leader exited (not the whole
1382 program). In the latter case, we can't waitpid the
1383 leader's exit status until all other threads are gone.
1384
1385 - There are 3 or more threads in the group, and a thread
1386 other than the leader exec'd. On an exec, the Linux
1387 kernel destroys all other threads (except the execing
1388 one) in the thread group, and resets the execing thread's
1389 tid to the tgid. No exit notification is sent for the
1390 execing thread -- from the ptracer's perspective, it
1391 appears as though the execing thread just vanishes.
1392 Until we reap all other threads except the leader and the
1393 execing thread, the leader will be zombie, and the
1394 execing thread will be in `D (disc sleep)'. As soon as
1395 all other threads are reaped, the execing thread changes
1396 	     its tid to the tgid, and the previous (zombie) leader
1397 vanishes, giving place to the "new" leader. We could try
1398 distinguishing the exit and exec cases, by waiting once
1399 more, and seeing if something comes out, but it doesn't
1400 sound useful. The previous leader _does_ go away, and
1401 we'll re-add the new one once we see the exec event
1402 (which is just the same as what would happen if the
1403 previous leader did exit voluntarily before some other
1404 thread execs). */
1405
1406 if (debug_threads)
1407 fprintf (stderr,
1408 "CZL: Thread group leader %d zombie "
1409 "(it exited, or another thread execd).\n",
1410 leader_pid);
1411
1412 delete_lwp (leader_lp);
1413 }
1414 }
1415 }
1416
1417 /* Callback for `find_inferior'. Returns the first LWP that is not
1418 stopped. ARG is a PTID filter. */
1419
1420 static int
1421 not_stopped_callback (struct inferior_list_entry *entry, void *arg)
1422 {
1423 struct thread_info *thr = (struct thread_info *) entry;
1424 struct lwp_info *lwp;
1425 ptid_t filter = *(ptid_t *) arg;
1426
1427 if (!ptid_match (ptid_of (thr), filter))
1428 return 0;
1429
1430 lwp = get_thread_lwp (thr);
1431 if (!lwp->stopped)
1432 return 1;
1433
1434 return 0;
1435 }
1436
1437 /* This function should only be called if the LWP got a SIGTRAP.
1438
1439    Handle any tracepoint steps or hits.  Return 1 if a tracepoint
1440 event was handled, 0 otherwise. */
1441
1442 static int
1443 handle_tracepoints (struct lwp_info *lwp)
1444 {
1445 struct thread_info *tinfo = get_lwp_thread (lwp);
1446 int tpoint_related_event = 0;
1447
1448 gdb_assert (lwp->suspended == 0);
1449
1450 /* If this tracepoint hit causes a tracing stop, we'll immediately
1451 uninsert tracepoints. To do this, we temporarily pause all
1452 threads, unpatch away, and then unpause threads. We need to make
1453 sure the unpausing doesn't resume LWP too. */
1454 lwp->suspended++;
1455
1456 /* And we need to be sure that any all-threads-stopping doesn't try
1457 to move threads out of the jump pads, as it could deadlock the
1458 inferior (LWP could be in the jump pad, maybe even holding the
1459 lock.) */
1460
1461 /* Do any necessary step collect actions. */
1462 tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);
1463
1464 tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);
1465
1466 /* See if we just hit a tracepoint and do its main collect
1467 actions. */
1468 tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);
1469
1470 lwp->suspended--;
1471
1472 gdb_assert (lwp->suspended == 0);
1473 gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);
1474
1475 if (tpoint_related_event)
1476 {
1477 if (debug_threads)
1478 debug_printf ("got a tracepoint event\n");
1479 return 1;
1480 }
1481
1482 return 0;
1483 }
1484
1485 /* Convenience wrapper. Returns true if LWP is presently collecting a
1486 fast tracepoint. */
1487
1488 static int
1489 linux_fast_tracepoint_collecting (struct lwp_info *lwp,
1490 struct fast_tpoint_collect_status *status)
1491 {
1492 CORE_ADDR thread_area;
1493 struct thread_info *thread = get_lwp_thread (lwp);
1494
1495 if (the_low_target.get_thread_area == NULL)
1496 return 0;
1497
1498 /* Get the thread area address. This is used to recognize which
1499 thread is which when tracing with the in-process agent library.
1500 We don't read anything from the address, and treat it as opaque;
1501 it's the address itself that we assume is unique per-thread. */
1502 if ((*the_low_target.get_thread_area) (lwpid_of (thread), &thread_area) == -1)
1503 return 0;
1504
1505 return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
1506 }
1507
1508 /* The reason we resume in the caller is that we want to be able
1509    to pass lwp->status_pending as WSTAT, and we need to clear
1510    status_pending_p before resuming; otherwise, linux_resume_one_lwp
1511 refuses to resume. */
1512
1513 static int
1514 maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
1515 {
1516 struct thread_info *saved_thread;
1517
1518 saved_thread = current_thread;
1519 current_thread = get_lwp_thread (lwp);
1520
1521 if ((wstat == NULL
1522 || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
1523 && supports_fast_tracepoints ()
1524 && agent_loaded_p ())
1525 {
1526 struct fast_tpoint_collect_status status;
1527 int r;
1528
1529 if (debug_threads)
1530 debug_printf ("Checking whether LWP %ld needs to move out of the "
1531 "jump pad.\n",
1532 lwpid_of (current_thread));
1533
1534 r = linux_fast_tracepoint_collecting (lwp, &status);
1535
1536 if (wstat == NULL
1537 || (WSTOPSIG (*wstat) != SIGILL
1538 && WSTOPSIG (*wstat) != SIGFPE
1539 && WSTOPSIG (*wstat) != SIGSEGV
1540 && WSTOPSIG (*wstat) != SIGBUS))
1541 {
1542 lwp->collecting_fast_tracepoint = r;
1543
1544 if (r != 0)
1545 {
1546 if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
1547 {
1548 /* Haven't executed the original instruction yet.
1549 Set breakpoint there, and wait till it's hit,
1550 then single-step until exiting the jump pad. */
1551 lwp->exit_jump_pad_bkpt
1552 = set_breakpoint_at (status.adjusted_insn_addr, NULL);
1553 }
1554
1555 if (debug_threads)
1556 debug_printf ("Checking whether LWP %ld needs to move out of "
1557 "the jump pad...it does\n",
1558 lwpid_of (current_thread));
1559 current_thread = saved_thread;
1560
1561 return 1;
1562 }
1563 }
1564 else
1565 {
1566 /* If we get a synchronous signal while collecting, *and*
1567 while executing the (relocated) original instruction,
1568 reset the PC to point at the tpoint address, before
1569 reporting to GDB. Otherwise, it's an IPA lib bug: just
1570 report the signal to GDB, and pray for the best. */
1571
1572 lwp->collecting_fast_tracepoint = 0;
1573
1574 if (r != 0
1575 && (status.adjusted_insn_addr <= lwp->stop_pc
1576 && lwp->stop_pc < status.adjusted_insn_addr_end))
1577 {
1578 siginfo_t info;
1579 struct regcache *regcache;
1580
1581 /* The si_addr on a few signals references the address
1582 of the faulting instruction. Adjust that as
1583 well. */
1584 if ((WSTOPSIG (*wstat) == SIGILL
1585 || WSTOPSIG (*wstat) == SIGFPE
1586 || WSTOPSIG (*wstat) == SIGBUS
1587 || WSTOPSIG (*wstat) == SIGSEGV)
1588 && ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
1589 (PTRACE_TYPE_ARG3) 0, &info) == 0
1590 /* Final check just to make sure we don't clobber
1591 the siginfo of non-kernel-sent signals. */
1592 && (uintptr_t) info.si_addr == lwp->stop_pc)
1593 {
1594 info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
1595 ptrace (PTRACE_SETSIGINFO, lwpid_of (current_thread),
1596 (PTRACE_TYPE_ARG3) 0, &info);
1597 }
1598
1599 regcache = get_thread_regcache (current_thread, 1);
1600 (*the_low_target.set_pc) (regcache, status.tpoint_addr);
1601 lwp->stop_pc = status.tpoint_addr;
1602
1603 /* Cancel any fast tracepoint lock this thread was
1604 holding. */
1605 force_unlock_trace_buffer ();
1606 }
1607
1608 if (lwp->exit_jump_pad_bkpt != NULL)
1609 {
1610 if (debug_threads)
1611 debug_printf ("Cancelling fast exit-jump-pad: removing bkpt. "
1612 "stopping all threads momentarily.\n");
1613
1614 stop_all_lwps (1, lwp);
1615
1616 delete_breakpoint (lwp->exit_jump_pad_bkpt);
1617 lwp->exit_jump_pad_bkpt = NULL;
1618
1619 unstop_all_lwps (1, lwp);
1620
1621 gdb_assert (lwp->suspended >= 0);
1622 }
1623 }
1624 }
1625
1626 if (debug_threads)
1627 debug_printf ("Checking whether LWP %ld needs to move out of the "
1628 "jump pad...no\n",
1629 lwpid_of (current_thread));
1630
1631 current_thread = saved_thread;
1632 return 0;
1633 }
1634
1635 /* Enqueue one signal in the "signals to report later when out of the
1636 jump pad" list. */
1637
1638 static void
1639 enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1640 {
1641 struct pending_signals *p_sig;
1642 struct thread_info *thread = get_lwp_thread (lwp);
1643
1644 if (debug_threads)
1645 debug_printf ("Deferring signal %d for LWP %ld.\n",
1646 WSTOPSIG (*wstat), lwpid_of (thread));
1647
1648 if (debug_threads)
1649 {
1650 struct pending_signals *sig;
1651
1652 for (sig = lwp->pending_signals_to_report;
1653 sig != NULL;
1654 sig = sig->prev)
1655 debug_printf (" Already queued %d\n",
1656 sig->signal);
1657
1658 debug_printf (" (no more currently queued signals)\n");
1659 }
1660
1661 /* Don't enqueue non-RT signals if they are already in the deferred
1662 queue. (SIGSTOP being the easiest signal to see ending up here
1663 twice) */
1664 if (WSTOPSIG (*wstat) < __SIGRTMIN)
1665 {
1666 struct pending_signals *sig;
1667
1668 for (sig = lwp->pending_signals_to_report;
1669 sig != NULL;
1670 sig = sig->prev)
1671 {
1672 if (sig->signal == WSTOPSIG (*wstat))
1673 {
1674 if (debug_threads)
1675 debug_printf ("Not requeuing already queued non-RT signal %d"
1676 " for LWP %ld\n",
1677 sig->signal,
1678 lwpid_of (thread));
1679 return;
1680 }
1681 }
1682 }
1683
1684 p_sig = xmalloc (sizeof (*p_sig));
1685 p_sig->prev = lwp->pending_signals_to_report;
1686 p_sig->signal = WSTOPSIG (*wstat);
1687 memset (&p_sig->info, 0, sizeof (siginfo_t));
1688 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1689 &p_sig->info);
1690
1691 lwp->pending_signals_to_report = p_sig;
1692 }
1693
1694 /* Dequeue one signal from the "signals to report later when out of
1695 the jump pad" list. */
1696
1697 static int
1698 dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
1699 {
1700 struct thread_info *thread = get_lwp_thread (lwp);
1701
1702 if (lwp->pending_signals_to_report != NULL)
1703 {
1704 struct pending_signals **p_sig;
1705
1706 p_sig = &lwp->pending_signals_to_report;
1707 while ((*p_sig)->prev != NULL)
1708 p_sig = &(*p_sig)->prev;
1709
1710 *wstat = W_STOPCODE ((*p_sig)->signal);
1711 if ((*p_sig)->info.si_signo != 0)
1712 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
1713 &(*p_sig)->info);
1714 free (*p_sig);
1715 *p_sig = NULL;
1716
1717 if (debug_threads)
1718 debug_printf ("Reporting deferred signal %d for LWP %ld.\n",
1719 WSTOPSIG (*wstat), lwpid_of (thread));
1720
1721 if (debug_threads)
1722 {
1723 struct pending_signals *sig;
1724
1725 for (sig = lwp->pending_signals_to_report;
1726 sig != NULL;
1727 sig = sig->prev)
1728 debug_printf (" Still queued %d\n",
1729 sig->signal);
1730
1731 debug_printf (" (no more queued signals)\n");
1732 }
1733
1734 return 1;
1735 }
1736
1737 return 0;
1738 }
1739
1740 /* Fetch the possibly triggered data watchpoint info and store it in
1741 CHILD.
1742
1743 On some archs, like x86, that use debug registers to set
1744 watchpoints, it's possible that the way to know which watched
1745 address trapped, is to check the register that is used to select
1746 which address to watch. Problem is, between setting the watchpoint
1747 and reading back which data address trapped, the user may change
1748 the set of watchpoints, and, as a consequence, GDB changes the
1749 debug registers in the inferior. To avoid reading back a stale
1750 stopped-data-address when that happens, we cache in LP the fact
1751 that a watchpoint trapped, and the corresponding data address, as
1752 soon as we see CHILD stop with a SIGTRAP. If GDB changes the debug
1753 registers meanwhile, we have the cached data we can rely on. */
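/* (On x86, for instance, the stopped_by_watchpoint hook inspects the
   DR6 debug status register to learn that a watchpoint fired, and
   stopped_data_address reads back the matching DR0-DR3 address
   register -- exactly the state that GDB rewriting the debug registers
   would clobber.)  */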
1754
1755 static int
1756 check_stopped_by_watchpoint (struct lwp_info *child)
1757 {
1758 if (the_low_target.stopped_by_watchpoint != NULL)
1759 {
1760 struct thread_info *saved_thread;
1761
1762 saved_thread = current_thread;
1763 current_thread = get_lwp_thread (child);
1764
1765 if (the_low_target.stopped_by_watchpoint ())
1766 {
1767 child->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
1768
1769 if (the_low_target.stopped_data_address != NULL)
1770 child->stopped_data_address
1771 = the_low_target.stopped_data_address ();
1772 else
1773 child->stopped_data_address = 0;
1774 }
1775
1776 current_thread = saved_thread;
1777 }
1778
1779 return child->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
1780 }
1781
1782 /* Do low-level handling of the event, and check if we should go on
1783    and pass it to caller code.  Return the affected lwp if so, or
1784 NULL otherwise. */
1785
1786 static struct lwp_info *
1787 linux_low_filter_event (int lwpid, int wstat)
1788 {
1789 struct lwp_info *child;
1790 struct thread_info *thread;
1791 int have_stop_pc = 0;
1792
1793 child = find_lwp_pid (pid_to_ptid (lwpid));
1794
1795 /* If we didn't find a process, one of two things presumably happened:
1796 - A process we started and then detached from has exited. Ignore it.
1797 - A process we are controlling has forked and the new child's stop
1798 was reported to us by the kernel. Save its PID. */
1799 if (child == NULL && WIFSTOPPED (wstat))
1800 {
1801 add_to_pid_list (&stopped_pids, lwpid, wstat);
1802 return NULL;
1803 }
1804 else if (child == NULL)
1805 return NULL;
1806
1807 thread = get_lwp_thread (child);
1808
1809 child->stopped = 1;
1810
1811 child->last_status = wstat;
1812
1813 /* Check if the thread has exited. */
1814 if ((WIFEXITED (wstat) || WIFSIGNALED (wstat)))
1815 {
1816 if (debug_threads)
1817 debug_printf ("LLFE: %d exited.\n", lwpid);
1818 if (num_lwps (pid_of (thread)) > 1)
1819 {
1820
1821 /* If there is at least one more LWP, then the exit signal was
1822 not the end of the debugged application and should be
1823 ignored. */
1824 delete_lwp (child);
1825 return NULL;
1826 }
1827 else
1828 {
1829 /* This was the last lwp in the process. Since events are
1830 serialized to GDB core, and we can't report this one
1831 right now, but GDB core and the other target layers will
1832 want to be notified about the exit code/signal, leave the
1833 status pending for the next time we're able to report
1834 it. */
1835 mark_lwp_dead (child, wstat);
1836 return child;
1837 }
1838 }
1839
1840 gdb_assert (WIFSTOPPED (wstat));
1841
1842 if (WIFSTOPPED (wstat))
1843 {
1844 struct process_info *proc;
1845
1846 /* Architecture-specific setup after inferior is running. This
1847 needs to happen after we have attached to the inferior and it
1848 is stopped for the first time, but before we access any
1849 inferior registers. */
1850 proc = find_process_pid (pid_of (thread));
1851 if (proc->private->new_inferior)
1852 {
1853 struct thread_info *saved_thread;
1854
1855 saved_thread = current_thread;
1856 current_thread = thread;
1857
1858 the_low_target.arch_setup ();
1859
1860 current_thread = saved_thread;
1861
1862 proc->private->new_inferior = 0;
1863 }
1864 }
1865
1866 if (WIFSTOPPED (wstat) && child->must_set_ptrace_flags)
1867 {
1868 struct process_info *proc = find_process_pid (pid_of (thread));
1869
1870 linux_enable_event_reporting (lwpid, proc->attached);
1871 child->must_set_ptrace_flags = 0;
1872 }
1873
1874 /* Be careful to not overwrite stop_pc until
1875 check_stopped_by_breakpoint is called. */
1876 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1877 && linux_is_extended_waitstatus (wstat))
1878 {
1879 child->stop_pc = get_pc (child);
1880 handle_extended_wait (child, wstat);
1881 return NULL;
1882 }
1883
1884 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGTRAP
1885 && check_stopped_by_watchpoint (child))
1886 ;
1887 else if (WIFSTOPPED (wstat) && linux_wstatus_maybe_breakpoint (wstat))
1888 {
1889 if (check_stopped_by_breakpoint (child))
1890 have_stop_pc = 1;
1891 }
1892
1893 if (!have_stop_pc)
1894 child->stop_pc = get_pc (child);
1895
1896 if (WIFSTOPPED (wstat) && WSTOPSIG (wstat) == SIGSTOP
1897 && child->stop_expected)
1898 {
1899 if (debug_threads)
1900 debug_printf ("Expected stop.\n");
1901 child->stop_expected = 0;
1902
1903 if (thread->last_resume_kind == resume_stop)
1904 {
1905 /* We want to report the stop to the core. Treat the
1906 SIGSTOP as a normal event. */
1907 }
1908 else if (stopping_threads != NOT_STOPPING_THREADS)
1909 {
1910 /* Stopping threads. We don't want this SIGSTOP to end up
1911 pending. */
1912 return NULL;
1913 }
1914 else
1915 {
1916 /* Filter out the event. */
1917 linux_resume_one_lwp (child, child->stepping, 0, NULL);
1918 return NULL;
1919 }
1920 }
1921
1922 child->status_pending_p = 1;
1923 child->status_pending = wstat;
1924 return child;
1925 }
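
/* Illustrative sketch (not part of the build): how a raw waitpid
   status such as WSTAT above decodes into the cases that
   linux_low_filter_event distinguishes. It uses only the standard
   <sys/wait.h> macros; all names are local to the example. */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

static void
decode_wstat (int wstat)
{
  if (WIFEXITED (wstat))
    printf ("exited, status %d\n", WEXITSTATUS (wstat));
  else if (WIFSIGNALED (wstat))
    printf ("terminated by signal %d\n", WTERMSIG (wstat));
  else if (WIFSTOPPED (wstat))
    /* The bits above the low 16 carry a PTRACE_EVENT_* code when
       extended event reporting is enabled; that is what
       linux_is_extended_waitstatus inspects. */
    printf ("stopped by signal %d, ptrace event %d\n",
	    WSTOPSIG (wstat), wstat >> 16);
}

int
main (void)
{
  decode_wstat ((SIGTRAP << 8) | 0x7f);	/* Same shape as W_STOPCODE. */
  return 0;
}
#endif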
1926
1927 /* Resume LWPs that are currently stopped without any pending status
1928 to report, but are resumed from the core's perspective. */
1929
1930 static void
1931 resume_stopped_resumed_lwps (struct inferior_list_entry *entry)
1932 {
1933 struct thread_info *thread = (struct thread_info *) entry;
1934 struct lwp_info *lp = get_thread_lwp (thread);
1935
1936 if (lp->stopped
1937 && !lp->status_pending_p
1938 && thread->last_resume_kind != resume_stop
1939 && thread->last_status.kind == TARGET_WAITKIND_IGNORE)
1940 {
1941 int step = thread->last_resume_kind == resume_step;
1942
1943 if (debug_threads)
1944 debug_printf ("RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
1945 target_pid_to_str (ptid_of (thread)),
1946 paddress (lp->stop_pc),
1947 step);
1948
1949 linux_resume_one_lwp (lp, step, GDB_SIGNAL_0, NULL);
1950 }
1951 }
1952
1953 /* Wait for an event from child(ren) WAIT_PTID, and return any that
1954 match FILTER_PTID (leaving others pending). The PTIDs can be:
1955 minus_one_ptid, to specify any child; a pid PTID, specifying all
1956 lwps of a thread group; or a PTID representing a single lwp. Store
1957 the stop status through the status pointer WSTAT. OPTIONS is
1958 passed to the waitpid call. Return 0 if no event was found and
1959 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
1960 were found. Return the PID of the stopped child otherwise. */
1961
1962 static int
1963 linux_wait_for_event_filtered (ptid_t wait_ptid, ptid_t filter_ptid,
1964 int *wstatp, int options)
1965 {
1966 struct thread_info *event_thread;
1967 struct lwp_info *event_child, *requested_child;
1968 sigset_t block_mask, prev_mask;
1969
1970 retry:
1971 /* N.B. event_thread points to the thread_info struct that contains
1972 event_child. Keep them in sync. */
1973 event_thread = NULL;
1974 event_child = NULL;
1975 requested_child = NULL;
1976
1977 /* Check for a lwp with a pending status. */
1978
1979 if (ptid_equal (filter_ptid, minus_one_ptid) || ptid_is_pid (filter_ptid))
1980 {
1981 event_thread = (struct thread_info *)
1982 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
1983 if (event_thread != NULL)
1984 event_child = get_thread_lwp (event_thread);
1985 if (debug_threads && event_thread)
1986 debug_printf ("Got a pending child %ld\n", lwpid_of (event_thread));
1987 }
1988 else if (!ptid_equal (filter_ptid, null_ptid))
1989 {
1990 requested_child = find_lwp_pid (filter_ptid);
1991
1992 if (stopping_threads == NOT_STOPPING_THREADS
1993 && requested_child->status_pending_p
1994 && requested_child->collecting_fast_tracepoint)
1995 {
1996 enqueue_one_deferred_signal (requested_child,
1997 &requested_child->status_pending);
1998 requested_child->status_pending_p = 0;
1999 requested_child->status_pending = 0;
2000 linux_resume_one_lwp (requested_child, 0, 0, NULL);
2001 }
2002
2003 if (requested_child->suspended
2004 && requested_child->status_pending_p)
2005 {
2006 internal_error (__FILE__, __LINE__,
2007 "requesting an event out of a"
2008 " suspended child?");
2009 }
2010
2011 if (requested_child->status_pending_p)
2012 {
2013 event_child = requested_child;
2014 event_thread = get_lwp_thread (event_child);
2015 }
2016 }
2017
2018 if (event_child != NULL)
2019 {
2020 if (debug_threads)
2021 debug_printf ("Got an event from pending child %ld (%04x)\n",
2022 lwpid_of (event_thread), event_child->status_pending);
2023 *wstatp = event_child->status_pending;
2024 event_child->status_pending_p = 0;
2025 event_child->status_pending = 0;
2026 current_thread = event_thread;
2027 return lwpid_of (event_thread);
2028 }
2029
2030 /* But if we don't find a pending event, we'll have to wait.
2031
2032 We only enter this loop if no process has a pending wait status.
2033 Thus any action taken in response to a wait status inside this
2034 loop is responding as soon as we detect the status, not after any
2035 pending events. */
2036
2037 /* Make sure SIGCHLD is blocked until the sigsuspend below. Block
2038 all signals while here. */
2039 sigfillset (&block_mask);
2040 sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);
2041
2042 /* Always pull all events out of the kernel. We'll randomly select
2043 an event LWP out of all that have events, to prevent
2044 starvation. */
2045 while (event_child == NULL)
2046 {
2047 pid_t ret = 0;
2048
2049 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
2050 quirks:
2051
2052 - If the thread group leader exits while other threads in the
2053 thread group still exist, waitpid(TGID, ...) hangs. That
2054 waitpid won't return an exit status until the other threads
2055 in the group are reaped.
2056
2057 - When a non-leader thread execs, that thread just vanishes
2058 without reporting an exit (so we'd hang if we waited for it
2059 explicitly in that case). The exec event is reported to
2060 the TGID pid (although we don't currently enable exec
2061 events). */
2062 errno = 0;
2063 ret = my_waitpid (-1, wstatp, options | WNOHANG);
2064
2065 if (debug_threads)
2066 debug_printf ("LWFE: waitpid(-1, ...) returned %d, %s\n",
2067 ret, errno ? strerror (errno) : "ERRNO-OK");
2068
2069 if (ret > 0)
2070 {
2071 if (debug_threads)
2072 {
2073 debug_printf ("LLW: waitpid %ld received %s\n",
2074 (long) ret, status_to_str (*wstatp));
2075 }
2076
2077 /* Filter all events. IOW, leave all events pending. We'll
2078 randomly select an event LWP out of all that have events
2079 below. */
2080 linux_low_filter_event (ret, *wstatp);
2081 /* Retry until nothing comes out of waitpid. A single
2082 SIGCHLD can indicate more than one child stopped. */
2083 continue;
2084 }
2085
2086 /* Now that we've pulled all events out of the kernel, resume
2087 LWPs that don't have an interesting event to report. */
2088 if (stopping_threads == NOT_STOPPING_THREADS)
2089 for_each_inferior (&all_threads, resume_stopped_resumed_lwps);
2090
2091 /* ... and find an LWP with a status to report to the core, if
2092 any. */
2093 event_thread = (struct thread_info *)
2094 find_inferior (&all_threads, status_pending_p_callback, &filter_ptid);
2095 if (event_thread != NULL)
2096 {
2097 event_child = get_thread_lwp (event_thread);
2098 *wstatp = event_child->status_pending;
2099 event_child->status_pending_p = 0;
2100 event_child->status_pending = 0;
2101 break;
2102 }
2103
2104 /* Check for zombie thread group leaders. Those can't be reaped
2105 until all other threads in the thread group are. */
2106 check_zombie_leaders ();
2107
2108 /* If there are no resumed children left in the set of LWPs we
2109 want to wait for, bail. We can't just block in
2110 waitpid/sigsuspend, because lwps might have been left stopped
2111 in trace-stop state, and we'd be stuck forever waiting for
2112 their status to change (which would only happen if we resumed
2113 them). Even if WNOHANG is set, this return code is preferred
2114 over 0 (below), as it is more detailed. */
2115 if ((find_inferior (&all_threads,
2116 not_stopped_callback,
2117 &wait_ptid) == NULL))
2118 {
2119 if (debug_threads)
2120 debug_printf ("LLW: exit (no unwaited-for LWP)\n");
2121 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2122 return -1;
2123 }
2124
2125 /* No interesting event to report to the caller. */
2126 if ((options & WNOHANG))
2127 {
2128 if (debug_threads)
2129 debug_printf ("WNOHANG set, no event found\n");
2130
2131 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2132 return 0;
2133 }
2134
2135 /* Block until we get an event reported with SIGCHLD. */
2136 if (debug_threads)
2137 debug_printf ("sigsuspend'ing\n");
2138
2139 sigsuspend (&prev_mask);
2140 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2141 goto retry;
2142 }
2143
2144 sigprocmask (SIG_SETMASK, &prev_mask, NULL);
2145
2146 current_thread = event_thread;
2147
2148 /* Check for thread exit. */
2149 if (! WIFSTOPPED (*wstatp))
2150 {
2151 gdb_assert (last_thread_of_process_p (pid_of (event_thread)));
2152
2153 if (debug_threads)
2154 debug_printf ("LWP %d is the last lwp of process. "
2155 "Process %ld exiting.\n",
2156 pid_of (event_thread), lwpid_of (event_thread));
2157 return lwpid_of (event_thread);
2158 }
2159
2160 return lwpid_of (event_thread);
2161 }
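
/* Illustrative sketch (not part of the build): the block/drain/
   sigsuspend pattern used by linux_wait_for_event_filtered above,
   reduced to a standalone program. Blocking SIGCHLD before the
   non-blocking drain closes the race where a child stops between
   the last waitpid and the wait for the signal. */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static void
sigchld_handler (int sig)
{
  /* Present only so that SIGCHLD interrupts sigsuspend. */
  (void) sig;
}

int
main (void)
{
  sigset_t block_mask, prev_mask;
  struct sigaction sa;

  memset (&sa, 0, sizeof sa);
  sa.sa_handler = sigchld_handler;
  sigaction (SIGCHLD, &sa, NULL);

  if (fork () == 0)
    {
      sleep (1);
      _exit (0);
    }

  sigfillset (&block_mask);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  for (;;)
    {
      int wstat;
      pid_t ret = waitpid (-1, &wstat, WNOHANG);

      if (ret > 0)
	{
	  printf ("event from %d\n", (int) ret);
	  continue;		/* Drain: one SIGCHLD may cover many. */
	}
      if (ret == -1 && errno == ECHILD)
	break;			/* No unwaited-for children left. */

      /* Nothing pending; atomically unblock and wait for SIGCHLD. */
      sigsuspend (&prev_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return 0;
}
#endif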
2162
2163 /* Wait for an event from child(ren) PTID. PTIDs can be:
2164 minus_one_ptid, to specify any child; a pid PTID, specifying all
2165 lwps of a thread group; or a PTID representing a single lwp. Store
2166 the stop status through the status pointer WSTAT. OPTIONS is
2167 passed to the waitpid call. Return 0 if no event was found and
2168 OPTIONS contains WNOHANG. Return -1 if no unwaited-for children
2169 were found. Return the PID of the stopped child otherwise. */
2170
2171 static int
2172 linux_wait_for_event (ptid_t ptid, int *wstatp, int options)
2173 {
2174 return linux_wait_for_event_filtered (ptid, ptid, wstatp, options);
2175 }
2176
2177 /* Count the LWPs that have had events. */
2178
2179 static int
2180 count_events_callback (struct inferior_list_entry *entry, void *data)
2181 {
2182 struct thread_info *thread = (struct thread_info *) entry;
2183 int *count = data;
2184
2185 gdb_assert (count != NULL);
2186
2187 /* Count only resumed LWPs that have an event pending. */
2188 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2189 && thread->last_resume_kind != resume_stop
2190 && thread->status_pending_p)
2191 (*count)++;
2192
2193 return 0;
2194 }
2195
2196 /* Select the LWP (if any) that is currently being single-stepped. */
2197
2198 static int
2199 select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
2200 {
2201 struct thread_info *thread = (struct thread_info *) entry;
2202 struct lwp_info *lp = get_thread_lwp (thread);
2203
2204 if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
2205 && thread->last_resume_kind == resume_step
2206 && lp->status_pending_p)
2207 return 1;
2208 else
2209 return 0;
2210 }
2211
2212 /* Select the Nth LWP that has had a SIGTRAP event that should be
2213 reported to GDB. */
2214
2215 static int
2216 select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
2217 {
2218 struct thread_info *thread = (struct thread_info *) entry;
2219 int *selector = data;
2220
2221 gdb_assert (selector != NULL);
2222
2223 /* Select only resumed LWPs that have an event pending. */
2224 if (thread->last_resume_kind != resume_stop
2225 && thread->last_status.kind == TARGET_WAITKIND_IGNORE
2226 && thread->status_pending_p)
2227 if ((*selector)-- == 0)
2228 return 1;
2229
2230 return 0;
2231 }
2232
2233 /* Select one LWP out of those that have events pending. */
2234
2235 static void
2236 select_event_lwp (struct lwp_info **orig_lp)
2237 {
2238 int num_events = 0;
2239 int random_selector;
2240 struct thread_info *event_thread = NULL;
2241
2242 /* In all-stop, give preference to the LWP that is being
2243 single-stepped. There will be at most one, and it's the LWP that
2244 the core is most interested in. If we didn't do this, then we'd
2245 have to handle pending step SIGTRAPs somehow in case the core
2246 later continues the previously-stepped thread, otherwise we'd
2247 report the pending SIGTRAP, and the core, not having stepped the
2248 thread, wouldn't understand what the trap was for, and therefore
2249 would report it to the user as a random signal. */
2250 if (!non_stop)
2251 {
2252 event_thread
2253 = (struct thread_info *) find_inferior (&all_threads,
2254 select_singlestep_lwp_callback,
2255 NULL);
2256 if (event_thread != NULL)
2257 {
2258 if (debug_threads)
2259 debug_printf ("SEL: Select single-step %s\n",
2260 target_pid_to_str (ptid_of (event_thread)));
2261 }
2262 }
2263 if (event_thread == NULL)
2264 {
2265 /* No single-stepping LWP. Select one at random, out of those
2266 which have had SIGTRAP events. */
2267
2268 /* First see how many SIGTRAP events we have. */
2269 find_inferior (&all_threads, count_events_callback, &num_events);
2270
2271 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2272 random_selector = (int)
2273 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2274
2275 if (debug_threads && num_events > 1)
2276 debug_printf ("SEL: Found %d SIGTRAP events, selecting #%d\n",
2277 num_events, random_selector);
2278
2279 event_thread
2280 = (struct thread_info *) find_inferior (&all_threads,
2281 select_event_lwp_callback,
2282 &random_selector);
2283 }
2284
2285 if (event_thread != NULL)
2286 {
2287 struct lwp_info *event_lp = get_thread_lwp (event_thread);
2288
2289 /* Switch the event LWP. */
2290 *orig_lp = event_lp;
2291 }
2292 }
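
/* Illustrative sketch (not part of the build): the uniformity of
   the random pick in select_event_lwp. The expression below maps
   rand () approximately uniformly onto 0..num_events-1, and
   select_event_lwp_callback then picks the Nth match by counting
   the selector down. */
#if 0
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int hist[3] = { 0, 0, 0 };
  int num_events = 3;
  int i;

  for (i = 0; i < 300000; i++)
    {
      int random_selector
	= (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      hist[random_selector]++;
    }

  /* Each bucket should land near 100000. */
  for (i = 0; i < num_events; i++)
    printf ("lwp #%d picked %d times\n", i, hist[i]);
  return 0;
}
#endif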
2293
2294 /* Decrement the suspend count of an LWP. */
2295
2296 static int
2297 unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
2298 {
2299 struct thread_info *thread = (struct thread_info *) entry;
2300 struct lwp_info *lwp = get_thread_lwp (thread);
2301
2302 /* Ignore EXCEPT. */
2303 if (lwp == except)
2304 return 0;
2305
2306 lwp->suspended--;
2307
2308 gdb_assert (lwp->suspended >= 0);
2309 return 0;
2310 }
2311
2312 /* Decrement the suspend count of all LWPs, except EXCEPT, if
2313 non-NULL. */
2314
2315 static void
2316 unsuspend_all_lwps (struct lwp_info *except)
2317 {
2318 find_inferior (&all_threads, unsuspend_one_lwp, except);
2319 }
2320
2321 static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
2322 static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
2323 void *data);
2324 static int lwp_running (struct inferior_list_entry *entry, void *data);
2325 static ptid_t linux_wait_1 (ptid_t ptid,
2326 struct target_waitstatus *ourstatus,
2327 int target_options);
2328
2329 /* Stabilize threads (move out of jump pads).
2330
2331 If a thread is midway collecting a fast tracepoint, we need to
2332 finish the collection and move it out of the jump pad before
2333 reporting the signal.
2334
2335 This avoids recursion while collecting (when a signal arrives
2336 midway, and the signal handler itself collects), which would trash
2337 the trace buffer. In case the user set a breakpoint in a signal
2338 handler, this avoids the backtrace showing the jump pad, etc..
2339 Most importantly, there are certain things we can't do safely if
2340 threads are stopped in a jump pad (or in its callee's). For
2341 example:
2342
2343 - starting a new trace run. A thread still collecting the
2344 previous run could trash the trace buffer when resumed. The trace
2345 buffer control structures would have been reset but the thread had
2346 no way to tell. The thread could even be midway through memcpy'ing
2347 to the buffer, which would mean that when resumed, it would clobber
2348 the trace buffer that had been set up for the new run.
2349
2350 - we can't rewrite/reuse the jump pads for new tracepoints
2351 safely. Say you do tstart while a thread is stopped midway
2352 through collecting. When the thread is later resumed, it finishes the
2353 collection, and returns to the jump pad, to execute the original
2354 instruction that was under the tracepoint jump at the time the
2355 older run had been started. If the jump pad had been rewritten
2356 since then for something else in the new run, the thread would now
2357 execute the wrong or random instructions. */
2358
2359 static void
2360 linux_stabilize_threads (void)
2361 {
2362 struct thread_info *saved_thread;
2363 struct thread_info *thread_stuck;
2364
2365 thread_stuck
2366 = (struct thread_info *) find_inferior (&all_threads,
2367 stuck_in_jump_pad_callback,
2368 NULL);
2369 if (thread_stuck != NULL)
2370 {
2371 if (debug_threads)
2372 debug_printf ("can't stabilize, LWP %ld is stuck in jump pad\n",
2373 lwpid_of (thread_stuck));
2374 return;
2375 }
2376
2377 saved_thread = current_thread;
2378
2379 stabilizing_threads = 1;
2380
2381 /* Kick 'em all. */
2382 for_each_inferior (&all_threads, move_out_of_jump_pad_callback);
2383
2384 /* Loop until all are stopped out of the jump pads. */
2385 while (find_inferior (&all_threads, lwp_running, NULL) != NULL)
2386 {
2387 struct target_waitstatus ourstatus;
2388 struct lwp_info *lwp;
2389 int wstat;
2390
2391 /* Note that we go through the full wait event loop. While
2392 moving threads out of jump pad, we need to be able to step
2393 over internal breakpoints and such. */
2394 linux_wait_1 (minus_one_ptid, &ourstatus, 0);
2395
2396 if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
2397 {
2398 lwp = get_thread_lwp (current_thread);
2399
2400 /* Lock it. */
2401 lwp->suspended++;
2402
2403 if (ourstatus.value.sig != GDB_SIGNAL_0
2404 || current_thread->last_resume_kind == resume_stop)
2405 {
2406 wstat = W_STOPCODE (gdb_signal_to_host (ourstatus.value.sig));
2407 enqueue_one_deferred_signal (lwp, &wstat);
2408 }
2409 }
2410 }
2411
2412 find_inferior (&all_threads, unsuspend_one_lwp, NULL);
2413
2414 stabilizing_threads = 0;
2415
2416 current_thread = saved_thread;
2417
2418 if (debug_threads)
2419 {
2420 thread_stuck
2421 = (struct thread_info *) find_inferior (&all_threads,
2422 stuck_in_jump_pad_callback,
2423 NULL);
2424 if (thread_stuck != NULL)
2425 debug_printf ("couldn't stabilize, LWP %ld got stuck in jump pad\n",
2426 lwpid_of (thread_stuck));
2427 }
2428 }
2429
2430 static void async_file_mark (void);
2431
2432 /* Convenience function that is called when the kernel reports an
2433 event that is not passed out to GDB. */
2434
2435 static ptid_t
2436 ignore_event (struct target_waitstatus *ourstatus)
2437 {
2438 /* If we got an event, there may still be others, as a single
2439 SIGCHLD can indicate more than one child stopped. This forces
2440 another target_wait call. */
2441 async_file_mark ();
2442
2443 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2444 return null_ptid;
2445 }
2446
2447 /* Wait for process, returns status. */
2448
2449 static ptid_t
2450 linux_wait_1 (ptid_t ptid,
2451 struct target_waitstatus *ourstatus, int target_options)
2452 {
2453 int w;
2454 struct lwp_info *event_child;
2455 int options;
2456 int pid;
2457 int step_over_finished;
2458 int bp_explains_trap;
2459 int maybe_internal_trap;
2460 int report_to_gdb;
2461 int trace_event;
2462 int in_step_range;
2463
2464 if (debug_threads)
2465 {
2466 debug_enter ();
2467 debug_printf ("linux_wait_1: [%s]\n", target_pid_to_str (ptid));
2468 }
2469
2470 /* Translate generic target options into linux options. */
2471 options = __WALL;
2472 if (target_options & TARGET_WNOHANG)
2473 options |= WNOHANG;
2474
2475 bp_explains_trap = 0;
2476 trace_event = 0;
2477 in_step_range = 0;
2478 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2479
2480 if (ptid_equal (step_over_bkpt, null_ptid))
2481 pid = linux_wait_for_event (ptid, &w, options);
2482 else
2483 {
2484 if (debug_threads)
2485 debug_printf ("step_over_bkpt set [%s], doing a blocking wait\n",
2486 target_pid_to_str (step_over_bkpt));
2487 pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
2488 }
2489
2490 if (pid == 0)
2491 {
2492 gdb_assert (target_options & TARGET_WNOHANG);
2493
2494 if (debug_threads)
2495 {
2496 debug_printf ("linux_wait_1 ret = null_ptid, "
2497 "TARGET_WAITKIND_IGNORE\n");
2498 debug_exit ();
2499 }
2500
2501 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2502 return null_ptid;
2503 }
2504 else if (pid == -1)
2505 {
2506 if (debug_threads)
2507 {
2508 debug_printf ("linux_wait_1 ret = null_ptid, "
2509 "TARGET_WAITKIND_NO_RESUMED\n");
2510 debug_exit ();
2511 }
2512
2513 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
2514 return null_ptid;
2515 }
2516
2517 event_child = get_thread_lwp (current_thread);
2518
2519 /* linux_wait_for_event only returns an exit status for the last
2520 child of a process. Report it. */
2521 if (WIFEXITED (w) || WIFSIGNALED (w))
2522 {
2523 if (WIFEXITED (w))
2524 {
2525 ourstatus->kind = TARGET_WAITKIND_EXITED;
2526 ourstatus->value.integer = WEXITSTATUS (w);
2527
2528 if (debug_threads)
2529 {
2530 debug_printf ("linux_wait_1 ret = %s, exited with "
2531 "retcode %d\n",
2532 target_pid_to_str (ptid_of (current_thread)),
2533 WEXITSTATUS (w));
2534 debug_exit ();
2535 }
2536 }
2537 else
2538 {
2539 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
2540 ourstatus->value.sig = gdb_signal_from_host (WTERMSIG (w));
2541
2542 if (debug_threads)
2543 {
2544 debug_printf ("linux_wait_1 ret = %s, terminated with "
2545 "signal %d\n",
2546 target_pid_to_str (ptid_of (current_thread)),
2547 WTERMSIG (w));
2548 debug_exit ();
2549 }
2550 }
2551
2552 return ptid_of (current_thread);
2553 }
2554
2555 /* If this event was not handled before, and is not a SIGTRAP, we
2556 report it. SIGILL and SIGSEGV are also treated as traps in case
2557 a breakpoint is inserted at the current PC. If this target does
2558 not support internal breakpoints at all, we also report the
2559 SIGTRAP without further processing; it's of no concern to us. */
2560 maybe_internal_trap
2561 = (supports_breakpoints ()
2562 && (WSTOPSIG (w) == SIGTRAP
2563 || ((WSTOPSIG (w) == SIGILL
2564 || WSTOPSIG (w) == SIGSEGV)
2565 && (*the_low_target.breakpoint_at) (event_child->stop_pc))));
2566
2567 if (maybe_internal_trap)
2568 {
2569 /* Handle anything that requires bookkeeping before deciding to
2570 report the event or continue waiting. */
2571
2572 /* First check if we can explain the SIGTRAP with an internal
2573 breakpoint, or if we should possibly report the event to GDB.
2574 Do this before anything that may remove or insert a
2575 breakpoint. */
2576 bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);
2577
2578 /* We have a SIGTRAP, possibly a step-over dance has just
2579 finished. If so, tweak the state machine accordingly,
2580 reinsert breakpoints and delete any reinsert (software
2581 single-step) breakpoints. */
2582 step_over_finished = finish_step_over (event_child);
2583
2584 /* Now invoke the callbacks of any internal breakpoints there. */
2585 check_breakpoints (event_child->stop_pc);
2586
2587 /* Handle tracepoint data collecting. This may overflow the
2588 trace buffer, and cause a tracing stop, removing
2589 breakpoints. */
2590 trace_event = handle_tracepoints (event_child);
2591
2592 if (bp_explains_trap)
2593 {
2594 /* If we stepped or ran into an internal breakpoint, we've
2595 already handled it. So next time we resume (from this
2596 PC), we should step over it. */
2597 if (debug_threads)
2598 debug_printf ("Hit a gdbserver breakpoint.\n");
2599
2600 if (breakpoint_here (event_child->stop_pc))
2601 event_child->need_step_over = 1;
2602 }
2603 }
2604 else
2605 {
2606 /* We have some other signal, possibly a step-over dance was in
2607 progress, and it should be cancelled too. */
2608 step_over_finished = finish_step_over (event_child);
2609 }
2610
2611 /* We have all the data we need. Either report the event to GDB, or
2612 resume threads and keep waiting for more. */
2613
2614 /* If we're collecting a fast tracepoint, finish the collection and
2615 move out of the jump pad before delivering a signal. See
2616 linux_stabilize_threads. */
2617
2618 if (WIFSTOPPED (w)
2619 && WSTOPSIG (w) != SIGTRAP
2620 && supports_fast_tracepoints ()
2621 && agent_loaded_p ())
2622 {
2623 if (debug_threads)
2624 debug_printf ("Got signal %d for LWP %ld. Check if we need "
2625 "to defer or adjust it.\n",
2626 WSTOPSIG (w), lwpid_of (current_thread));
2627
2628 /* Allow debugging the jump pad itself. */
2629 if (current_thread->last_resume_kind != resume_step
2630 && maybe_move_out_of_jump_pad (event_child, &w))
2631 {
2632 enqueue_one_deferred_signal (event_child, &w);
2633
2634 if (debug_threads)
2635 debug_printf ("Signal %d for LWP %ld deferred (in jump pad)\n",
2636 WSTOPSIG (w), lwpid_of (current_thread));
2637
2638 linux_resume_one_lwp (event_child, 0, 0, NULL);
2639
2640 return ignore_event (ourstatus);
2641 }
2642 }
2643
2644 if (event_child->collecting_fast_tracepoint)
2645 {
2646 if (debug_threads)
2647 debug_printf ("LWP %ld was trying to move out of the jump pad (%d). "
2648 "Check if we're already there.\n",
2649 lwpid_of (current_thread),
2650 event_child->collecting_fast_tracepoint);
2651
2652 trace_event = 1;
2653
2654 event_child->collecting_fast_tracepoint
2655 = linux_fast_tracepoint_collecting (event_child, NULL);
2656
2657 if (event_child->collecting_fast_tracepoint != 1)
2658 {
2659 /* No longer need this breakpoint. */
2660 if (event_child->exit_jump_pad_bkpt != NULL)
2661 {
2662 if (debug_threads)
2663 debug_printf ("No longer need exit-jump-pad bkpt; removing it."
2664 "stopping all threads momentarily.\n");
2665
2666 /* Other running threads could hit this breakpoint.
2667 We don't handle moribund locations like GDB does,
2668 instead we always pause all threads when removing
2669 breakpoints, so that any step-over or
2670 decr_pc_after_break adjustment is always taken
2671 care of while the breakpoint is still
2672 inserted. */
2673 stop_all_lwps (1, event_child);
2674
2675 delete_breakpoint (event_child->exit_jump_pad_bkpt);
2676 event_child->exit_jump_pad_bkpt = NULL;
2677
2678 unstop_all_lwps (1, event_child);
2679
2680 gdb_assert (event_child->suspended >= 0);
2681 }
2682 }
2683
2684 if (event_child->collecting_fast_tracepoint == 0)
2685 {
2686 if (debug_threads)
2687 debug_printf ("fast tracepoint finished "
2688 "collecting successfully.\n");
2689
2690 /* We may have a deferred signal to report. */
2691 if (dequeue_one_deferred_signal (event_child, &w))
2692 {
2693 if (debug_threads)
2694 debug_printf ("dequeued one signal.\n");
2695 }
2696 else
2697 {
2698 if (debug_threads)
2699 debug_printf ("no deferred signals.\n");
2700
2701 if (stabilizing_threads)
2702 {
2703 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2704 ourstatus->value.sig = GDB_SIGNAL_0;
2705
2706 if (debug_threads)
2707 {
2708 debug_printf ("linux_wait_1 ret = %s, stopped "
2709 "while stabilizing threads\n",
2710 target_pid_to_str (ptid_of (current_thread)));
2711 debug_exit ();
2712 }
2713
2714 return ptid_of (current_thread);
2715 }
2716 }
2717 }
2718 }
2719
2720 /* Check whether GDB would be interested in this event. */
2721
2722 /* If GDB is not interested in this signal, don't stop other
2723 threads, and don't report it to GDB. Just resume the inferior
2724 right away. We do this for threading-related signals as well as
2725 any that GDB specifically requested we ignore. But never ignore
2726 SIGSTOP if we sent it ourselves, and do not ignore signals when
2727 stepping - they may require special handling to skip the signal
2728 handler. Also never ignore signals that could be caused by a
2729 breakpoint. */
2730 /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
2731 thread library? */
2732 if (WIFSTOPPED (w)
2733 && current_thread->last_resume_kind != resume_step
2734 && (
2735 #if defined (USE_THREAD_DB) && !defined (__ANDROID__)
2736 (current_process ()->private->thread_db != NULL
2737 && (WSTOPSIG (w) == __SIGRTMIN
2738 || WSTOPSIG (w) == __SIGRTMIN + 1))
2739 ||
2740 #endif
2741 (pass_signals[gdb_signal_from_host (WSTOPSIG (w))]
2742 && !(WSTOPSIG (w) == SIGSTOP
2743 && current_thread->last_resume_kind == resume_stop)
2744 && !linux_wstatus_maybe_breakpoint (w))))
2745 {
2746 siginfo_t info, *info_p;
2747
2748 if (debug_threads)
2749 debug_printf ("Ignored signal %d for LWP %ld.\n",
2750 WSTOPSIG (w), lwpid_of (current_thread));
2751
2752 if (ptrace (PTRACE_GETSIGINFO, lwpid_of (current_thread),
2753 (PTRACE_TYPE_ARG3) 0, &info) == 0)
2754 info_p = &info;
2755 else
2756 info_p = NULL;
2757 linux_resume_one_lwp (event_child, event_child->stepping,
2758 WSTOPSIG (w), info_p);
2759 return ignore_event (ourstatus);
2760 }
2761
2762 /* Note that all addresses are always "out of the step range" when
2763 there's no range to begin with. */
2764 in_step_range = lwp_in_step_range (event_child);
2765
2766 /* If GDB wanted this thread to single step, and the thread is out
2767 of the step range, we always want to report the SIGTRAP, and let
2768 GDB handle it. Watchpoints should always be reported. So should
2769 signals we can't explain. A SIGTRAP we can't explain could be a
2770 GDB breakpoint --- we may or may not support Z0 breakpoints. If we
2771 do, we'll be able to handle GDB breakpoints on top of internal
2772 breakpoints, by handling the internal breakpoint and still
2773 reporting the event to GDB. If we don't, we're out of luck, GDB
2774 won't see the breakpoint hit. */
2775 report_to_gdb = (!maybe_internal_trap
2776 || (current_thread->last_resume_kind == resume_step
2777 && !in_step_range)
2778 || event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT
2779 || (!step_over_finished && !in_step_range
2780 && !bp_explains_trap && !trace_event)
2781 || (gdb_breakpoint_here (event_child->stop_pc)
2782 && gdb_condition_true_at_breakpoint (event_child->stop_pc)
2783 && gdb_no_commands_at_breakpoint (event_child->stop_pc)));
2784
2785 run_breakpoint_commands (event_child->stop_pc);
2786
2787 /* We found no reason GDB would want us to stop. We either hit one
2788 of our own breakpoints, or finished an internal step GDB
2789 shouldn't know about. */
2790 if (!report_to_gdb)
2791 {
2792 if (debug_threads)
2793 {
2794 if (bp_explains_trap)
2795 debug_printf ("Hit a gdbserver breakpoint.\n");
2796 if (step_over_finished)
2797 debug_printf ("Step-over finished.\n");
2798 if (trace_event)
2799 debug_printf ("Tracepoint event.\n");
2800 if (lwp_in_step_range (event_child))
2801 debug_printf ("Range stepping pc 0x%s [0x%s, 0x%s).\n",
2802 paddress (event_child->stop_pc),
2803 paddress (event_child->step_range_start),
2804 paddress (event_child->step_range_end));
2805 }
2806
2807 /* We're not reporting this breakpoint to GDB, so apply the
2808 decr_pc_after_break adjustment to the inferior's regcache
2809 ourselves. */
2810
2811 if (the_low_target.set_pc != NULL)
2812 {
2813 struct regcache *regcache
2814 = get_thread_regcache (current_thread, 1);
2815 (*the_low_target.set_pc) (regcache, event_child->stop_pc);
2816 }
2817
2818 /* We may have finished stepping over a breakpoint. If so,
2819 we've stopped and suspended all LWPs momentarily except the
2820 stepping one. This is where we resume them all again. We're
2821 going to keep waiting, so use proceed, which handles stepping
2822 over the next breakpoint. */
2823 if (debug_threads)
2824 debug_printf ("proceeding all threads.\n");
2825
2826 if (step_over_finished)
2827 unsuspend_all_lwps (event_child);
2828
2829 proceed_all_lwps ();
2830 return ignore_event (ourstatus);
2831 }
2832
2833 if (debug_threads)
2834 {
2835 if (current_thread->last_resume_kind == resume_step)
2836 {
2837 if (event_child->step_range_start == event_child->step_range_end)
2838 debug_printf ("GDB wanted to single-step, reporting event.\n");
2839 else if (!lwp_in_step_range (event_child))
2840 debug_printf ("Out of step range, reporting event.\n");
2841 }
2842 if (event_child->stop_reason == LWP_STOPPED_BY_WATCHPOINT)
2843 debug_printf ("Stopped by watchpoint.\n");
2844 else if (gdb_breakpoint_here (event_child->stop_pc))
2845 debug_printf ("Stopped by GDB breakpoint.\n");
2846 debug_printf ("Hit a non-gdbserver trap event.\n");
2848 }
2849
2850 /* Alright, we're going to report a stop. */
2851
2852 if (!stabilizing_threads)
2853 {
2854 /* In all-stop, stop all threads. */
2855 if (!non_stop)
2856 stop_all_lwps (0, NULL);
2857
2858 /* If we're not waiting for a specific LWP, choose an event LWP
2859 from among those that have had events. Giving equal priority
2860 to all LWPs that have had events helps prevent
2861 starvation. */
2862 if (ptid_equal (ptid, minus_one_ptid))
2863 {
2864 event_child->status_pending_p = 1;
2865 event_child->status_pending = w;
2866
2867 select_event_lwp (&event_child);
2868
2869 /* current_thread and event_child must stay in sync. */
2870 current_thread = get_lwp_thread (event_child);
2871
2872 event_child->status_pending_p = 0;
2873 w = event_child->status_pending;
2874 }
2875
2876 if (step_over_finished)
2877 {
2878 if (!non_stop)
2879 {
2880 /* If we were doing a step-over, all other threads but
2881 the stepping one had been paused in start_step_over,
2882 with their suspend counts incremented. We don't want
2883 to do a full unstop/unpause, because we're in
2884 all-stop mode (so we want threads stopped), but we
2885 still need to unsuspend the other threads, to
2886 decrement their `suspended' count back. */
2887 unsuspend_all_lwps (event_child);
2888 }
2889 else
2890 {
2891 /* If we just finished a step-over, then all threads had
2892 been momentarily paused. In all-stop, that's fine,
2893 we want threads stopped by now anyway. In non-stop,
2894 we need to re-resume threads that GDB wanted to be
2895 running. */
2896 unstop_all_lwps (1, event_child);
2897 }
2898 }
2899
2900 /* Stabilize threads (move out of jump pads). */
2901 if (!non_stop)
2902 stabilize_threads ();
2903 }
2904 else
2905 {
2906 /* If we just finished a step-over, then all threads had been
2907 momentarily paused. In all-stop, that's fine, we want
2908 threads stopped by now anyway. In non-stop, we need to
2909 re-resume threads that GDB wanted to be running. */
2910 if (step_over_finished)
2911 unstop_all_lwps (1, event_child);
2912 }
2913
2914 ourstatus->kind = TARGET_WAITKIND_STOPPED;
2915
2916 /* Now that we've selected our final event LWP, un-adjust its PC if
2917 it was a software breakpoint. */
2918 if (event_child->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
2919 {
2920 int decr_pc = the_low_target.decr_pc_after_break;
2921
2922 if (decr_pc != 0)
2923 {
2924 struct regcache *regcache
2925 = get_thread_regcache (current_thread, 1);
2926 (*the_low_target.set_pc) (regcache, event_child->stop_pc + decr_pc);
2927 }
2928 }
2929
2930 if (current_thread->last_resume_kind == resume_stop
2931 && WSTOPSIG (w) == SIGSTOP)
2932 {
2933 /* A thread that has been requested to stop by GDB with vCont;t
2934 stopped cleanly, so report it as SIG0. The use of SIGSTOP
2935 is an implementation detail. */
2936 ourstatus->value.sig = GDB_SIGNAL_0;
2937 }
2938 else if (current_thread->last_resume_kind == resume_stop
2939 && WSTOPSIG (w) != SIGSTOP)
2940 {
2941 /* A thread that has been requested to stop by GDB with vCont;t,
2942 but it stopped for some other reason. */
2943 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2944 }
2945 else
2946 {
2947 ourstatus->value.sig = gdb_signal_from_host (WSTOPSIG (w));
2948 }
2949
2950 gdb_assert (ptid_equal (step_over_bkpt, null_ptid));
2951
2952 if (debug_threads)
2953 {
2954 debug_printf ("linux_wait_1 ret = %s, %d, %d\n",
2955 target_pid_to_str (ptid_of (current_thread)),
2956 ourstatus->kind, ourstatus->value.sig);
2957 debug_exit ();
2958 }
2959
2960 return ptid_of (current_thread);
2961 }
2962
2963 /* Get rid of any pending event in the pipe. */
2964 static void
2965 async_file_flush (void)
2966 {
2967 int ret;
2968 char buf;
2969
2970 do
2971 ret = read (linux_event_pipe[0], &buf, 1);
2972 while (ret >= 0 || (ret == -1 && errno == EINTR));
2973 }
2974
2975 /* Put something in the pipe, so the event loop wakes up. */
2976 static void
2977 async_file_mark (void)
2978 {
2979 int ret;
2980
2981 async_file_flush ();
2982
2983 do
2984 ret = write (linux_event_pipe[1], "+", 1);
2985 while (ret == 0 || (ret == -1 && errno == EINTR));
2986
2987 /* Ignore EAGAIN. If the pipe is full, the event loop will already
2988 be awakened anyway. */
2989 }
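
/* Illustrative sketch (not part of the build): the self-pipe trick
   behind async_file_mark/async_file_flush. A non-blocking pipe
   turns "an event may be pending" into a file descriptor the event
   loop can select/poll on; writing one byte is async-signal-safe,
   unlike most queueing schemes. */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

static int event_pipe[2];

static void
mark_event (void)
{
  /* Safe to call from a signal handler; EAGAIN on a full pipe is
     fine, the loop is waking up anyway. */
  (void) write (event_pipe[1], "+", 1);
}

int
main (void)
{
  fd_set rset;
  char buf;

  pipe (event_pipe);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  mark_event ();

  FD_ZERO (&rset);
  FD_SET (event_pipe[0], &rset);
  select (event_pipe[0] + 1, &rset, NULL, NULL, NULL);

  /* Flush: drain every byte so the descriptor only polls readable
     again on the next mark. */
  while (read (event_pipe[0], &buf, 1) > 0)
    ;
  puts ("event loop woke up");
  return 0;
}
#endif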
2990
2991 static ptid_t
2992 linux_wait (ptid_t ptid,
2993 struct target_waitstatus *ourstatus, int target_options)
2994 {
2995 ptid_t event_ptid;
2996
2997 /* Flush the async file first. */
2998 if (target_is_async_p ())
2999 async_file_flush ();
3000
3001 do
3002 {
3003 event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
3004 }
3005 while ((target_options & TARGET_WNOHANG) == 0
3006 && ptid_equal (event_ptid, null_ptid)
3007 && ourstatus->kind == TARGET_WAITKIND_IGNORE);
3008
3009 /* If at least one stop was reported, there may be more. A single
3010 SIGCHLD can signal more than one child stop. */
3011 if (target_is_async_p ()
3012 && (target_options & TARGET_WNOHANG) != 0
3013 && !ptid_equal (event_ptid, null_ptid))
3014 async_file_mark ();
3015
3016 return event_ptid;
3017 }
3018
3019 /* Send a signal to an LWP. */
3020
3021 static int
3022 kill_lwp (unsigned long lwpid, int signo)
3023 {
3024 /* Use tkill, if possible, in case we are using NPTL threads. If tkill
3025 fails, then we are not using NPTL threads, and we should use kill instead. */
3026
3027 #ifdef __NR_tkill
3028 {
3029 static int tkill_failed;
3030
3031 if (!tkill_failed)
3032 {
3033 int ret;
3034
3035 errno = 0;
3036 ret = syscall (__NR_tkill, lwpid, signo);
3037 if (errno != ENOSYS)
3038 return ret;
3039 tkill_failed = 1;
3040 }
3041 }
3042 #endif
3043
3044 return kill (lwpid, signo);
3045 }
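
/* Illustrative sketch (not part of the build): directing a signal
   at one specific thread, as kill_lwp does above. tgkill is the
   modern variant that also checks the thread-group id; neither has
   a glibc wrapper, hence the raw syscall. */
#if 0
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
signal_one_thread (pid_t tgid, pid_t tid, int signo)
{
#ifdef __NR_tgkill
  return syscall (__NR_tgkill, tgid, tid, signo);
#else
  return syscall (__NR_tkill, tid, signo);
#endif
}
#endif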
3046
3047 void
3048 linux_stop_lwp (struct lwp_info *lwp)
3049 {
3050 send_sigstop (lwp);
3051 }
3052
3053 static void
3054 send_sigstop (struct lwp_info *lwp)
3055 {
3056 int pid;
3057
3058 pid = lwpid_of (get_lwp_thread (lwp));
3059
3060 /* If we already have a pending stop signal for this process, don't
3061 send another. */
3062 if (lwp->stop_expected)
3063 {
3064 if (debug_threads)
3065 debug_printf ("Have pending sigstop for lwp %d\n", pid);
3066
3067 return;
3068 }
3069
3070 if (debug_threads)
3071 debug_printf ("Sending sigstop to lwp %d\n", pid);
3072
3073 lwp->stop_expected = 1;
3074 kill_lwp (pid, SIGSTOP);
3075 }
3076
3077 static int
3078 send_sigstop_callback (struct inferior_list_entry *entry, void *except)
3079 {
3080 struct thread_info *thread = (struct thread_info *) entry;
3081 struct lwp_info *lwp = get_thread_lwp (thread);
3082
3083 /* Ignore EXCEPT. */
3084 if (lwp == except)
3085 return 0;
3086
3087 if (lwp->stopped)
3088 return 0;
3089
3090 send_sigstop (lwp);
3091 return 0;
3092 }
3093
3094 /* Increment the suspend count of an LWP, and stop it, if not stopped
3095 yet. */
3096 static int
3097 suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
3098 void *except)
3099 {
3100 struct thread_info *thread = (struct thread_info *) entry;
3101 struct lwp_info *lwp = get_thread_lwp (thread);
3102
3103 /* Ignore EXCEPT. */
3104 if (lwp == except)
3105 return 0;
3106
3107 lwp->suspended++;
3108
3109 return send_sigstop_callback (entry, except);
3110 }
3111
3112 static void
3113 mark_lwp_dead (struct lwp_info *lwp, int wstat)
3114 {
3115 /* It's dead, really. */
3116 lwp->dead = 1;
3117
3118 /* Store the exit status for later. */
3119 lwp->status_pending_p = 1;
3120 lwp->status_pending = wstat;
3121
3122 /* Prevent trying to stop it. */
3123 lwp->stopped = 1;
3124
3125 /* No further stops are expected from a dead lwp. */
3126 lwp->stop_expected = 0;
3127 }
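
/* Illustrative sketch (not part of the build): the pending-status
   pattern used by mark_lwp_dead above and by linux_low_filter_event.
   When an event can't be handed to the core immediately, the raw
   wait status is parked on the LWP and consumed later by the
   status_pending_p checks. All names are local to the example. */
#if 0
#include <stdio.h>

struct toy_lwp
{
  int status_pending_p;		/* Non-zero if status_pending is valid. */
  int status_pending;		/* A raw waitpid status. */
};

static void
park_status (struct toy_lwp *lwp, int wstat)
{
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;
}

static int
take_status (struct toy_lwp *lwp, int *wstatp)
{
  if (!lwp->status_pending_p)
    return 0;
  *wstatp = lwp->status_pending;
  lwp->status_pending_p = 0;
  lwp->status_pending = 0;
  return 1;
}

int
main (void)
{
  struct toy_lwp lwp = { 0, 0 };
  int wstat;

  park_status (&lwp, 0x137f);	/* Same shape as W_STOPCODE (SIGSTOP). */
  if (take_status (&lwp, &wstat))
    printf ("reporting parked status 0x%x\n", wstat);
  return 0;
}
#endif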
3128
3129 /* Wait for all children to stop for the SIGSTOPs we just queued. */
3130
3131 static void
3132 wait_for_sigstop (void)
3133 {
3134 struct thread_info *saved_thread;
3135 ptid_t saved_tid;
3136 int wstat;
3137 int ret;
3138
3139 saved_thread = current_thread;
3140 if (saved_thread != NULL)
3141 saved_tid = saved_thread->entry.id;
3142 else
3143 saved_tid = null_ptid; /* avoid bogus unused warning */
3144
3145 if (debug_threads)
3146 debug_printf ("wait_for_sigstop: pulling events\n");
3147
3148 /* Passing NULL_PTID as filter indicates we want all events to be
3149 left pending. Eventually this returns when there are no
3150 unwaited-for children left. */
3151 ret = linux_wait_for_event_filtered (minus_one_ptid, null_ptid,
3152 &wstat, __WALL);
3153 gdb_assert (ret == -1);
3154
3155 if (saved_thread == NULL || linux_thread_alive (saved_tid))
3156 current_thread = saved_thread;
3157 else
3158 {
3159 if (debug_threads)
3160 debug_printf ("Previously current thread died.\n");
3161
3162 if (non_stop)
3163 {
3164 /* We can't change the current inferior behind GDB's back,
3165 otherwise, a subsequent command may apply to the wrong
3166 process. */
3167 current_thread = NULL;
3168 }
3169 else
3170 {
3171 /* Set a valid thread as current. */
3172 set_desired_thread (0);
3173 }
3174 }
3175 }
3176
3177 /* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
3178 move it out, because we need to report the stop event to GDB. For
3179 example, if the user puts a breakpoint in the jump pad, it's
3180 because she wants to debug it. */
3181
3182 static int
3183 stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
3184 {
3185 struct thread_info *thread = (struct thread_info *) entry;
3186 struct lwp_info *lwp = get_thread_lwp (thread);
3187
3188 gdb_assert (lwp->suspended == 0);
3189 gdb_assert (lwp->stopped);
3190
3191 /* Allow debugging the jump pad, gdb_collect, etc.. */
3192 return (supports_fast_tracepoints ()
3193 && agent_loaded_p ()
3194 && (gdb_breakpoint_here (lwp->stop_pc)
3195 || lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT
3196 || thread->last_resume_kind == resume_step)
3197 && linux_fast_tracepoint_collecting (lwp, NULL));
3198 }
3199
3200 static void
3201 move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
3202 {
3203 struct thread_info *thread = (struct thread_info *) entry;
3204 struct lwp_info *lwp = get_thread_lwp (thread);
3205 int *wstat;
3206
3207 gdb_assert (lwp->suspended == 0);
3208 gdb_assert (lwp->stopped);
3209
3210 wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;
3211
3212 /* Allow debugging the jump pad, gdb_collect, etc. */
3213 if (!gdb_breakpoint_here (lwp->stop_pc)
3214 && lwp->stop_reason != LWP_STOPPED_BY_WATCHPOINT
3215 && thread->last_resume_kind != resume_step
3216 && maybe_move_out_of_jump_pad (lwp, wstat))
3217 {
3218 if (debug_threads)
3219 debug_printf ("LWP %ld needs stabilizing (in jump pad)\n",
3220 lwpid_of (thread));
3221
3222 if (wstat)
3223 {
3224 lwp->status_pending_p = 0;
3225 enqueue_one_deferred_signal (lwp, wstat);
3226
3227 if (debug_threads)
3228 debug_printf ("Signal %d for LWP %ld deferred "
3229 "(in jump pad)\n",
3230 WSTOPSIG (*wstat), lwpid_of (thread));
3231 }
3232
3233 linux_resume_one_lwp (lwp, 0, 0, NULL);
3234 }
3235 else
3236 lwp->suspended++;
3237 }
3238
3239 static int
3240 lwp_running (struct inferior_list_entry *entry, void *data)
3241 {
3242 struct thread_info *thread = (struct thread_info *) entry;
3243 struct lwp_info *lwp = get_thread_lwp (thread);
3244
3245 if (lwp->dead)
3246 return 0;
3247 if (lwp->stopped)
3248 return 0;
3249 return 1;
3250 }
3251
3252 /* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
3253 If SUSPEND, then also increase the suspend count of every LWP,
3254 except EXCEPT. */
3255
3256 static void
3257 stop_all_lwps (int suspend, struct lwp_info *except)
3258 {
3259 /* Should not be called recursively. */
3260 gdb_assert (stopping_threads == NOT_STOPPING_THREADS);
3261
3262 if (debug_threads)
3263 {
3264 debug_enter ();
3265 debug_printf ("stop_all_lwps (%s, except=%s)\n",
3266 suspend ? "stop-and-suspend" : "stop",
3267 except != NULL
3268 ? target_pid_to_str (ptid_of (get_lwp_thread (except)))
3269 : "none");
3270 }
3271
3272 stopping_threads = (suspend
3273 ? STOPPING_AND_SUSPENDING_THREADS
3274 : STOPPING_THREADS);
3275
3276 if (suspend)
3277 find_inferior (&all_threads, suspend_and_send_sigstop_callback, except);
3278 else
3279 find_inferior (&all_threads, send_sigstop_callback, except);
3280 wait_for_sigstop ();
3281 stopping_threads = NOT_STOPPING_THREADS;
3282
3283 if (debug_threads)
3284 {
3285 debug_printf ("stop_all_lwps done, setting stopping_threads "
3286 "back to !stopping\n");
3287 debug_exit ();
3288 }
3289 }
3290
3291 /* Resume execution of the inferior process.
3292 If STEP is nonzero, single-step it.
3293 If SIGNAL is nonzero, give it that signal. */
3294
3295 static void
3296 linux_resume_one_lwp (struct lwp_info *lwp,
3297 int step, int signal, siginfo_t *info)
3298 {
3299 struct thread_info *thread = get_lwp_thread (lwp);
3300 struct thread_info *saved_thread;
3301 int fast_tp_collecting;
3302
3303 if (lwp->stopped == 0)
3304 return;
3305
3306 fast_tp_collecting = lwp->collecting_fast_tracepoint;
3307
3308 gdb_assert (!stabilizing_threads || fast_tp_collecting);
3309
3310 /* Cancel actions that rely on GDB not changing the PC (e.g., the
3311 user used the "jump" command, or "set $pc = foo"). */
3312 if (lwp->stop_pc != get_pc (lwp))
3313 {
3314 /* Collecting 'while-stepping' actions doesn't make sense
3315 anymore. */
3316 release_while_stepping_state_list (thread);
3317 }
3318
3319 /* If we have pending signals or status, and a new signal, enqueue the
3320 signal. Also enqueue the signal if we are waiting to reinsert a
3321 breakpoint; it will be picked up again below. */
3322 if (signal != 0
3323 && (lwp->status_pending_p
3324 || lwp->pending_signals != NULL
3325 || lwp->bp_reinsert != 0
3326 || fast_tp_collecting))
3327 {
3328 struct pending_signals *p_sig;
3329 p_sig = xmalloc (sizeof (*p_sig));
3330 p_sig->prev = lwp->pending_signals;
3331 p_sig->signal = signal;
3332 if (info == NULL)
3333 memset (&p_sig->info, 0, sizeof (siginfo_t));
3334 else
3335 memcpy (&p_sig->info, info, sizeof (siginfo_t));
3336 lwp->pending_signals = p_sig;
3337 }
3338
3339 if (lwp->status_pending_p)
3340 {
3341 if (debug_threads)
3342 debug_printf ("Not resuming lwp %ld (%s, signal %d, stop %s);"
3343 " has pending status\n",
3344 lwpid_of (thread), step ? "step" : "continue", signal,
3345 lwp->stop_expected ? "expected" : "not expected");
3346 return;
3347 }
3348
3349 saved_thread = current_thread;
3350 current_thread = thread;
3351
3352 if (debug_threads)
3353 debug_printf ("Resuming lwp %ld (%s, signal %d, stop %s)\n",
3354 lwpid_of (thread), step ? "step" : "continue", signal,
3355 lwp->stop_expected ? "expected" : "not expected");
3356
3357 /* This bit needs some thinking about. If we get a signal that
3358 we must report while a single-step reinsert is still pending,
3359 we often end up resuming the thread. It might be better to
3360 (ew) allow a stack of pending events; then we could be sure that
3361 the reinsert happened right away and not lose any signals.
3362
3363 Making this stack would also shrink the window in which breakpoints are
3364 uninserted (see comment in linux_wait_for_lwp) but not enough for
3365 complete correctness, so it won't solve that problem. It may be
3366 worthwhile just to solve this one, however. */
3367 if (lwp->bp_reinsert != 0)
3368 {
3369 if (debug_threads)
3370 debug_printf (" pending reinsert at 0x%s\n",
3371 paddress (lwp->bp_reinsert));
3372
3373 if (can_hardware_single_step ())
3374 {
3375 if (fast_tp_collecting == 0)
3376 {
3377 if (step == 0)
3378 fprintf (stderr, "BAD - reinserting but not stepping.\n");
3379 if (lwp->suspended)
3380 fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
3381 lwp->suspended);
3382 }
3383
3384 step = 1;
3385 }
3386
3387 /* Postpone any pending signal. It was enqueued above. */
3388 signal = 0;
3389 }
3390
3391 if (fast_tp_collecting == 1)
3392 {
3393 if (debug_threads)
3394 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3395 " (exit-jump-pad-bkpt)\n",
3396 lwpid_of (thread));
3397
3398 /* Postpone any pending signal. It was enqueued above. */
3399 signal = 0;
3400 }
3401 else if (fast_tp_collecting == 2)
3402 {
3403 if (debug_threads)
3404 debug_printf ("lwp %ld wants to get out of fast tracepoint jump pad"
3405 " single-stepping\n",
3406 lwpid_of (thread));
3407
3408 if (can_hardware_single_step ())
3409 step = 1;
3410 else
3411 {
3412 internal_error (__FILE__, __LINE__,
3413 "moving out of jump pad single-stepping"
3414 " not implemented on this target");
3415 }
3416
3417 /* Postpone any pending signal. It was enqueued above. */
3418 signal = 0;
3419 }
3420
3421 /* If we have while-stepping actions in this thread set it stepping.
3422 If we have a signal to deliver, it may or may not be set to
3423 SIG_IGN, we don't know. Assume so, and allow collecting
3424 while-stepping into a signal handler. A possible smart thing to
3425 do would be to set an internal breakpoint at the signal return
3426 address, continue, and carry on catching this while-stepping
3427 action only when that breakpoint is hit. A future
3428 enhancement. */
3429 if (thread->while_stepping != NULL
3430 && can_hardware_single_step ())
3431 {
3432 if (debug_threads)
3433 debug_printf ("lwp %ld has a while-stepping action -> forcing step.\n",
3434 lwpid_of (thread));
3435 step = 1;
3436 }
3437
3438 if (the_low_target.get_pc != NULL)
3439 {
3440 struct regcache *regcache = get_thread_regcache (current_thread, 1);
3441
3442 lwp->stop_pc = (*the_low_target.get_pc) (regcache);
3443
3444 if (debug_threads)
3445 {
3446 debug_printf (" %s from pc 0x%lx\n", step ? "step" : "continue",
3447 (long) lwp->stop_pc);
3448 }
3449 }
3450
3451 /* If we have pending signals, consume one unless we are trying to
3452 reinsert a breakpoint or we're trying to finish a fast tracepoint
3453 collect. */
3454 if (lwp->pending_signals != NULL
3455 && lwp->bp_reinsert == 0
3456 && fast_tp_collecting == 0)
3457 {
3458 struct pending_signals **p_sig;
3459
3460 p_sig = &lwp->pending_signals;
3461 while ((*p_sig)->prev != NULL)
3462 p_sig = &(*p_sig)->prev;
3463
3464 signal = (*p_sig)->signal;
3465 if ((*p_sig)->info.si_signo != 0)
3466 ptrace (PTRACE_SETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3467 &(*p_sig)->info);
3468
3469 free (*p_sig);
3470 *p_sig = NULL;
3471 }
3472
3473 if (the_low_target.prepare_to_resume != NULL)
3474 the_low_target.prepare_to_resume (lwp);
3475
3476 regcache_invalidate_thread (thread);
3477 errno = 0;
3478 lwp->stopped = 0;
3479 lwp->stop_reason = LWP_STOPPED_BY_NO_REASON;
3480 lwp->stepping = step;
3481 ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (thread),
3482 (PTRACE_TYPE_ARG3) 0,
3483 /* Coerce to a uintptr_t first to avoid potential gcc warning
3484 of coercing an 8 byte integer to a 4 byte pointer. */
3485 (PTRACE_TYPE_ARG4) (uintptr_t) signal);
3486
3487 current_thread = saved_thread;
3488 if (errno)
3489 {
3490 /* ESRCH from ptrace either means that the thread was already
3491 running (an error) or that it is gone (a race condition). If
3492 it's gone, we will get a notification the next time we wait,
3493 so we can ignore the error. We could differentiate these
3494 two, but it's tricky without waiting; the thread still exists
3495 as a zombie, so sending it signal 0 would succeed. So just
3496 ignore ESRCH. */
3497 if (errno == ESRCH)
3498 return;
3499
3500 perror_with_name ("ptrace");
3501 }
3502 }
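
/* Illustrative sketch (not part of the build): the core of resuming
   a ptrace-stopped child, as done at the end of
   linux_resume_one_lwp. The data argument of PTRACE_CONT or
   PTRACE_SINGLESTEP is the signal (or 0) to deliver on resume. */
#if 0
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child;
  int wstat;

  child = fork ();
  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);		/* Report ourselves to the tracer. */
      _exit (0);
    }

  waitpid (child, &wstat, 0);	/* Child is now ptrace-stopped. */

  /* Resume without delivering a signal; PTRACE_SINGLESTEP here
     would instead step one instruction where hardware supports it. */
  if (ptrace (PTRACE_CONT, child, 0, 0) == -1)
    perror ("ptrace");		/* ESRCH may just mean it is gone. */

  waitpid (child, &wstat, 0);
  printf ("child exited cleanly: %d\n", WIFEXITED (wstat) != 0);
  return 0;
}
#endif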
3503
3504 struct thread_resume_array
3505 {
3506 struct thread_resume *resume;
3507 size_t n;
3508 };
3509
3510 /* This function is called once per thread via find_inferior.
3511 ARG is a pointer to a thread_resume_array struct.
3512 We look up the thread specified by ENTRY in ARG, and mark the thread
3513 with a pointer to the appropriate resume request.
3514
3515 This algorithm is O(threads * resume elements), but the number of
3516 resume elements is small (and will remain small at least until GDB
3517 supports thread suspension). */
3518
3519 static int
3520 linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
3521 {
3522 struct thread_info *thread = (struct thread_info *) entry;
3523 struct lwp_info *lwp = get_thread_lwp (thread);
3524 int ndx;
3525 struct thread_resume_array *r;
3526
3527 r = arg;
3528
3529 for (ndx = 0; ndx < r->n; ndx++)
3530 {
3531 ptid_t ptid = r->resume[ndx].thread;
3532 if (ptid_equal (ptid, minus_one_ptid)
3533 || ptid_equal (ptid, entry->id)
3534 /* Handle both 'pPID' and 'pPID.-1' as meaning 'all threads
3535 of PID'. */
3536 || (ptid_get_pid (ptid) == pid_of (thread)
3537 && (ptid_is_pid (ptid)
3538 || ptid_get_lwp (ptid) == -1)))
3539 {
3540 if (r->resume[ndx].kind == resume_stop
3541 && thread->last_resume_kind == resume_stop)
3542 {
3543 if (debug_threads)
3544 debug_printf ("already %s LWP %ld at GDB's request\n",
3545 (thread->last_status.kind
3546 == TARGET_WAITKIND_STOPPED)
3547 ? "stopped"
3548 : "stopping",
3549 lwpid_of (thread));
3550
3551 continue;
3552 }
3553
3554 lwp->resume = &r->resume[ndx];
3555 thread->last_resume_kind = lwp->resume->kind;
3556
3557 lwp->step_range_start = lwp->resume->step_range_start;
3558 lwp->step_range_end = lwp->resume->step_range_end;
3559
3560 /* If we had a deferred signal to report, dequeue one now.
3561 This can happen if LWP gets more than one signal while
3562 trying to get out of a jump pad. */
3563 if (lwp->stopped
3564 && !lwp->status_pending_p
3565 && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
3566 {
3567 lwp->status_pending_p = 1;
3568
3569 if (debug_threads)
3570 debug_printf ("Dequeueing deferred signal %d for LWP %ld, "
3571 "leaving status pending.\n",
3572 WSTOPSIG (lwp->status_pending),
3573 lwpid_of (thread));
3574 }
3575
3576 return 0;
3577 }
3578 }
3579
3580 /* No resume action for this thread. */
3581 lwp->resume = NULL;
3582
3583 return 0;
3584 }
3585
3586 /* find_inferior callback for linux_resume.
3587 Set *FLAG_P if this lwp has an interesting status pending. */
3588
3589 static int
3590 resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
3591 {
3592 struct thread_info *thread = (struct thread_info *) entry;
3593 struct lwp_info *lwp = get_thread_lwp (thread);
3594
3595 /* LWPs which will not be resumed are not interesting, because
3596 we might not wait for them next time through linux_wait. */
3597 if (lwp->resume == NULL)
3598 return 0;
3599
3600 if (thread_still_has_status_pending_p (thread))
3601 * (int *) flag_p = 1;
3602
3603 return 0;
3604 }
3605
3606 /* Return 1 if this lwp that GDB wants running is stopped at an
3607 internal breakpoint that we need to step over. It assumes that any
3608 required STOP_PC adjustment has already been propagated to the
3609 inferior's regcache. */
3610
3611 static int
3612 need_step_over_p (struct inferior_list_entry *entry, void *dummy)
3613 {
3614 struct thread_info *thread = (struct thread_info *) entry;
3615 struct lwp_info *lwp = get_thread_lwp (thread);
3616 struct thread_info *saved_thread;
3617 CORE_ADDR pc;
3618
3619 /* LWPs which will not be resumed are not interesting, because we
3620 might not wait for them next time through linux_wait. */
3621
3622 if (!lwp->stopped)
3623 {
3624 if (debug_threads)
3625 debug_printf ("Need step over [LWP %ld]? Ignoring, not stopped\n",
3626 lwpid_of (thread));
3627 return 0;
3628 }
3629
3630 if (thread->last_resume_kind == resume_stop)
3631 {
3632 if (debug_threads)
3633 debug_printf ("Need step over [LWP %ld]? Ignoring, should remain"
3634 " stopped\n",
3635 lwpid_of (thread));
3636 return 0;
3637 }
3638
3639 gdb_assert (lwp->suspended >= 0);
3640
3641 if (lwp->suspended)
3642 {
3643 if (debug_threads)
3644 debug_printf ("Need step over [LWP %ld]? Ignoring, suspended\n",
3645 lwpid_of (thread));
3646 return 0;
3647 }
3648
3649 if (!lwp->need_step_over)
3650 {
3651 if (debug_threads)
3652 debug_printf ("Need step over [LWP %ld]? No\n", lwpid_of (thread));
3653 }
3654
3655 if (lwp->status_pending_p)
3656 {
3657 if (debug_threads)
3658 debug_printf ("Need step over [LWP %ld]? Ignoring, has pending"
3659 " status.\n",
3660 lwpid_of (thread));
3661 return 0;
3662 }
3663
3664 /* Note: PC, not STOP_PC. Either GDB has adjusted the PC already,
3665 or we have. */
3666 pc = get_pc (lwp);
3667
3668 /* If the PC has changed since we stopped, then don't do anything,
3669 and let the breakpoint/tracepoint be hit. This happens if, for
3670 instance, GDB handled the decr_pc_after_break subtraction itself,
3671 GDB is OOL stepping this thread, or the user has issued a "jump"
3672 command, or poked the thread's registers herself. */
3673 if (pc != lwp->stop_pc)
3674 {
3675 if (debug_threads)
3676 debug_printf ("Need step over [LWP %ld]? Cancelling, PC was changed. "
3677 "Old stop_pc was 0x%s, PC is now 0x%s\n",
3678 lwpid_of (thread),
3679 paddress (lwp->stop_pc), paddress (pc));
3680
3681 lwp->need_step_over = 0;
3682 return 0;
3683 }
3684
3685 saved_thread = current_thread;
3686 current_thread = thread;
3687
3688 /* We can only step over breakpoints we know about. */
3689 if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
3690 {
3691 /* Don't step over a breakpoint that GDB expects to hit,
3692 though. If the condition is being evaluated on the target's side
3693 and it evaluates to false, step over this breakpoint as well. */
3694 if (gdb_breakpoint_here (pc)
3695 && gdb_condition_true_at_breakpoint (pc)
3696 && gdb_no_commands_at_breakpoint (pc))
3697 {
3698 if (debug_threads)
3699 debug_printf ("Need step over [LWP %ld]? yes, but found"
3700 " GDB breakpoint at 0x%s; skipping step over\n",
3701 lwpid_of (thread), paddress (pc));
3702
3703 current_thread = saved_thread;
3704 return 0;
3705 }
3706 else
3707 {
3708 if (debug_threads)
3709 debug_printf ("Need step over [LWP %ld]? yes, "
3710 "found breakpoint at 0x%s\n",
3711 lwpid_of (thread), paddress (pc));
3712
3713 /* We've found an lwp that needs stepping over --- return 1 so
3714 that find_inferior stops looking. */
3715 current_thread = saved_thread;
3716
3717 /* If the step over is cancelled, this is set again. */
3718 lwp->need_step_over = 0;
3719 return 1;
3720 }
3721 }
3722
3723 current_thread = saved_thread;
3724
3725 if (debug_threads)
3726 debug_printf ("Need step over [LWP %ld]? No, no breakpoint found"
3727 " at 0x%s\n",
3728 lwpid_of (thread), paddress (pc));
3729
3730 return 0;
3731 }
3732
3733 /* Start a step-over operation on LWP. When LWP stopped at a
3734 breakpoint, to make progress, we need to move the breakpoint out
3735 of the way. If we let other threads run while we do that, they may
3736 pass by the breakpoint location and miss hitting it. To avoid
3737 that, a step-over momentarily stops all threads while LWP is
3738 single-stepped while the breakpoint is temporarily uninserted from
3739 the inferior. When the single-step finishes, we reinsert the
3740 breakpoint, and let all threads that are supposed to be running,
3741 run again.
3742
3743 On targets that don't support hardware single-step, we don't
3744 currently support full software single-stepping. Instead, we only
3745 support stepping over the thread event breakpoint, by asking the
3746 low target where to place a reinsert breakpoint. Since this
3747 routine assumes the breakpoint being stepped over is a thread event
3748 breakpoint, it takes the return address of the current function as
3749 a good enough place to set the reinsert breakpoint. */
3750
3751 static int
3752 start_step_over (struct lwp_info *lwp)
3753 {
3754 struct thread_info *thread = get_lwp_thread (lwp);
3755 struct thread_info *saved_thread;
3756 CORE_ADDR pc;
3757 int step;
3758
3759 if (debug_threads)
3760 debug_printf ("Starting step-over on LWP %ld. Stopping all threads\n",
3761 lwpid_of (thread));
3762
3763 stop_all_lwps (1, lwp);
3764 gdb_assert (lwp->suspended == 0);
3765
3766 if (debug_threads)
3767 debug_printf ("Done stopping all threads for step-over.\n");
3768
3769 /* Note, we should always reach here with an already adjusted PC,
3770 either by GDB (if we're resuming due to GDB's request), or by our
3771 caller, if we just finished handling an internal breakpoint GDB
3772 shouldn't care about. */
3773 pc = get_pc (lwp);
3774
3775 saved_thread = current_thread;
3776 current_thread = thread;
3777
3778 lwp->bp_reinsert = pc;
3779 uninsert_breakpoints_at (pc);
3780 uninsert_fast_tracepoint_jumps_at (pc);
3781
3782 if (can_hardware_single_step ())
3783 {
3784 step = 1;
3785 }
3786 else
3787 {
3788 CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
3789 set_reinsert_breakpoint (raddr);
3790 step = 0;
3791 }
3792
3793 current_thread = saved_thread;
3794
3795 linux_resume_one_lwp (lwp, step, 0, NULL);
3796
3797 /* Require next event from this LWP. */
3798 step_over_bkpt = thread->entry.id;
3799 return 1;
3800 }
3801
3802 /* Finish a step-over. Reinsert the breakpoint we had uninserted in
3803 start_step_over, if still there, and delete any reinsert
3804 breakpoints we've set, on non hardware single-step targets. */
3805
3806 static int
3807 finish_step_over (struct lwp_info *lwp)
3808 {
3809 if (lwp->bp_reinsert != 0)
3810 {
3811 if (debug_threads)
3812 debug_printf ("Finished step over.\n");
3813
3814 /* Reinsert any breakpoint at LWP->BP_REINSERT. Note that there
3815 may be no breakpoint to reinsert there by now. */
3816 reinsert_breakpoints_at (lwp->bp_reinsert);
3817 reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);
3818
3819 lwp->bp_reinsert = 0;
3820
3821 /* Delete any software-single-step reinsert breakpoints. No
3822 longer needed. We don't have to worry about other threads
3823 hitting this trap, and later not being able to explain it,
3824 because we were stepping over a breakpoint, and we hold all
3825 threads but LWP stopped while doing that. */
3826 if (!can_hardware_single_step ())
3827 delete_reinsert_breakpoints ();
3828
3829 step_over_bkpt = null_ptid;
3830 return 1;
3831 }
3832 else
3833 return 0;
3834 }
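
/* For orientation, a compiled-out sketch of the full step-over round
   trip implemented by the two functions above.  The sequencing shown
   is an assumption drawn from their comments; in reality the
   wait-for-event part lives in the event loop, keyed by
   step_over_bkpt, not in a single caller like this.  */
#if 0
static void
step_over_round_trip_sketch (struct lwp_info *lwp)
{
  if (start_step_over (lwp))
    {
      /* ... the event loop waits here for the single-step (or
         reinsert breakpoint hit) reported by this LWP ... */

      /* Reinsert the stepped-over breakpoint, drop any reinsert
         breakpoints.  */
      finish_step_over (lwp);

      /* Set the remaining threads running again, undoing the
         stop_all_lwps done by start_step_over.  */
      unstop_all_lwps (1, lwp);
    }
}
#endif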
3835
3836 /* This function is called once per thread. We check the thread's resume
3837 request, which will tell us whether to resume, step, or leave the thread
3838 stopped; and what signal, if any, it should be sent.
3839
3840 For threads which we aren't explicitly told otherwise, we preserve
3841 the stepping flag; this is used for stepping over gdbserver-placed
3842 breakpoints.
3843
3844 If the pending-status flag was set in any thread, we queue any needed
3845 signals, since we won't actually resume. We already have a pending
3846 event to report, so we don't need to preserve any step requests;
3847 they should be re-issued if necessary. */
3848
3849 static int
3850 linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
3851 {
3852 struct thread_info *thread = (struct thread_info *) entry;
3853 struct lwp_info *lwp = get_thread_lwp (thread);
3854 int step;
3855 int leave_all_stopped = * (int *) arg;
3856 int leave_pending;
3857
3858 if (lwp->resume == NULL)
3859 return 0;
3860
3861 if (lwp->resume->kind == resume_stop)
3862 {
3863 if (debug_threads)
3864 debug_printf ("resume_stop request for LWP %ld\n", lwpid_of (thread));
3865
3866 if (!lwp->stopped)
3867 {
3868 if (debug_threads)
3869 debug_printf ("stopping LWP %ld\n", lwpid_of (thread));
3870
3871 /* Stop the thread, and wait for the event asynchronously,
3872 through the event loop. */
3873 send_sigstop (lwp);
3874 }
3875 else
3876 {
3877 if (debug_threads)
3878 debug_printf ("already stopped LWP %ld\n",
3879 lwpid_of (thread));
3880
3881 /* The LWP may have been stopped in an internal event that
3882 was not meant to be notified back to GDB (e.g., gdbserver
3883 breakpoint), so we should be reporting a stop event in
3884 this case too. */
3885
3886 /* If the thread already has a pending SIGSTOP, this is a
3887 no-op. Otherwise, something later will presumably resume
3888 the thread and this will cause it to cancel any pending
3889 operation, due to last_resume_kind == resume_stop. If
3890 the thread already has a pending status to report, we
3891 will still report it the next time we wait - see
3892 status_pending_p_callback. */
3893
3894 /* If we already have a pending signal to report, then
3895 there's no need to queue a SIGSTOP, as this means we're
3896 midway through moving the LWP out of the jumppad, and we
3897 will report the pending signal as soon as that is
3898 finished. */
3899 if (lwp->pending_signals_to_report == NULL)
3900 send_sigstop (lwp);
3901 }
3902
3903 /* For stop requests, we're done. */
3904 lwp->resume = NULL;
3905 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3906 return 0;
3907 }
3908
3909 /* If this thread, which is about to be resumed, has a pending status,
3910 then don't resume any threads - we can just report the pending
3911 status. Make sure to queue any signals that would otherwise be
3912 sent. In all-stop mode, we make this decision based on whether *any*
3913 thread has a pending status. If there's a thread that needs the
3914 step-over-breakpoint dance, then don't resume any other thread
3915 but that particular one. */
3916 leave_pending = (lwp->status_pending_p || leave_all_stopped);
3917
3918 if (!leave_pending)
3919 {
3920 if (debug_threads)
3921 debug_printf ("resuming LWP %ld\n", lwpid_of (thread));
3922
3923 step = (lwp->resume->kind == resume_step);
3924 linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
3925 }
3926 else
3927 {
3928 if (debug_threads)
3929 debug_printf ("leaving LWP %ld stopped\n", lwpid_of (thread));
3930
3931 /* If we have a new signal, enqueue the signal. */
3932 if (lwp->resume->sig != 0)
3933 {
3934 struct pending_signals *p_sig;
3935 p_sig = xmalloc (sizeof (*p_sig));
3936 p_sig->prev = lwp->pending_signals;
3937 p_sig->signal = lwp->resume->sig;
3938 memset (&p_sig->info, 0, sizeof (siginfo_t));
3939
3940 /* If this is the same signal we were previously stopped by,
3941 make sure to queue its siginfo. We can ignore the return
3942 value of ptrace; if it fails, we'll skip
3943 PTRACE_SETSIGINFO. */
3944 if (WIFSTOPPED (lwp->last_status)
3945 && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
3946 ptrace (PTRACE_GETSIGINFO, lwpid_of (thread), (PTRACE_TYPE_ARG3) 0,
3947 &p_sig->info);
3948
3949 lwp->pending_signals = p_sig;
3950 }
3951 }
3952
3953 thread->last_status.kind = TARGET_WAITKIND_IGNORE;
3954 lwp->resume = NULL;
3955 return 0;
3956 }
3957
3958 static void
3959 linux_resume (struct thread_resume *resume_info, size_t n)
3960 {
3961 struct thread_resume_array array = { resume_info, n };
3962 struct thread_info *need_step_over = NULL;
3963 int any_pending;
3964 int leave_all_stopped;
3965
3966 if (debug_threads)
3967 {
3968 debug_enter ();
3969 debug_printf ("linux_resume:\n");
3970 }
3971
3972 find_inferior (&all_threads, linux_set_resume_request, &array);
3973
3974 /* If there is a thread which would otherwise be resumed, which has
3975 a pending status, then don't resume any threads - we can just
3976 report the pending status. Make sure to queue any signals that
3977 would otherwise be sent. In non-stop mode, we'll apply this
3978 logic to each thread individually. We consume all pending events
3979 before considering starting a step-over (in all-stop). */
3980 any_pending = 0;
3981 if (!non_stop)
3982 find_inferior (&all_threads, resume_status_pending_p, &any_pending);
3983
3984 /* If there is a thread which would otherwise be resumed, which is
3985 stopped at a breakpoint that needs stepping over, then don't
3986 resume any threads - have it step over the breakpoint with all
3987 other threads stopped, then resume all threads again. Make sure
3988 to queue any signals that would otherwise be delivered or
3989 queued. */
3990 if (!any_pending && supports_breakpoints ())
3991 need_step_over
3992 = (struct thread_info *) find_inferior (&all_threads,
3993 need_step_over_p, NULL);
3994
3995 leave_all_stopped = (need_step_over != NULL || any_pending);
3996
3997 if (debug_threads)
3998 {
3999 if (need_step_over != NULL)
4000 debug_printf ("Not resuming all, need step over\n");
4001 else if (any_pending)
4002 debug_printf ("Not resuming, all-stop and found "
4003 "an LWP with pending status\n");
4004 else
4005 debug_printf ("Resuming, no pending status or step over needed\n");
4006 }
4007
4008 /* Even if we're leaving threads stopped, queue all signals we'd
4009 otherwise deliver. */
4010 find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);
4011
4012 if (need_step_over)
4013 start_step_over (get_thread_lwp (need_step_over));
4014
4015 if (debug_threads)
4016 {
4017 debug_printf ("linux_resume done\n");
4018 debug_exit ();
4019 }
4020 }
4021
4022 /* This function is called once per thread. We check the thread's
4023 last resume request, which will tell us whether to resume, step, or
4024 leave the thread stopped. Any signal the client requested to be
4025 delivered has already been enqueued at this point.
4026
4027 If any thread that GDB wants running is stopped at an internal
4028 breakpoint that needs stepping over, we start a step-over operation
4029 on that particular thread, and leave all others stopped. */
4030
4031 static int
4032 proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4033 {
4034 struct thread_info *thread = (struct thread_info *) entry;
4035 struct lwp_info *lwp = get_thread_lwp (thread);
4036 int step;
4037
4038 if (lwp == except)
4039 return 0;
4040
4041 if (debug_threads)
4042 debug_printf ("proceed_one_lwp: lwp %ld\n", lwpid_of (thread));
4043
4044 if (!lwp->stopped)
4045 {
4046 if (debug_threads)
4047 debug_printf (" LWP %ld already running\n", lwpid_of (thread));
4048 return 0;
4049 }
4050
4051 if (thread->last_resume_kind == resume_stop
4052 && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
4053 {
4054 if (debug_threads)
4055 debug_printf (" client wants LWP to remain %ld stopped\n",
4056 lwpid_of (thread));
4057 return 0;
4058 }
4059
4060 if (lwp->status_pending_p)
4061 {
4062 if (debug_threads)
4063 debug_printf (" LWP %ld has pending status, leaving stopped\n",
4064 lwpid_of (thread));
4065 return 0;
4066 }
4067
4068 gdb_assert (lwp->suspended >= 0);
4069
4070 if (lwp->suspended)
4071 {
4072 if (debug_threads)
4073 debug_printf (" LWP %ld is suspended\n", lwpid_of (thread));
4074 return 0;
4075 }
4076
4077 if (thread->last_resume_kind == resume_stop
4078 && lwp->pending_signals_to_report == NULL
4079 && lwp->collecting_fast_tracepoint == 0)
4080 {
4081 /* We haven't reported this LWP as stopped yet (otherwise, the
4082 last_status.kind check above would catch it, and we wouldn't
4083 reach here). This LWP may have been momentarily paused by a
4084 stop_all_lwps call while handling, for example, another LWP's
4085 step-over. In that case, the pending expected SIGSTOP signal
4086 that was queued at vCont;t handling time will have already
4087 been consumed by wait_for_sigstop, and so we need to requeue
4088 another one here. Note that if the LWP already has a SIGSTOP
4089 pending, this is a no-op. */
4090
4091 if (debug_threads)
4092 debug_printf ("Client wants LWP %ld to stop. "
4093 "Making sure it has a SIGSTOP pending\n",
4094 lwpid_of (thread));
4095
4096 send_sigstop (lwp);
4097 }
4098
4099 step = thread->last_resume_kind == resume_step;
4100 linux_resume_one_lwp (lwp, step, 0, NULL);
4101 return 0;
4102 }
4103
4104 static int
4105 unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
4106 {
4107 struct thread_info *thread = (struct thread_info *) entry;
4108 struct lwp_info *lwp = get_thread_lwp (thread);
4109
4110 if (lwp == except)
4111 return 0;
4112
4113 lwp->suspended--;
4114 gdb_assert (lwp->suspended >= 0);
4115
4116 return proceed_one_lwp (entry, except);
4117 }
4118
4119 /* When we finish a step-over, set threads running again. If there's
4120 another thread that may need a step-over, now's the time to start
4121 it. Eventually, we'll move all threads past their breakpoints. */
4122
4123 static void
4124 proceed_all_lwps (void)
4125 {
4126 struct thread_info *need_step_over;
4127
4128 /* If there is a thread which would otherwise be resumed, which is
4129 stopped at a breakpoint that needs stepping over, then don't
4130 resume any threads - have it step over the breakpoint with all
4131 other threads stopped, then resume all threads again. */
4132
4133 if (supports_breakpoints ())
4134 {
4135 need_step_over
4136 = (struct thread_info *) find_inferior (&all_threads,
4137 need_step_over_p, NULL);
4138
4139 if (need_step_over != NULL)
4140 {
4141 if (debug_threads)
4142 debug_printf ("proceed_all_lwps: found "
4143 "thread %ld needing a step-over\n",
4144 lwpid_of (need_step_over));
4145
4146 start_step_over (get_thread_lwp (need_step_over));
4147 return;
4148 }
4149 }
4150
4151 if (debug_threads)
4152 debug_printf ("Proceeding, no step-over needed\n");
4153
4154 find_inferior (&all_threads, proceed_one_lwp, NULL);
4155 }
4156
4157 /* Stopped LWPs that the client wanted to be running, that don't have
4158 pending statuses, are set to run again, except for EXCEPT, if not
4159 NULL. This undoes a stop_all_lwps call. */
4160
4161 static void
4162 unstop_all_lwps (int unsuspend, struct lwp_info *except)
4163 {
4164 if (debug_threads)
4165 {
4166 debug_enter ();
4167 if (except)
4168 debug_printf ("unstopping all lwps, except=(LWP %ld)\n",
4169 lwpid_of (get_lwp_thread (except)));
4170 else
4171 debug_printf ("unstopping all lwps\n");
4172 }
4173
4174 if (unsuspend)
4175 find_inferior (&all_threads, unsuspend_and_proceed_one_lwp, except);
4176 else
4177 find_inferior (&all_threads, proceed_one_lwp, except);
4178
4179 if (debug_threads)
4180 {
4181 debug_printf ("unstop_all_lwps done\n");
4182 debug_exit ();
4183 }
4184 }
4185
4186
4187 #ifdef HAVE_LINUX_REGSETS
4188
4189 #define use_linux_regsets 1
4190
4191 /* Returns true if REGSET has been disabled. */
4192
4193 static int
4194 regset_disabled (struct regsets_info *info, struct regset_info *regset)
4195 {
4196 return (info->disabled_regsets != NULL
4197 && info->disabled_regsets[regset - info->regsets]);
4198 }
4199
4200 /* Disable REGSET. */
4201
4202 static void
4203 disable_regset (struct regsets_info *info, struct regset_info *regset)
4204 {
4205 int dr_offset;
4206
4207 dr_offset = regset - info->regsets;
4208 if (info->disabled_regsets == NULL)
4209 info->disabled_regsets = xcalloc (1, info->num_regsets);
4210 info->disabled_regsets[dr_offset] = 1;
4211 }
4212
4213 static int
4214 regsets_fetch_inferior_registers (struct regsets_info *regsets_info,
4215 struct regcache *regcache)
4216 {
4217 struct regset_info *regset;
4218 int saw_general_regs = 0;
4219 int pid;
4220 struct iovec iov;
4221
4222 pid = lwpid_of (current_thread);
4223 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4224 {
4225 void *buf, *data;
4226 int nt_type, res;
4227
4228 if (regset->size == 0 || regset_disabled (regsets_info, regset))
4229 continue;
4230
4231 buf = xmalloc (regset->size);
4232
4233 nt_type = regset->nt_type;
4234 if (nt_type)
4235 {
4236 iov.iov_base = buf;
4237 iov.iov_len = regset->size;
4238 data = (void *) &iov;
4239 }
4240 else
4241 data = buf;
4242
4243 #ifndef __sparc__
4244 res = ptrace (regset->get_request, pid,
4245 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4246 #else
4247 res = ptrace (regset->get_request, pid, data, nt_type);
4248 #endif
4249 if (res < 0)
4250 {
4251 if (errno == EIO)
4252 {
4253 /* If we get EIO on a regset, do not try it again for
4254 this process mode. */
4255 disable_regset (regsets_info, regset);
4256 }
4257 else if (errno == ENODATA)
4258 {
4259 /* ENODATA may be returned if the regset is currently
4260 not "active". This can happen in normal operation,
4261 so suppress the warning in this case. */
4262 }
4263 else
4264 {
4265 char s[256];
4266 sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
4267 pid);
4268 perror (s);
4269 }
4270 }
4271 else
4272 {
4273 if (regset->type == GENERAL_REGS)
4274 saw_general_regs = 1;
4275 regset->store_function (regcache, buf);
4276 }
4277 free (buf);
4278 }
4279 if (saw_general_regs)
4280 return 0;
4281 else
4282 return 1;
4283 }
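
/* As an aside, a minimal standalone illustration of the iovec-based
   transfer used above, assuming a kernel that supports
   PTRACE_GETREGSET and using NT_PRSTATUS for the general-purpose
   registers.  It is only a sketch; gdbserver itself always goes
   through the regset tables.  */
#if 0
#include <sys/user.h>

static long
fetch_gregs_sketch (int pid, struct user_regs_struct *regs)
{
  struct iovec iov;

  iov.iov_base = regs;
  iov.iov_len = sizeof (*regs);
  /* On success the kernel truncates iov.iov_len to the number of
     bytes actually written.  */
  return ptrace (PTRACE_GETREGSET, pid, (void *) NT_PRSTATUS, &iov);
}
#endif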
4284
4285 static int
4286 regsets_store_inferior_registers (struct regsets_info *regsets_info,
4287 struct regcache *regcache)
4288 {
4289 struct regset_info *regset;
4290 int saw_general_regs = 0;
4291 int pid;
4292 struct iovec iov;
4293
4294 pid = lwpid_of (current_thread);
4295 for (regset = regsets_info->regsets; regset->size >= 0; regset++)
4296 {
4297 void *buf, *data;
4298 int nt_type, res;
4299
4300 if (regset->size == 0 || regset_disabled (regsets_info, regset)
4301 || regset->fill_function == NULL)
4302 continue;
4303
4304 buf = xmalloc (regset->size);
4305
4306 /* First fill the buffer with the current register set contents,
4307 in case there are any items in the kernel's regset that are
4308 not in gdbserver's regcache. */
4309
4310 nt_type = regset->nt_type;
4311 if (nt_type)
4312 {
4313 iov.iov_base = buf;
4314 iov.iov_len = regset->size;
4315 data = (void *) &iov;
4316 }
4317 else
4318 data = buf;
4319
4320 #ifndef __sparc__
4321 res = ptrace (regset->get_request, pid,
4322 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4323 #else
4324 res = ptrace (regset->get_request, pid, data, nt_type);
4325 #endif
4326
4327 if (res == 0)
4328 {
4329 /* Then overlay our cached registers on that. */
4330 regset->fill_function (regcache, buf);
4331
4332 /* Only now do we write the register set. */
4333 #ifndef __sparc__
4334 res = ptrace (regset->set_request, pid,
4335 (PTRACE_TYPE_ARG3) (long) nt_type, data);
4336 #else
4337 res = ptrace (regset->set_request, pid, data, nt_type);
4338 #endif
4339 }
4340
4341 if (res < 0)
4342 {
4343 if (errno == EIO)
4344 {
4345 /* If we get EIO on a regset, do not try it again for
4346 this process mode. */
4347 disable_regset (regsets_info, regset);
4348 }
4349 else if (errno == ESRCH)
4350 {
4351 /* At this point, ESRCH should mean the process is
4352 already gone, in which case we simply ignore attempts
4353 to change its registers. See also the related
4354 comment in linux_resume_one_lwp. */
4355 free (buf);
4356 return 0;
4357 }
4358 else
4359 {
4360 perror ("Warning: ptrace(regsets_store_inferior_registers)");
4361 }
4362 }
4363 else if (regset->type == GENERAL_REGS)
4364 saw_general_regs = 1;
4365 free (buf);
4366 }
4367 if (saw_general_regs)
4368 return 0;
4369 else
4370 return 1;
4371 }
4372
4373 #else /* !HAVE_LINUX_REGSETS */
4374
4375 #define use_linux_regsets 0
4376 #define regsets_fetch_inferior_registers(regsets_info, regcache) 1
4377 #define regsets_store_inferior_registers(regsets_info, regcache) 1
4378
4379 #endif
4380
4381 /* Return 1 if register REGNO is supported by one of the regset ptrace
4382 calls or 0 if it has to be transferred individually. */
4383
4384 static int
4385 linux_register_in_regsets (const struct regs_info *regs_info, int regno)
4386 {
4387 unsigned char mask = 1 << (regno % 8);
4388 size_t index = regno / 8;
4389
4390 return (use_linux_regsets
4391 && (regs_info->regset_bitmap == NULL
4392 || (regs_info->regset_bitmap[index] & mask) != 0));
4393 }
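
/* For example, in linux_register_in_regsets above, REGNO 10 lands in
   bitmap byte 1 under mask 0x04; a set bit means the register is
   transferred via a regset.  */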
4394
4395 #ifdef HAVE_LINUX_USRREGS
4396
4397 int
4398 register_addr (const struct usrregs_info *usrregs, int regnum)
4399 {
4400 int addr;
4401
4402 if (regnum < 0 || regnum >= usrregs->num_regs)
4403 error ("Invalid register number %d.", regnum);
4404
4405 addr = usrregs->regmap[regnum];
4406
4407 return addr;
4408 }
4409
4410 /* Fetch one register. */
4411 static void
4412 fetch_register (const struct usrregs_info *usrregs,
4413 struct regcache *regcache, int regno)
4414 {
4415 CORE_ADDR regaddr;
4416 int i, size;
4417 char *buf;
4418 int pid;
4419
4420 if (regno >= usrregs->num_regs)
4421 return;
4422 if ((*the_low_target.cannot_fetch_register) (regno))
4423 return;
4424
4425 regaddr = register_addr (usrregs, regno);
4426 if (regaddr == -1)
4427 return;
4428
4429 size = ((register_size (regcache->tdesc, regno)
4430 + sizeof (PTRACE_XFER_TYPE) - 1)
4431 & -sizeof (PTRACE_XFER_TYPE));
4432 buf = alloca (size);
4433
4434 pid = lwpid_of (current_thread);
4435 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4436 {
4437 errno = 0;
4438 *(PTRACE_XFER_TYPE *) (buf + i) =
4439 ptrace (PTRACE_PEEKUSER, pid,
4440 /* Coerce to a uintptr_t first to avoid potential gcc warning
4441 of coercing an 8 byte integer to a 4 byte pointer. */
4442 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr, (PTRACE_TYPE_ARG4) 0);
4443 regaddr += sizeof (PTRACE_XFER_TYPE);
4444 if (errno != 0)
4445 error ("reading register %d: %s", regno, strerror (errno));
4446 }
4447
4448 if (the_low_target.supply_ptrace_register)
4449 the_low_target.supply_ptrace_register (regcache, regno, buf);
4450 else
4451 supply_register (regcache, regno, buf);
4452 }
4453
4454 /* Store one register. */
4455 static void
4456 store_register (const struct usrregs_info *usrregs,
4457 struct regcache *regcache, int regno)
4458 {
4459 CORE_ADDR regaddr;
4460 int i, size;
4461 char *buf;
4462 int pid;
4463
4464 if (regno >= usrregs->num_regs)
4465 return;
4466 if ((*the_low_target.cannot_store_register) (regno))
4467 return;
4468
4469 regaddr = register_addr (usrregs, regno);
4470 if (regaddr == -1)
4471 return;
4472
4473 size = ((register_size (regcache->tdesc, regno)
4474 + sizeof (PTRACE_XFER_TYPE) - 1)
4475 & -sizeof (PTRACE_XFER_TYPE));
4476 buf = alloca (size);
4477 memset (buf, 0, size);
4478
4479 if (the_low_target.collect_ptrace_register)
4480 the_low_target.collect_ptrace_register (regcache, regno, buf);
4481 else
4482 collect_register (regcache, regno, buf);
4483
4484 pid = lwpid_of (current_thread);
4485 for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
4486 {
4487 errno = 0;
4488 ptrace (PTRACE_POKEUSER, pid,
4489 /* Coerce to a uintptr_t first to avoid potential gcc warning
4490 about coercing an 8 byte integer to a 4 byte pointer. */
4491 (PTRACE_TYPE_ARG3) (uintptr_t) regaddr,
4492 (PTRACE_TYPE_ARG4) *(PTRACE_XFER_TYPE *) (buf + i));
4493 if (errno != 0)
4494 {
4495 /* At this point, ESRCH should mean the process is
4496 already gone, in which case we simply ignore attempts
4497 to change its registers. See also the related
4498 comment in linux_resume_one_lwp. */
4499 if (errno == ESRCH)
4500 return;
4501
4502 if ((*the_low_target.cannot_store_register) (regno) == 0)
4503 error ("writing register %d: %s", regno, strerror (errno));
4504 }
4505 regaddr += sizeof (PTRACE_XFER_TYPE);
4506 }
4507 }
4508
4509 /* Fetch all registers, or just one, from the child process.
4510 If REGNO is -1, do this for all registers, skipping any that are
4511 assumed to have been retrieved by regsets_fetch_inferior_registers,
4512 unless ALL is non-zero.
4513 Otherwise, REGNO specifies which register (so we can save time). */
4514 static void
4515 usr_fetch_inferior_registers (const struct regs_info *regs_info,
4516 struct regcache *regcache, int regno, int all)
4517 {
4518 struct usrregs_info *usr = regs_info->usrregs;
4519
4520 if (regno == -1)
4521 {
4522 for (regno = 0; regno < usr->num_regs; regno++)
4523 if (all || !linux_register_in_regsets (regs_info, regno))
4524 fetch_register (usr, regcache, regno);
4525 }
4526 else
4527 fetch_register (usr, regcache, regno);
4528 }
4529
4530 /* Store our register values back into the inferior.
4531 If REGNO is -1, do this for all registers, skipping any that are
4532 assumed to have been saved by regsets_store_inferior_registers,
4533 unless ALL is non-zero.
4534 Otherwise, REGNO specifies which register (so we can save time). */
4535 static void
4536 usr_store_inferior_registers (const struct regs_info *regs_info,
4537 struct regcache *regcache, int regno, int all)
4538 {
4539 struct usrregs_info *usr = regs_info->usrregs;
4540
4541 if (regno == -1)
4542 {
4543 for (regno = 0; regno < usr->num_regs; regno++)
4544 if (all || !linux_register_in_regsets (regs_info, regno))
4545 store_register (usr, regcache, regno);
4546 }
4547 else
4548 store_register (usr, regcache, regno);
4549 }
4550
4551 #else /* !HAVE_LINUX_USRREGS */
4552
4553 #define usr_fetch_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4554 #define usr_store_inferior_registers(regs_info, regcache, regno, all) do {} while (0)
4555
4556 #endif
4557
4558
4559 void
4560 linux_fetch_registers (struct regcache *regcache, int regno)
4561 {
4562 int use_regsets;
4563 int all = 0;
4564 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4565
4566 if (regno == -1)
4567 {
4568 if (the_low_target.fetch_register != NULL
4569 && regs_info->usrregs != NULL)
4570 for (regno = 0; regno < regs_info->usrregs->num_regs; regno++)
4571 (*the_low_target.fetch_register) (regcache, regno);
4572
4573 all = regsets_fetch_inferior_registers (regs_info->regsets_info, regcache);
4574 if (regs_info->usrregs != NULL)
4575 usr_fetch_inferior_registers (regs_info, regcache, -1, all);
4576 }
4577 else
4578 {
4579 if (the_low_target.fetch_register != NULL
4580 && (*the_low_target.fetch_register) (regcache, regno))
4581 return;
4582
4583 use_regsets = linux_register_in_regsets (regs_info, regno);
4584 if (use_regsets)
4585 all = regsets_fetch_inferior_registers (regs_info->regsets_info,
4586 regcache);
4587 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4588 usr_fetch_inferior_registers (regs_info, regcache, regno, 1);
4589 }
4590 }
4591
4592 void
4593 linux_store_registers (struct regcache *regcache, int regno)
4594 {
4595 int use_regsets;
4596 int all = 0;
4597 const struct regs_info *regs_info = (*the_low_target.regs_info) ();
4598
4599 if (regno == -1)
4600 {
4601 all = regsets_store_inferior_registers (regs_info->regsets_info,
4602 regcache);
4603 if (regs_info->usrregs != NULL)
4604 usr_store_inferior_registers (regs_info, regcache, regno, all);
4605 }
4606 else
4607 {
4608 use_regsets = linux_register_in_regsets (regs_info, regno);
4609 if (use_regsets)
4610 all = regsets_store_inferior_registers (regs_info->regsets_info,
4611 regcache);
4612 if ((!use_regsets || all) && regs_info->usrregs != NULL)
4613 usr_store_inferior_registers (regs_info, regcache, regno, 1);
4614 }
4615 }
4616
4617
4618 /* Copy LEN bytes from inferior's memory starting at MEMADDR
4619 to debugger memory starting at MYADDR. */
4620
4621 static int
4622 linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
4623 {
4624 int pid = lwpid_of (current_thread);
4625 register PTRACE_XFER_TYPE *buffer;
4626 register CORE_ADDR addr;
4627 register int count;
4628 char filename[64];
4629 register int i;
4630 int ret;
4631 int fd;
4632
4633 /* Try using /proc. Don't bother for reads shorter than three words. */
4634 if (len >= 3 * sizeof (long))
4635 {
4636 int bytes;
4637
4638 /* We could keep this file open and cache it - possibly one per
4639 thread. That requires some juggling, but is even faster. */
4640 sprintf (filename, "/proc/%d/mem", pid);
4641 fd = open (filename, O_RDONLY | O_LARGEFILE);
4642 if (fd == -1)
4643 goto no_proc;
4644
4645 /* If pread64 is available, use it. It's faster if the kernel
4646 supports it (only one syscall), and it's 64-bit safe even on
4647 32-bit platforms (for instance, SPARC debugging a SPARC64
4648 application). */
4649 #ifdef HAVE_PREAD64
4650 bytes = pread64 (fd, myaddr, len, memaddr);
4651 #else
4652 bytes = -1;
4653 if (lseek (fd, memaddr, SEEK_SET) != -1)
4654 bytes = read (fd, myaddr, len);
4655 #endif
4656
4657 close (fd);
4658 if (bytes == len)
4659 return 0;
4660
4661 /* Some data was read, we'll try to get the rest with ptrace. */
4662 if (bytes > 0)
4663 {
4664 memaddr += bytes;
4665 myaddr += bytes;
4666 len -= bytes;
4667 }
4668 }
4669
4670 no_proc:
4671 /* Round starting address down to longword boundary. */
4672 addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4673 /* Round ending address up; get number of longwords that makes. */
4674 count = ((((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4675 / sizeof (PTRACE_XFER_TYPE));
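/* E.g., with a 4-byte PTRACE_XFER_TYPE, memaddr == 0x1003 and
   len == 6 give addr == 0x1000 and count == 3: the words at 0x1000,
   0x1004 and 0x1008 cover the requested bytes 0x1003..0x1008.  */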
4676 /* Allocate buffer of that many longwords. */
4677 buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
4678
4679 /* Read all the longwords */
4680 errno = 0;
4681 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4682 {
4683 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4684 about coercing an 8 byte integer to a 4 byte pointer. */
4685 buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
4686 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4687 (PTRACE_TYPE_ARG4) 0);
4688 if (errno)
4689 break;
4690 }
4691 ret = errno;
4692
4693 /* Copy appropriate bytes out of the buffer. */
4694 if (i > 0)
4695 {
4696 i *= sizeof (PTRACE_XFER_TYPE);
4697 i -= memaddr & (sizeof (PTRACE_XFER_TYPE) - 1);
4698 memcpy (myaddr,
4699 (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4700 i < len ? i : len);
4701 }
4702
4703 return ret;
4704 }
4705
4706 /* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
4707 memory at MEMADDR. On failure (cannot write to the inferior)
4708 returns the value of errno. Always succeeds if LEN is zero. */
4709
4710 static int
4711 linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
4712 {
4713 register int i;
4714 /* Round starting address down to longword boundary. */
4715 register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
4716 /* Round ending address up; get number of longwords that makes. */
4717 register int count
4718 = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
4719 / sizeof (PTRACE_XFER_TYPE);
4720
4721 /* Allocate buffer of that many longwords. */
4722 register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *)
4723 alloca (count * sizeof (PTRACE_XFER_TYPE));
4724
4725 int pid = lwpid_of (current_thread);
4726
4727 if (len == 0)
4728 {
4729 /* Zero length write always succeeds. */
4730 return 0;
4731 }
4732
4733 if (debug_threads)
4734 {
4735 /* Dump up to four bytes. */
4736 unsigned int val = * (unsigned int *) myaddr;
4737 if (len == 1)
4738 val = val & 0xff;
4739 else if (len == 2)
4740 val = val & 0xffff;
4741 else if (len == 3)
4742 val = val & 0xffffff;
4743 debug_printf ("Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
4744 val, (long)memaddr);
4745 }
4746
4747 /* Fill start and end extra bytes of buffer with existing memory data. */
4748
4749 errno = 0;
4750 /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
4751 about coercing an 8 byte integer to a 4 byte pointer. */
4752 buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
4753 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4754 (PTRACE_TYPE_ARG4) 0);
4755 if (errno)
4756 return errno;
4757
4758 if (count > 1)
4759 {
4760 errno = 0;
4761 buffer[count - 1]
4762 = ptrace (PTRACE_PEEKTEXT, pid,
4763 /* Coerce to a uintptr_t first to avoid potential gcc warning
4764 about coercing an 8 byte integer to a 4 byte pointer. */
4765 (PTRACE_TYPE_ARG3) (uintptr_t) (addr + (count - 1)
4766 * sizeof (PTRACE_XFER_TYPE)),
4767 (PTRACE_TYPE_ARG4) 0);
4768 if (errno)
4769 return errno;
4770 }
4771
4772 /* Copy data to be written over corresponding part of buffer. */
4773
4774 memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
4775 myaddr, len);
4776
4777 /* Write the entire buffer. */
4778
4779 for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
4780 {
4781 errno = 0;
4782 ptrace (PTRACE_POKETEXT, pid,
4783 /* Coerce to a uintptr_t first to avoid potential gcc warning
4784 about coercing an 8 byte integer to a 4 byte pointer. */
4785 (PTRACE_TYPE_ARG3) (uintptr_t) addr,
4786 (PTRACE_TYPE_ARG4) buffer[i]);
4787 if (errno)
4788 return errno;
4789 }
4790
4791 return 0;
4792 }
4793
4794 static void
4795 linux_look_up_symbols (void)
4796 {
4797 #ifdef USE_THREAD_DB
4798 struct process_info *proc = current_process ();
4799
4800 if (proc->private->thread_db != NULL)
4801 return;
4802
4803 /* If the kernel supports tracing clones, then we don't need to
4804 use the magic thread event breakpoint to learn about
4805 threads. */
4806 thread_db_init (!linux_supports_traceclone ());
4807 #endif
4808 }
4809
4810 static void
4811 linux_request_interrupt (void)
4812 {
4813 extern unsigned long signal_pid;
4814
4815 /* Send a SIGINT to the process group. This acts just like the user
4816 typed a ^C on the controlling terminal. */
4817 kill (-signal_pid, SIGINT);
4818 }
4819
4820 /* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
4821 to debugger memory starting at MYADDR. */
4822
4823 static int
4824 linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
4825 {
4826 char filename[PATH_MAX];
4827 int fd, n;
4828 int pid = lwpid_of (current_thread);
4829
4830 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
4831
4832 fd = open (filename, O_RDONLY);
4833 if (fd < 0)
4834 return -1;
4835
4836 if (offset != (CORE_ADDR) 0
4837 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4838 n = -1;
4839 else
4840 n = read (fd, myaddr, len);
4841
4842 close (fd);
4843
4844 return n;
4845 }
4846
4847 /* These breakpoint and watchpoint related wrapper functions simply
4848 pass on the function call if the target has registered a
4849 corresponding function. */
4850
4851 static int
4852 linux_supports_z_point_type (char z_type)
4853 {
4854 return (the_low_target.supports_z_point_type != NULL
4855 && the_low_target.supports_z_point_type (z_type));
4856 }
4857
4858 static int
4859 linux_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
4860 int size, struct raw_breakpoint *bp)
4861 {
4862 if (the_low_target.insert_point != NULL)
4863 return the_low_target.insert_point (type, addr, size, bp);
4864 else
4865 /* Unsupported (see target.h). */
4866 return 1;
4867 }
4868
4869 static int
4870 linux_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
4871 int size, struct raw_breakpoint *bp)
4872 {
4873 if (the_low_target.remove_point != NULL)
4874 return the_low_target.remove_point (type, addr, size, bp);
4875 else
4876 /* Unsupported (see target.h). */
4877 return 1;
4878 }
4879
4880 static int
4881 linux_stopped_by_watchpoint (void)
4882 {
4883 struct lwp_info *lwp = get_thread_lwp (current_thread);
4884
4885 return lwp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
4886 }
4887
4888 static CORE_ADDR
4889 linux_stopped_data_address (void)
4890 {
4891 struct lwp_info *lwp = get_thread_lwp (current_thread);
4892
4893 return lwp->stopped_data_address;
4894 }
4895
4896 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
4897 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
4898 && defined(PT_TEXT_END_ADDR)
4899
4900 /* This is only used for targets that define PT_TEXT_ADDR,
4901 PT_DATA_ADDR and PT_TEXT_END_ADDR. If those are not defined, supposedly
4902 the target has different ways of acquiring this information, like
4903 loadmaps. */
4904
4905 /* Under uClinux, programs are loaded at non-zero offsets, which we need
4906 to tell gdb about. */
4907
4908 static int
4909 linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
4910 {
4911 unsigned long text, text_end, data;
4912 int pid = lwpid_of (get_thread_lwp (current_thread));
4913
4914 errno = 0;
4915
4916 text = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_ADDR,
4917 (PTRACE_TYPE_ARG4) 0);
4918 text_end = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_TEXT_END_ADDR,
4919 (PTRACE_TYPE_ARG4) 0);
4920 data = ptrace (PTRACE_PEEKUSER, pid, (PTRACE_TYPE_ARG3) PT_DATA_ADDR,
4921 (PTRACE_TYPE_ARG4) 0);
4922
4923 if (errno == 0)
4924 {
4925 /* Both text and data offsets produced at compile-time (and so
4926 used by gdb) are relative to the beginning of the program,
4927 with the data segment immediately following the text segment.
4928 However, the actual runtime layout in memory may put the data
4929 somewhere else, so when we send gdb a data base-address, we
4930 use the real data base address and subtract the compile-time
4931 data base-address from it (which is just the length of the
4932 text segment). BSS immediately follows data in both
4933 cases. */
4934 *text_p = text;
4935 *data_p = data - (text_end - text);
4936
4937 return 1;
4938 }
4939 return 0;
4940 }
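
/* Concretely: if at run time text is mapped at 0x10000 with text_end
   at 0x18000 and data at 0x40000, GDB is told text = 0x10000 and
   data = 0x40000 - 0x8000 = 0x38000, so that a compile-time data
   address (which starts right after the 0x8000-byte text segment)
   relocates to the actual 0x40000 mapping.  */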
4941 #endif
4942
4943 static int
4944 linux_qxfer_osdata (const char *annex,
4945 unsigned char *readbuf, unsigned const char *writebuf,
4946 CORE_ADDR offset, int len)
4947 {
4948 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4949 }
4950
4951 /* Convert a native/host siginfo object into/from the siginfo in the
4952 layout of the inferior's architecture. DIRECTION 1 means INF_SIGINFO to SIGINFO; 0 means the reverse. */
4953
4954 static void
4955 siginfo_fixup (siginfo_t *siginfo, void *inf_siginfo, int direction)
4956 {
4957 int done = 0;
4958
4959 if (the_low_target.siginfo_fixup != NULL)
4960 done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
4961
4962 /* If there was no callback, or the callback didn't do anything,
4963 then just do a straight memcpy. */
4964 if (!done)
4965 {
4966 if (direction == 1)
4967 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4968 else
4969 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4970 }
4971 }
4972
4973 static int
4974 linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
4975 unsigned const char *writebuf, CORE_ADDR offset, int len)
4976 {
4977 int pid;
4978 siginfo_t siginfo;
4979 char inf_siginfo[sizeof (siginfo_t)];
4980
4981 if (current_thread == NULL)
4982 return -1;
4983
4984 pid = lwpid_of (current_thread);
4985
4986 if (debug_threads)
4987 debug_printf ("%s siginfo for lwp %d.\n",
4988 readbuf != NULL ? "Reading" : "Writing",
4989 pid);
4990
4991 if (offset >= sizeof (siginfo))
4992 return -1;
4993
4994 if (ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
4995 return -1;
4996
4997 /* When GDBSERVER is built as a 64-bit application, ptrace writes into
4998 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4999 inferior with a 64-bit GDBSERVER should look the same as debugging it
5000 with a 32-bit GDBSERVER, we need to convert it. */
5001 siginfo_fixup (&siginfo, inf_siginfo, 0);
5002
5003 if (offset + len > sizeof (siginfo))
5004 len = sizeof (siginfo) - offset;
5005
5006 if (readbuf != NULL)
5007 memcpy (readbuf, inf_siginfo + offset, len);
5008 else
5009 {
5010 memcpy (inf_siginfo + offset, writebuf, len);
5011
5012 /* Convert back to ptrace layout before flushing it out. */
5013 siginfo_fixup (&siginfo, inf_siginfo, 1);
5014
5015 if (ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo) != 0)
5016 return -1;
5017 }
5018
5019 return len;
5020 }
5021
5022 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5023 it notifies us when children change state; and it serves as the
5024 handler for the sigsuspend in my_waitpid. */
5025
5026 static void
5027 sigchld_handler (int signo)
5028 {
5029 int old_errno = errno;
5030
5031 if (debug_threads)
5032 {
5033 do
5034 {
5035 /* fprintf is not async-signal-safe, so call write
5036 directly. */
5037 if (write (2, "sigchld_handler\n",
5038 sizeof ("sigchld_handler\n") - 1) < 0)
5039 break; /* just ignore */
5040 } while (0);
5041 }
5042
5043 if (target_is_async_p ())
5044 async_file_mark (); /* trigger a linux_wait */
5045
5046 errno = old_errno;
5047 }
5048
5049 static int
5050 linux_supports_non_stop (void)
5051 {
5052 return 1;
5053 }
5054
5055 static int
5056 linux_async (int enable)
5057 {
5058 int previous = target_is_async_p ();
5059
5060 if (debug_threads)
5061 debug_printf ("linux_async (%d), previous=%d\n",
5062 enable, previous);
5063
5064 if (previous != enable)
5065 {
5066 sigset_t mask;
5067 sigemptyset (&mask);
5068 sigaddset (&mask, SIGCHLD);
5069
5070 sigprocmask (SIG_BLOCK, &mask, NULL);
5071
5072 if (enable)
5073 {
5074 if (pipe (linux_event_pipe) == -1)
5075 {
5076 linux_event_pipe[0] = -1;
5077 linux_event_pipe[1] = -1;
5078 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5079
5080 warning ("creating event pipe failed.");
5081 return previous;
5082 }
5083
5084 fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
5085 fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
5086
5087 /* Register the event loop handler. */
5088 add_file_handler (linux_event_pipe[0],
5089 handle_target_event, NULL);
5090
5091 /* Always trigger a linux_wait. */
5092 async_file_mark ();
5093 }
5094 else
5095 {
5096 delete_file_handler (linux_event_pipe[0]);
5097
5098 close (linux_event_pipe[0]);
5099 close (linux_event_pipe[1]);
5100 linux_event_pipe[0] = -1;
5101 linux_event_pipe[1] = -1;
5102 }
5103
5104 sigprocmask (SIG_UNBLOCK, &mask, NULL);
5105 }
5106
5107 return previous;
5108 }
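
/* A compiled-out sketch of the self-pipe convention assumed above:
   async_file_mark (defined elsewhere) is expected to make the read
   end of linux_event_pipe readable, which is what wakes the event
   loop.  Roughly:  */
#if 0
static void
async_file_mark_sketch (void)
{
  int ret;

  /* The write end is O_NONBLOCK, so a full pipe is fine: one pending
     byte is already enough to wake the event loop.  */
  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret < 0 && errno == EINTR);
}
#endif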
5109
5110 static int
5111 linux_start_non_stop (int nonstop)
5112 {
5113 /* Register or unregister from event-loop accordingly. */
5114 linux_async (nonstop);
5115
5116 if (target_is_async_p () != (nonstop != 0))
5117 return -1;
5118
5119 return 0;
5120 }
5121
5122 static int
5123 linux_supports_multi_process (void)
5124 {
5125 return 1;
5126 }
5127
5128 static int
5129 linux_supports_disable_randomization (void)
5130 {
5131 #ifdef HAVE_PERSONALITY
5132 return 1;
5133 #else
5134 return 0;
5135 #endif
5136 }
5137
5138 static int
5139 linux_supports_agent (void)
5140 {
5141 return 1;
5142 }
5143
5144 static int
5145 linux_supports_range_stepping (void)
5146 {
5147 if (*the_low_target.supports_range_stepping == NULL)
5148 return 0;
5149
5150 return (*the_low_target.supports_range_stepping) ();
5151 }
5152
5153 /* Enumerate spufs IDs for process PID. */
5154 static int
5155 spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
5156 {
5157 int pos = 0;
5158 int written = 0;
5159 char path[128];
5160 DIR *dir;
5161 struct dirent *entry;
5162
5163 sprintf (path, "/proc/%ld/fd", pid);
5164 dir = opendir (path);
5165 if (!dir)
5166 return -1;
5167
5168 rewinddir (dir);
5169 while ((entry = readdir (dir)) != NULL)
5170 {
5171 struct stat st;
5172 struct statfs stfs;
5173 int fd;
5174
5175 fd = atoi (entry->d_name);
5176 if (!fd)
5177 continue;
5178
5179 sprintf (path, "/proc/%ld/fd/%d", pid, fd);
5180 if (stat (path, &st) != 0)
5181 continue;
5182 if (!S_ISDIR (st.st_mode))
5183 continue;
5184
5185 if (statfs (path, &stfs) != 0)
5186 continue;
5187 if (stfs.f_type != SPUFS_MAGIC)
5188 continue;
5189
5190 if (pos >= offset && pos + 4 <= offset + len)
5191 {
5192 *(unsigned int *)(buf + pos - offset) = fd;
5193 written += 4;
5194 }
5195 pos += 4;
5196 }
5197
5198 closedir (dir);
5199 return written;
5200 }
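
/* For instance, with spufs contexts on fds 7 and 9 the virtual
   buffer is eight bytes, { 7, 9 } as two 4-byte IDs; a request with
   OFFSET 4 and LEN 4 returns only the second ID.  */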
5201
5202 /* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
5203 object type, using the /proc file system. */
5204 static int
5205 linux_qxfer_spu (const char *annex, unsigned char *readbuf,
5206 unsigned const char *writebuf,
5207 CORE_ADDR offset, int len)
5208 {
5209 long pid = lwpid_of (current_thread);
5210 char buf[128];
5211 int fd = 0;
5212 int ret = 0;
5213
5214 if (!writebuf && !readbuf)
5215 return -1;
5216
5217 if (!*annex)
5218 {
5219 if (!readbuf)
5220 return -1;
5221 else
5222 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5223 }
5224
5225 sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
5226 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5227 if (fd <= 0)
5228 return -1;
5229
5230 if (offset != 0
5231 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5232 {
5233 close (fd);
5234 return 0;
5235 }
5236
5237 if (writebuf)
5238 ret = write (fd, writebuf, (size_t) len);
5239 else
5240 ret = read (fd, readbuf, (size_t) len);
5241
5242 close (fd);
5243 return ret;
5244 }
5245
5246 #if defined PT_GETDSBT || defined PTRACE_GETFDPIC
5247 struct target_loadseg
5248 {
5249 /* Core address to which the segment is mapped. */
5250 Elf32_Addr addr;
5251 /* VMA recorded in the program header. */
5252 Elf32_Addr p_vaddr;
5253 /* Size of this segment in memory. */
5254 Elf32_Word p_memsz;
5255 };
5256
5257 # if defined PT_GETDSBT
5258 struct target_loadmap
5259 {
5260 /* Protocol version number, must be zero. */
5261 Elf32_Word version;
5262 /* Pointer to the DSBT table, its size, and the DSBT index. */
5263 unsigned *dsbt_table;
5264 unsigned dsbt_size, dsbt_index;
5265 /* Number of segments in this map. */
5266 Elf32_Word nsegs;
5267 /* The actual memory map. */
5268 struct target_loadseg segs[/*nsegs*/];
5269 };
5270 # define LINUX_LOADMAP PT_GETDSBT
5271 # define LINUX_LOADMAP_EXEC PTRACE_GETDSBT_EXEC
5272 # define LINUX_LOADMAP_INTERP PTRACE_GETDSBT_INTERP
5273 # else
5274 struct target_loadmap
5275 {
5276 /* Protocol version number, must be zero. */
5277 Elf32_Half version;
5278 /* Number of segments in this map. */
5279 Elf32_Half nsegs;
5280 /* The actual memory map. */
5281 struct target_loadseg segs[/*nsegs*/];
5282 };
5283 # define LINUX_LOADMAP PTRACE_GETFDPIC
5284 # define LINUX_LOADMAP_EXEC PTRACE_GETFDPIC_EXEC
5285 # define LINUX_LOADMAP_INTERP PTRACE_GETFDPIC_INTERP
5286 # endif
5287
5288 static int
5289 linux_read_loadmap (const char *annex, CORE_ADDR offset,
5290 unsigned char *myaddr, unsigned int len)
5291 {
5292 int pid = lwpid_of (current_thread);
5293 int addr = -1;
5294 struct target_loadmap *data = NULL;
5295 unsigned int actual_length, copy_length;
5296
5297 if (strcmp (annex, "exec") == 0)
5298 addr = (int) LINUX_LOADMAP_EXEC;
5299 else if (strcmp (annex, "interp") == 0)
5300 addr = (int) LINUX_LOADMAP_INTERP;
5301 else
5302 return -1;
5303
5304 if (ptrace (LINUX_LOADMAP, pid, addr, &data) != 0)
5305 return -1;
5306
5307 if (data == NULL)
5308 return -1;
5309
5310 actual_length = sizeof (struct target_loadmap)
5311 + sizeof (struct target_loadseg) * data->nsegs;
5312
5313 if (offset < 0 || offset > actual_length)
5314 return -1;
5315
5316 copy_length = actual_length - offset < len ? actual_length - offset : len;
5317 memcpy (myaddr, (char *) data + offset, copy_length);
5318 return copy_length;
5319 }
5320 #else
5321 # define linux_read_loadmap NULL
5322 #endif /* defined PT_GETDSBT || defined PTRACE_GETFDPIC */
5323
5324 static void
5325 linux_process_qsupported (const char *query)
5326 {
5327 if (the_low_target.process_qsupported != NULL)
5328 the_low_target.process_qsupported (query);
5329 }
5330
5331 static int
5332 linux_supports_tracepoints (void)
5333 {
5334 if (*the_low_target.supports_tracepoints == NULL)
5335 return 0;
5336
5337 return (*the_low_target.supports_tracepoints) ();
5338 }
5339
5340 static CORE_ADDR
5341 linux_read_pc (struct regcache *regcache)
5342 {
5343 if (the_low_target.get_pc == NULL)
5344 return 0;
5345
5346 return (*the_low_target.get_pc) (regcache);
5347 }
5348
5349 static void
5350 linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
5351 {
5352 gdb_assert (the_low_target.set_pc != NULL);
5353
5354 (*the_low_target.set_pc) (regcache, pc);
5355 }
5356
5357 static int
5358 linux_thread_stopped (struct thread_info *thread)
5359 {
5360 return get_thread_lwp (thread)->stopped;
5361 }
5362
5363 /* This exposes stop-all-threads functionality to other modules. */
5364
5365 static void
5366 linux_pause_all (int freeze)
5367 {
5368 stop_all_lwps (freeze, NULL);
5369 }
5370
5371 /* This exposes unstop-all-threads functionality to other gdbserver
5372 modules. */
5373
5374 static void
5375 linux_unpause_all (int unfreeze)
5376 {
5377 unstop_all_lwps (unfreeze, NULL);
5378 }
5379
5380 static int
5381 linux_prepare_to_access_memory (void)
5382 {
5383 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5384 running LWP. */
5385 if (non_stop)
5386 linux_pause_all (1);
5387 return 0;
5388 }
5389
5390 static void
5391 linux_done_accessing_memory (void)
5392 {
5393 /* Neither ptrace nor /proc/PID/mem allows accessing memory through a
5394 running LWP. */
5395 if (non_stop)
5396 linux_unpause_all (1);
5397 }
5398
5399 static int
5400 linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
5401 CORE_ADDR collector,
5402 CORE_ADDR lockaddr,
5403 ULONGEST orig_size,
5404 CORE_ADDR *jump_entry,
5405 CORE_ADDR *trampoline,
5406 ULONGEST *trampoline_size,
5407 unsigned char *jjump_pad_insn,
5408 ULONGEST *jjump_pad_insn_size,
5409 CORE_ADDR *adjusted_insn_addr,
5410 CORE_ADDR *adjusted_insn_addr_end,
5411 char *err)
5412 {
5413 return (*the_low_target.install_fast_tracepoint_jump_pad)
5414 (tpoint, tpaddr, collector, lockaddr, orig_size,
5415 jump_entry, trampoline, trampoline_size,
5416 jjump_pad_insn, jjump_pad_insn_size,
5417 adjusted_insn_addr, adjusted_insn_addr_end,
5418 err);
5419 }
5420
5421 static struct emit_ops *
5422 linux_emit_ops (void)
5423 {
5424 if (the_low_target.emit_ops != NULL)
5425 return (*the_low_target.emit_ops) ();
5426 else
5427 return NULL;
5428 }
5429
5430 static int
5431 linux_get_min_fast_tracepoint_insn_len (void)
5432 {
5433 return (*the_low_target.get_min_fast_tracepoint_insn_len) ();
5434 }
5435
5436 /* Extract &phdr and num_phdr in the inferior. Return 0 on success. */
5437
5438 static int
5439 get_phdr_phnum_from_proc_auxv (const int pid, const int is_elf64,
5440 CORE_ADDR *phdr_memaddr, int *num_phdr)
5441 {
5442 char filename[PATH_MAX];
5443 int fd;
5444 const int auxv_size = is_elf64
5445 ? sizeof (Elf64_auxv_t) : sizeof (Elf32_auxv_t);
5446 char buf[sizeof (Elf64_auxv_t)]; /* The larger of the two. */
5447
5448 xsnprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
5449
5450 fd = open (filename, O_RDONLY);
5451 if (fd < 0)
5452 return 1;
5453
5454 *phdr_memaddr = 0;
5455 *num_phdr = 0;
5456 while (read (fd, buf, auxv_size) == auxv_size
5457 && (*phdr_memaddr == 0 || *num_phdr == 0))
5458 {
5459 if (is_elf64)
5460 {
5461 Elf64_auxv_t *const aux = (Elf64_auxv_t *) buf;
5462
5463 switch (aux->a_type)
5464 {
5465 case AT_PHDR:
5466 *phdr_memaddr = aux->a_un.a_val;
5467 break;
5468 case AT_PHNUM:
5469 *num_phdr = aux->a_un.a_val;
5470 break;
5471 }
5472 }
5473 else
5474 {
5475 Elf32_auxv_t *const aux = (Elf32_auxv_t *) buf;
5476
5477 switch (aux->a_type)
5478 {
5479 case AT_PHDR:
5480 *phdr_memaddr = aux->a_un.a_val;
5481 break;
5482 case AT_PHNUM:
5483 *num_phdr = aux->a_un.a_val;
5484 break;
5485 }
5486 }
5487 }
5488
5489 close (fd);
5490
5491 if (*phdr_memaddr == 0 || *num_phdr == 0)
5492 {
5493 warning ("Unexpected missing AT_PHDR and/or AT_PHNUM: "
5494 "phdr_memaddr = %ld, phdr_num = %d",
5495 (long) *phdr_memaddr, *num_phdr);
5496 return 2;
5497 }
5498
5499 return 0;
5500 }
5501
5502 /* Return &_DYNAMIC (via PT_DYNAMIC) in the inferior, or 0 if not present. */
5503
5504 static CORE_ADDR
5505 get_dynamic (const int pid, const int is_elf64)
5506 {
5507 CORE_ADDR phdr_memaddr, relocation;
5508 int num_phdr, i;
5509 unsigned char *phdr_buf;
5510 const int phdr_size = is_elf64 ? sizeof (Elf64_Phdr) : sizeof (Elf32_Phdr);
5511
5512 if (get_phdr_phnum_from_proc_auxv (pid, is_elf64, &phdr_memaddr, &num_phdr))
5513 return 0;
5514
5515 gdb_assert (num_phdr < 100); /* Basic sanity check. */
5516 phdr_buf = alloca (num_phdr * phdr_size);
5517
5518 if (linux_read_memory (phdr_memaddr, phdr_buf, num_phdr * phdr_size))
5519 return 0;
5520
5521 /* Compute relocation: it is expected to be 0 for "regular" executables,
5522 non-zero for PIE ones. */
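/* E.g., for a PIE linked at base 0 whose PT_PHDR has p_vaddr 0x40
   and which the kernel loaded at base B, auxv's AT_PHDR is B + 0x40,
   so the subtraction below yields B.  */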
5523 relocation = -1;
5524 for (i = 0; relocation == -1 && i < num_phdr; i++)
5525 if (is_elf64)
5526 {
5527 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5528
5529 if (p->p_type == PT_PHDR)
5530 relocation = phdr_memaddr - p->p_vaddr;
5531 }
5532 else
5533 {
5534 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5535
5536 if (p->p_type == PT_PHDR)
5537 relocation = phdr_memaddr - p->p_vaddr;
5538 }
5539
5540 if (relocation == -1)
5541 {
5542 /* PT_PHDR is optional, but necessary for PIE in general. Fortunately,
5543 real world executables, including PIE executables, always have
5544 PT_PHDR present. PT_PHDR is not present in some shared libraries or
5545 in fpc (Free Pascal 2.4) binaries, but neither of those needs or
5546 provides DT_DEBUG anyway (fpc binaries are statically linked).
5547
5548 Therefore, if DT_DEBUG exists, PT_PHDR always exists as well.
5549
5550 GDB could find RELOCATION also from AT_ENTRY - e_entry. */
5551
5552 return 0;
5553 }
5554
5555 for (i = 0; i < num_phdr; i++)
5556 {
5557 if (is_elf64)
5558 {
5559 Elf64_Phdr *const p = (Elf64_Phdr *) (phdr_buf + i * phdr_size);
5560
5561 if (p->p_type == PT_DYNAMIC)
5562 return p->p_vaddr + relocation;
5563 }
5564 else
5565 {
5566 Elf32_Phdr *const p = (Elf32_Phdr *) (phdr_buf + i * phdr_size);
5567
5568 if (p->p_type == PT_DYNAMIC)
5569 return p->p_vaddr + relocation;
5570 }
5571 }
5572
5573 return 0;
5574 }
5575
5576 /* Return &_r_debug in the inferior, or -1 if not present. Return value
5577 can be 0 if the inferior does not yet have the library list initialized.
5578 We look for DT_MIPS_RLD_MAP first. MIPS executables use this instead of
5579 DT_DEBUG, although they sometimes contain an unused DT_DEBUG entry too. */
5580
5581 static CORE_ADDR
5582 get_r_debug (const int pid, const int is_elf64)
5583 {
5584 CORE_ADDR dynamic_memaddr;
5585 const int dyn_size = is_elf64 ? sizeof (Elf64_Dyn) : sizeof (Elf32_Dyn);
5586 unsigned char buf[sizeof (Elf64_Dyn)]; /* The larger of the two. */
5587 CORE_ADDR map = -1;
5588
5589 dynamic_memaddr = get_dynamic (pid, is_elf64);
5590 if (dynamic_memaddr == 0)
5591 return map;
5592
5593 while (linux_read_memory (dynamic_memaddr, buf, dyn_size) == 0)
5594 {
5595 if (is_elf64)
5596 {
5597 Elf64_Dyn *const dyn = (Elf64_Dyn *) buf;
5598 #ifdef DT_MIPS_RLD_MAP
5599 union
5600 {
5601 Elf64_Xword map;
5602 unsigned char buf[sizeof (Elf64_Xword)];
5603 }
5604 rld_map;
5605
5606 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5607 {
5608 if (linux_read_memory (dyn->d_un.d_val,
5609 rld_map.buf, sizeof (rld_map.buf)) == 0)
5610 return rld_map.map;
5611 else
5612 break;
5613 }
5614 #endif /* DT_MIPS_RLD_MAP */
5615
5616 if (dyn->d_tag == DT_DEBUG && map == -1)
5617 map = dyn->d_un.d_val;
5618
5619 if (dyn->d_tag == DT_NULL)
5620 break;
5621 }
5622 else
5623 {
5624 Elf32_Dyn *const dyn = (Elf32_Dyn *) buf;
5625 #ifdef DT_MIPS_RLD_MAP
5626 union
5627 {
5628 Elf32_Word map;
5629 unsigned char buf[sizeof (Elf32_Word)];
5630 }
5631 rld_map;
5632
5633 if (dyn->d_tag == DT_MIPS_RLD_MAP)
5634 {
5635 if (linux_read_memory (dyn->d_un.d_val,
5636 rld_map.buf, sizeof (rld_map.buf)) == 0)
5637 return rld_map.map;
5638 else
5639 break;
5640 }
5641 #endif /* DT_MIPS_RLD_MAP */
5642
5643 if (dyn->d_tag == DT_DEBUG && map == -1)
5644 map = dyn->d_un.d_val;
5645
5646 if (dyn->d_tag == DT_NULL)
5647 break;
5648 }
5649
5650 dynamic_memaddr += dyn_size;
5651 }
5652
5653 return map;
5654 }
5655
5656 /* Read one pointer from MEMADDR in the inferior. */
5657
5658 static int
5659 read_one_ptr (CORE_ADDR memaddr, CORE_ADDR *ptr, int ptr_size)
5660 {
5661 int ret;
5662
5663 /* Go through a union so this works on either big or little endian
5664 hosts, when the inferior's pointer size is smaller than the size
5665 of CORE_ADDR. It is assumed the inferior's endianness is the
5666 same as the superior's. */
5667 union
5668 {
5669 CORE_ADDR core_addr;
5670 unsigned int ui;
5671 unsigned char uc;
5672 } addr;
5673
5674 ret = linux_read_memory (memaddr, &addr.uc, ptr_size);
5675 if (ret == 0)
5676 {
5677 if (ptr_size == sizeof (CORE_ADDR))
5678 *ptr = addr.core_addr;
5679 else if (ptr_size == sizeof (unsigned int))
5680 *ptr = addr.ui;
5681 else
5682 gdb_assert_not_reached ("unhandled pointer size");
5683 }
5684 return ret;
5685 }
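
/* A worked example (bytes made up) of why the union is needed: on a
   64-bit big-endian host debugging a 32-bit big-endian inferior, an
   inferior pointer holding 0x00010203 is stored as the bytes
   00 01 02 03.  Reading them into the union places them at offsets
   0..3, exactly where the 4-byte ADDR.UI member is read back, so *PTR
   becomes 0x00010203.  Had the bytes been read directly into a
   CORE_ADDR, they would have landed in its high-order half instead.  */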
5686
5687 struct link_map_offsets
5688 {
5689 /* Offset and size of r_debug.r_version. */
5690 int r_version_offset;
5691
5692 /* Offset and size of r_debug.r_map. */
5693 int r_map_offset;
5694
5695 /* Offset to l_addr field in struct link_map. */
5696 int l_addr_offset;
5697
5698 /* Offset to l_name field in struct link_map. */
5699 int l_name_offset;
5700
5701 /* Offset to l_ld field in struct link_map. */
5702 int l_ld_offset;
5703
5704 /* Offset to l_next field in struct link_map. */
5705 int l_next_offset;
5706
5707 /* Offset to l_prev field in struct link_map. */
5708 int l_prev_offset;
5709 };
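
/* For reference, the offsets above correspond to the SVR4/glibc
   declarations from <link.h>, sketched here in simplified form
   (trailing members omitted); this is an illustration, not the real
   declarations.  */
#if 0
struct r_debug
  {
    int r_version;		/* r_version_offset.  */
    struct link_map *r_map;	/* r_map_offset: 4 or 8, from pointer
				   size and alignment padding.  */
  };

struct link_map
  {
    ElfW(Addr) l_addr;		/* l_addr_offset.  */
    char *l_name;		/* l_name_offset.  */
    ElfW(Dyn) *l_ld;		/* l_ld_offset.  */
    struct link_map *l_next;	/* l_next_offset.  */
    struct link_map *l_prev;	/* l_prev_offset.  */
  };
#endif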
5710
5711 /* Construct qXfer:libraries-svr4:read reply. */
5712
5713 static int
5714 linux_qxfer_libraries_svr4 (const char *annex, unsigned char *readbuf,
5715 unsigned const char *writebuf,
5716 CORE_ADDR offset, int len)
5717 {
5718 char *document;
5719 unsigned document_len;
5720 struct process_info_private *const priv = current_process ()->private;
5721 char filename[PATH_MAX];
5722 int pid, is_elf64;
5723
5724 static const struct link_map_offsets lmo_32bit_offsets =
5725 {
5726 0, /* r_version offset. */
5727 4, /* r_debug.r_map offset. */
5728 0, /* l_addr offset in link_map. */
5729 4, /* l_name offset in link_map. */
5730 8, /* l_ld offset in link_map. */
5731 12, /* l_next offset in link_map. */
5732 16 /* l_prev offset in link_map. */
5733 };
5734
5735 static const struct link_map_offsets lmo_64bit_offsets =
5736 {
5737 0, /* r_version offset. */
5738 8, /* r_debug.r_map offset. */
5739 0, /* l_addr offset in link_map. */
5740 8, /* l_name offset in link_map. */
5741 16, /* l_ld offset in link_map. */
5742 24, /* l_next offset in link_map. */
5743 32 /* l_prev offset in link_map. */
5744 };
5745 const struct link_map_offsets *lmo;
5746 unsigned int machine;
5747 int ptr_size;
5748 CORE_ADDR lm_addr = 0, lm_prev = 0;
5749 int allocated = 1024;
5750 char *p;
5751 CORE_ADDR l_name, l_addr, l_ld, l_next, l_prev;
5752 int header_done = 0;
5753
5754 if (writebuf != NULL)
5755 return -2;
5756 if (readbuf == NULL)
5757 return -1;
5758
5759 pid = lwpid_of (current_thread);
5760 xsnprintf (filename, sizeof filename, "/proc/%d/exe", pid);
5761 is_elf64 = elf_64_file_p (filename, &machine);
5762 lmo = is_elf64 ? &lmo_64bit_offsets : &lmo_32bit_offsets;
5763 ptr_size = is_elf64 ? 8 : 4;
5764
5765 while (annex[0] != '\0')
5766 {
5767 const char *sep;
5768 CORE_ADDR *addrp;
5769 int len;
5770
5771 sep = strchr (annex, '=');
5772 if (sep == NULL)
5773 break;
5774
5775 len = sep - annex;
5776 if (len == 5 && strncmp (annex, "start", 5) == 0)
5777 addrp = &lm_addr;
5778 else if (len == 4 && strncmp (annex, "prev", 4) == 0)
5779 addrp = &lm_prev;
5780 else
5781 {
5782 annex = strchr (sep, ';');
5783 if (annex == NULL)
5784 break;
5785 annex++;
5786 continue;
5787 }
5788
5789 annex = decode_address_to_semicolon (addrp, sep + 1);
5790 }
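
/* For example (addresses made up), GDB may send an annex of the form
   "start=555555554000;prev=0", which the loop above parses into
   LM_ADDR and LM_PREV via decode_address_to_semicolon; unknown
   NAME=VALUE pairs are skipped.  */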
5791
5792 if (lm_addr == 0)
5793 {
5794 int r_version = 0;
5795
5796 if (priv->r_debug == 0)
5797 priv->r_debug = get_r_debug (pid, is_elf64);
5798
5799 /* We failed to find DT_DEBUG. This situation will not change
5800 for this inferior - do not retry it. Report it to GDB as
5801 E01; see GDB's solib-svr4.c for the reasons. */
5802 if (priv->r_debug == (CORE_ADDR) -1)
5803 return -1;
5804
5805 if (priv->r_debug != 0)
5806 {
5807 if (linux_read_memory (priv->r_debug + lmo->r_version_offset,
5808 (unsigned char *) &r_version,
5809 sizeof (r_version)) != 0
5810 || r_version != 1)
5811 {
5812 warning ("unexpected r_debug version %d", r_version);
5813 }
5814 else if (read_one_ptr (priv->r_debug + lmo->r_map_offset,
5815 &lm_addr, ptr_size) != 0)
5816 {
5817 warning ("unable to read r_map from 0x%lx",
5818 (long) priv->r_debug + lmo->r_map_offset);
5819 }
5820 }
5821 }
5822
5823 document = xmalloc (allocated);
5824 strcpy (document, "<library-list-svr4 version=\"1.0\"");
5825 p = document + strlen (document);
5826
5827 while (lm_addr
5828 && read_one_ptr (lm_addr + lmo->l_name_offset,
5829 &l_name, ptr_size) == 0
5830 && read_one_ptr (lm_addr + lmo->l_addr_offset,
5831 &l_addr, ptr_size) == 0
5832 && read_one_ptr (lm_addr + lmo->l_ld_offset,
5833 &l_ld, ptr_size) == 0
5834 && read_one_ptr (lm_addr + lmo->l_prev_offset,
5835 &l_prev, ptr_size) == 0
5836 && read_one_ptr (lm_addr + lmo->l_next_offset,
5837 &l_next, ptr_size) == 0)
5838 {
5839 unsigned char libname[PATH_MAX];
5840
5841 if (lm_prev != l_prev)
5842 {
5843 warning ("Corrupted shared library list: 0x%lx != 0x%lx",
5844 (long) lm_prev, (long) l_prev);
5845 break;
5846 }
5847
5848 /* Ignore the first entry even if it has a valid name, as the first
5849 entry corresponds to the main executable. The first entry should
5850 not be skipped if the dynamic loader was loaded late by a static
5851 executable (see solib-svr4.c parameter ignore_first). But in that
5852 case the main executable does not have PT_DYNAMIC present and this
5853 function has already returned above because get_r_debug failed. */
5854 if (lm_prev == 0)
5855 {
5856 sprintf (p, " main-lm=\"0x%lx\"", (unsigned long) lm_addr);
5857 p = p + strlen (p);
5858 }
5859 else
5860 {
5861 /* Not checking for error because reading may stop before
5862 we've got PATH_MAX worth of characters. */
5863 libname[0] = '\0';
5864 linux_read_memory (l_name, libname, sizeof (libname) - 1);
5865 libname[sizeof (libname) - 1] = '\0';
5866 if (libname[0] != '\0')
5867 {
5868 /* 6x the size for xml_escape_text below. */
5869 size_t len = 6 * strlen ((char *) libname);
5870 char *name;
5871
5872 if (!header_done)
5873 {
5874 /* Terminate `<library-list-svr4'. */
5875 *p++ = '>';
5876 header_done = 1;
5877 }
5878
5879 while (allocated < p - document + len + 200)
5880 {
5881 /* Expand to guarantee sufficient storage. */
5882 uintptr_t document_len = p - document;
5883
5884 document = xrealloc (document, 2 * allocated);
5885 allocated *= 2;
5886 p = document + document_len;
5887 }
5888
5889 name = xml_escape_text ((char *) libname);
5890 p += sprintf (p, "<library name=\"%s\" lm=\"0x%lx\" "
5891 "l_addr=\"0x%lx\" l_ld=\"0x%lx\"/>",
5892 name, (unsigned long) lm_addr,
5893 (unsigned long) l_addr, (unsigned long) l_ld);
5894 free (name);
5895 }
5896 }
5897
5898 lm_prev = lm_addr;
5899 lm_addr = l_next;
5900 }
5901
5902 if (!header_done)
5903 {
5904 /* Empty list; terminate `<library-list-svr4'. */
5905 strcpy (p, "/>");
5906 }
5907 else
5908 strcpy (p, "</library-list-svr4>");
5909
5910 document_len = strlen (document);
5911 if (offset < document_len)
5912 document_len -= offset;
5913 else
5914 document_len = 0;
5915 if (len > document_len)
5916 len = document_len;
5917
5918 memcpy (readbuf, document + offset, len);
5919 xfree (document);
5920
5921 return len;
5922 }
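
/* A reply document built above might look like this (addresses and
   names made up; line breaks added here for readability, the actual
   document is emitted without them):

   <library-list-svr4 version="1.0" main-lm="0x7ffff7ffe190">
   <library name="/lib64/libc.so.6" lm="0x7ffff7bd9000"
   l_addr="0x7ffff7a0f000" l_ld="0x7ffff7bd3b40"/>
   </library-list-svr4>

   If no library list is available yet, only
   <library-list-svr4 version="1.0"/> is returned.  */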
5923
5924 #ifdef HAVE_LINUX_BTRACE
5925
5926 /* See to_enable_btrace target method. */
5927
5928 static struct btrace_target_info *
5929 linux_low_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
5930 {
5931 struct btrace_target_info *tinfo;
5932
5933 tinfo = linux_enable_btrace (ptid, conf);
5934
5935 if (tinfo != NULL)
5936 {
5937 struct thread_info *thread = find_thread_ptid (ptid);
5938 struct regcache *regcache = get_thread_regcache (thread, 0);
5939
5940 tinfo->ptr_bits = register_size (regcache->tdesc, 0) * 8;
5941 }
5942
5943 return tinfo;
5944 }
5945
5946 /* See to_disable_btrace target method. */
5947
5948 static int
5949 linux_low_disable_btrace (struct btrace_target_info *tinfo)
5950 {
5951 enum btrace_error err;
5952
5953 err = linux_disable_btrace (tinfo);
5954 return (err == BTRACE_ERR_NONE ? 0 : -1);
5955 }
5956
5957 /* See to_read_btrace target method. */
5958
5959 static int
5960 linux_low_read_btrace (struct btrace_target_info *tinfo, struct buffer *buffer,
5961 int type)
5962 {
5963 struct btrace_data btrace;
5964 struct btrace_block *block;
5965 enum btrace_error err;
5966 int i;
5967
5968 btrace_data_init (&btrace);
5969
5970 err = linux_read_btrace (&btrace, tinfo, type);
5971 if (err != BTRACE_ERR_NONE)
5972 {
5973 if (err == BTRACE_ERR_OVERFLOW)
5974 buffer_grow_str0 (buffer, "E.Overflow.");
5975 else
5976 buffer_grow_str0 (buffer, "E.Generic Error.");
5977
5978 btrace_data_fini (&btrace);
5979 return -1;
5980 }
5981
5982 switch (btrace.format)
5983 {
5984 case BTRACE_FORMAT_NONE:
5985 buffer_grow_str0 (buffer, "E.No Trace.");
5986 break;
5987
5988 case BTRACE_FORMAT_BTS:
5989 buffer_grow_str (buffer, "<!DOCTYPE btrace SYSTEM \"btrace.dtd\">\n");
5990 buffer_grow_str (buffer, "<btrace version=\"1.0\">\n");
5991
5992 for (i = 0;
5993 VEC_iterate (btrace_block_s, btrace.variant.bts.blocks, i, block);
5994 i++)
5995 buffer_xml_printf (buffer, "<block begin=\"0x%s\" end=\"0x%s\"/>\n",
5996 paddress (block->begin), paddress (block->end));
5997
5998 buffer_grow_str0 (buffer, "</btrace>\n");
5999 break;
6000
6001 default:
6002 buffer_grow_str0 (buffer, "E.Unknown Trace Format.");
6003
6004 btrace_data_fini (&btrace);
6005 return -1;
6006 }
6007
6008 btrace_data_fini (&btrace);
6009 return 0;
6010 }
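
/* The BTS reply built above has roughly this shape (addresses made
   up):

   <!DOCTYPE btrace SYSTEM "btrace.dtd">
   <btrace version="1.0">
   <block begin="0x400560" end="0x400594"/>
   <block begin="0x400400" end="0x400415"/>
   </btrace>  */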
6011
6012 /* See to_btrace_conf target method. */
6013
6014 static int
6015 linux_low_btrace_conf (const struct btrace_target_info *tinfo,
6016 struct buffer *buffer)
6017 {
6018 const struct btrace_config *conf;
6019
6020 buffer_grow_str (buffer, "<!DOCTYPE btrace-conf SYSTEM \"btrace-conf.dtd\">\n");
6021 buffer_grow_str (buffer, "<btrace-conf version=\"1.0\">\n");
6022
6023 conf = linux_btrace_conf (tinfo);
6024 if (conf != NULL)
6025 {
6026 switch (conf->format)
6027 {
6028 case BTRACE_FORMAT_NONE:
6029 break;
6030
6031 case BTRACE_FORMAT_BTS:
6032 buffer_xml_printf (buffer, "<bts");
6033 buffer_xml_printf (buffer, " size=\"0x%x\"", conf->bts.size);
6034 buffer_xml_printf (buffer, " />\n");
6035 break;
6036 }
6037 }
6038
6039 buffer_grow_str0 (buffer, "</btrace-conf>\n");
6040 return 0;
6041 }
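
/* An example configuration reply from the function above, assuming
   the BTS format with a (made-up) 64 KiB buffer:

   <!DOCTYPE btrace-conf SYSTEM "btrace-conf.dtd">
   <btrace-conf version="1.0">
   <bts size="0x10000" />
   </btrace-conf>  */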
6042 #endif /* HAVE_LINUX_BTRACE */
6043
6044 static struct target_ops linux_target_ops = {
6045 linux_create_inferior,
6046 linux_attach,
6047 linux_kill,
6048 linux_detach,
6049 linux_mourn,
6050 linux_join,
6051 linux_thread_alive,
6052 linux_resume,
6053 linux_wait,
6054 linux_fetch_registers,
6055 linux_store_registers,
6056 linux_prepare_to_access_memory,
6057 linux_done_accessing_memory,
6058 linux_read_memory,
6059 linux_write_memory,
6060 linux_look_up_symbols,
6061 linux_request_interrupt,
6062 linux_read_auxv,
6063 linux_supports_z_point_type,
6064 linux_insert_point,
6065 linux_remove_point,
6066 linux_stopped_by_watchpoint,
6067 linux_stopped_data_address,
6068 #if defined(__UCLIBC__) && defined(HAS_NOMMU) \
6069 && defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) \
6070 && defined(PT_TEXT_END_ADDR)
6071 linux_read_offsets,
6072 #else
6073 NULL,
6074 #endif
6075 #ifdef USE_THREAD_DB
6076 thread_db_get_tls_address,
6077 #else
6078 NULL,
6079 #endif
6080 linux_qxfer_spu,
6081 hostio_last_error_from_errno,
6082 linux_qxfer_osdata,
6083 linux_xfer_siginfo,
6084 linux_supports_non_stop,
6085 linux_async,
6086 linux_start_non_stop,
6087 linux_supports_multi_process,
6088 #ifdef USE_THREAD_DB
6089 thread_db_handle_monitor_command,
6090 #else
6091 NULL,
6092 #endif
6093 linux_common_core_of_thread,
6094 linux_read_loadmap,
6095 linux_process_qsupported,
6096 linux_supports_tracepoints,
6097 linux_read_pc,
6098 linux_write_pc,
6099 linux_thread_stopped,
6100 NULL,
6101 linux_pause_all,
6102 linux_unpause_all,
6103 linux_stabilize_threads,
6104 linux_install_fast_tracepoint_jump_pad,
6105 linux_emit_ops,
6106 linux_supports_disable_randomization,
6107 linux_get_min_fast_tracepoint_insn_len,
6108 linux_qxfer_libraries_svr4,
6109 linux_supports_agent,
6110 #ifdef HAVE_LINUX_BTRACE
6111 linux_supports_btrace,
6112 linux_low_enable_btrace,
6113 linux_low_disable_btrace,
6114 linux_low_read_btrace,
6115 linux_low_btrace_conf,
6116 #else
6117 NULL,
6118 NULL,
6119 NULL,
6120 NULL,
6121 NULL,
6122 #endif
6123 linux_supports_range_stepping,
6124 };
6125
6126 static void
6127 linux_init_signals (void)
6128 {
6129 /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
6130 to find what the cancel signal actually is. */
6131 #ifndef __ANDROID__ /* Bionic doesn't use SIGRTMIN the way glibc does. */
6132 signal (__SIGRTMIN+1, SIG_IGN);
6133 #endif
6134 }
6135
6136 #ifdef HAVE_LINUX_REGSETS
6137 void
6138 initialize_regsets_info (struct regsets_info *info)
6139 {
6140 for (info->num_regsets = 0;
6141 info->regsets[info->num_regsets].size >= 0;
6142 info->num_regsets++)
6143 ;
6144 }
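
/* A regsets table as counted above is terminated by an entry whose
   size is negative; an arch file typically defines something like the
   following sketch (array and function names invented).  */
#if 0
static struct regset_info example_regsets[] =
  {
    { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
      GENERAL_REGS, example_fill_gregset, example_store_gregset },
    /* The size of -1 below stops the counting loop.  */
    { 0, 0, 0, -1, -1, NULL, NULL }
  };
#endif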
6145 #endif
6146
6147 void
6148 initialize_low (void)
6149 {
6150 struct sigaction sigchld_action;
6151 memset (&sigchld_action, 0, sizeof (sigchld_action));
6152 set_target_ops (&linux_target_ops);
6153 set_breakpoint_data (the_low_target.breakpoint,
6154 the_low_target.breakpoint_len);
6155 linux_init_signals ();
6156 linux_ptrace_init_warnings ();
6157
6158 sigchld_action.sa_handler = sigchld_handler;
6159 sigemptyset (&sigchld_action.sa_mask);
6160 sigchld_action.sa_flags = SA_RESTART;
6161 sigaction (SIGCHLD, &sigchld_action, NULL);
6162
6163 initialize_low_arch ();
6164 }