1 /* Memory breakpoint operations for the remote server for GDB.
2 Copyright (C) 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 Contributed by MontaVista Software.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23
/* The software breakpoint instruction for the current target, as
   provided by the backend via set_breakpoint_data.  NULL when the
   target does not support software breakpoints.  */
const unsigned char *breakpoint_data;

/* Length in bytes of the breakpoint instruction above.  */
int breakpoint_len;

/* Upper bound on breakpoint_len; sizes the shadow buffer embedded in
   each raw breakpoint.  */
#define MAX_BREAKPOINT_LEN 8
28
29 /* GDB will never try to install multiple breakpoints at the same
30 address. But, we need to keep track of internal breakpoints too,
31 and so we do need to be able to install multiple breakpoints at the
32 same address transparently. We keep track of two different, and
33 closely related structures. A raw breakpoint, which manages the
34 low level, close to the metal aspect of a breakpoint. It holds the
35 breakpoint address, and a buffer holding a copy of the instructions
36 that would be in memory had not been a breakpoint there (we call
37 that the shadow memory of the breakpoint). We occasionally need to
38 temporarilly uninsert a breakpoint without the client knowing about
39 it (e.g., to step over an internal breakpoint), so we keep an
40 `inserted' state associated with this low level breakpoint
41 structure. There can only be one such object for a given address.
42 Then, we have (a bit higher level) breakpoints. This structure
43 holds a callback to be called whenever a breakpoint is hit, a
44 high-level type, and a link to a low level raw breakpoint. There
45 can be many high-level breakpoints at the same address, and all of
46 them will point to the same raw breakpoint, which is reference
47 counted. */
48
/* The low level, physical, raw breakpoint.  There is exactly one of
   these per insertion address; high level breakpoints share it via
   reference counting.  */
struct raw_breakpoint
{
  /* Next raw breakpoint in the process's singly linked list.  */
  struct raw_breakpoint *next;

  /* A reference count.  Each high level breakpoint referencing this
     raw breakpoint accounts for one reference.  */
  int refcount;

  /* The breakpoint's insertion address.  There can only be one raw
     breakpoint for a given PC.  */
  CORE_ADDR pc;

  /* The breakpoint's shadow memory: a copy of the instruction bytes
     the breakpoint replaced in the inferior.  */
  unsigned char old_data[MAX_BREAKPOINT_LEN];

  /* Non-zero if this breakpoint is currently inserted in the
     inferior.  */
  int inserted;

  /* Non-zero if this breakpoint is currently disabled because we no
     longer detect it as inserted (e.g., after a shared library
     unload).  */
  int shlib_disabled;
};
73
/* The type of a (high level) breakpoint.  */
enum bkpt_type
  {
    /* A GDB breakpoint, requested with a Z0 packet.  */
    gdb_breakpoint,

    /* A basic-software-single-step breakpoint, used to complete a
       step-over.  */
    reinsert_breakpoint,

    /* Any other breakpoint type that doesn't require specific
       treatment goes here.  E.g., an event breakpoint.  */
    other_breakpoint,
  };
87
/* A high level (in gdbserver's perspective) breakpoint.  Several of
   these may reference the same raw breakpoint.  */
struct breakpoint
{
  /* Next breakpoint in the process's singly linked list.  */
  struct breakpoint *next;

  /* The breakpoint's type.  */
  enum bkpt_type type;

  /* Link to this breakpoint's raw breakpoint.  This is always
     non-NULL.  */
  struct raw_breakpoint *raw;

  /* Function to call when we hit this breakpoint.  If non-NULL and
     it returns 1, the breakpoint is deleted; if it returns 0, or if
     this callback is NULL, the breakpoint is left inserted.  */
  int (*handler) (CORE_ADDR);
};
105
106 static struct raw_breakpoint *
107 find_raw_breakpoint_at (CORE_ADDR where)
108 {
109 struct process_info *proc = current_process ();
110 struct raw_breakpoint *bp;
111
112 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
113 if (bp->pc == where)
114 return bp;
115
116 return NULL;
117 }
118
119 static struct raw_breakpoint *
120 set_raw_breakpoint_at (CORE_ADDR where)
121 {
122 struct process_info *proc = current_process ();
123 struct raw_breakpoint *bp;
124 int err;
125
126 if (breakpoint_data == NULL)
127 error ("Target does not support breakpoints.");
128
129 bp = find_raw_breakpoint_at (where);
130 if (bp != NULL)
131 {
132 bp->refcount++;
133 return bp;
134 }
135
136 bp = xcalloc (1, sizeof (*bp));
137 bp->pc = where;
138 bp->refcount = 1;
139
140 /* Note that there can be fast tracepoint jumps installed in the
141 same memory range, so to get at the original memory, we need to
142 use read_inferior_memory, which masks those out. */
143 err = read_inferior_memory (where, bp->old_data, breakpoint_len);
144 if (err != 0)
145 {
146 if (debug_threads)
147 fprintf (stderr,
148 "Failed to read shadow memory of"
149 " breakpoint at 0x%s (%s).\n",
150 paddress (where), strerror (err));
151 free (bp);
152 return NULL;
153 }
154
155 err = (*the_target->write_memory) (where, breakpoint_data,
156 breakpoint_len);
157 if (err != 0)
158 {
159 if (debug_threads)
160 fprintf (stderr,
161 "Failed to insert breakpoint at 0x%s (%s).\n",
162 paddress (where), strerror (err));
163 free (bp);
164 return NULL;
165 }
166
167 /* Link the breakpoint in. */
168 bp->inserted = 1;
169 bp->next = proc->raw_breakpoints;
170 proc->raw_breakpoints = bp;
171 return bp;
172 }
173
174 /* Notice that breakpoint traps are always installed on top of fast
175 tracepoint jumps. This is even if the fast tracepoint is installed
176 at a later time compared to when the breakpoint was installed.
177 This means that a stopping breakpoint or tracepoint has higher
178 "priority". In turn, this allows having fast and slow tracepoints
179 (and breakpoints) at the same address behave correctly. */
180
181
/* A fast tracepoint jump: a jump instruction planted over the
   original code, redirecting execution to a tracepoint's jump pad.  */

struct fast_tracepoint_jump
{
  /* Next jump in the process's singly linked list.  */
  struct fast_tracepoint_jump *next;

  /* A reference count.  GDB can install more than one fast tracepoint
     at the same address (each with its own action list, for
     example).  */
  int refcount;

  /* The fast tracepoint's insertion address.  There can only be one
     of these for a given PC.  */
  CORE_ADDR pc;

  /* Non-zero if this fast tracepoint jump is currently inserted in
     the inferior.  */
  int inserted;

  /* The length of the jump instruction.  */
  int length;

  /* A poor-man's flexible array member, holding both the jump
     instruction to insert, and a copy of the instruction that would
     be in memory had not been a jump there (the shadow memory of the
     tracepoint jump).  Each part is LENGTH bytes long.  */
  unsigned char insn_and_shadow[0];
};
210
/* Fast tracepoint FP's jump instruction to insert (first half of the
   insn_and_shadow buffer).  */
#define fast_tracepoint_jump_insn(fp) \
  ((fp)->insn_and_shadow + 0)

/* The shadow memory of fast tracepoint jump FP (second half of the
   insn_and_shadow buffer, starting at offset LENGTH).  */
#define fast_tracepoint_jump_shadow(fp) \
  ((fp)->insn_and_shadow + (fp)->length)
218
219
220 /* Return the fast tracepoint jump set at WHERE. */
221
222 static struct fast_tracepoint_jump *
223 find_fast_tracepoint_jump_at (CORE_ADDR where)
224 {
225 struct process_info *proc = current_process ();
226 struct fast_tracepoint_jump *jp;
227
228 for (jp = proc->fast_tracepoint_jumps; jp != NULL; jp = jp->next)
229 if (jp->pc == where)
230 return jp;
231
232 return NULL;
233 }
234
235 int
236 fast_tracepoint_jump_here (CORE_ADDR where)
237 {
238 struct fast_tracepoint_jump *jp = find_fast_tracepoint_jump_at (where);
239
240 return (jp != NULL);
241 }
242
/* Drop one reference to the fast tracepoint jump TODEL, removing the
   jump instruction from the inferior and freeing the object when the
   reference count reaches zero.  Returns 0 on success, an errno
   value otherwise.  */

int
delete_fast_tracepoint_jump (struct fast_tracepoint_jump *todel)
{
  struct fast_tracepoint_jump *bp, **bp_link;
  int ret;
  struct process_info *proc = current_process ();

  bp = proc->fast_tracepoint_jumps;
  bp_link = &proc->fast_tracepoint_jumps;

  while (bp)
    {
      if (bp == todel)
	{
	  if (--bp->refcount == 0)
	    {
	      /* Remember the old list head in case we need to undo
		 the unlink below.  */
	      struct fast_tracepoint_jump *prev_bp_link = *bp_link;

	      /* Unlink it.  */
	      *bp_link = bp->next;

	      /* Since there can be breakpoints inserted in the same
		 address range, we use `write_inferior_memory', which
		 takes care of layering breakpoints on top of fast
		 tracepoints, and on top of the buffer we pass it.
		 This works because we've already unlinked the fast
		 tracepoint jump above.  Also note that we need to
		 pass the current shadow contents, because
		 write_inferior_memory updates any shadow memory with
		 what we pass here, and we want that to be a nop.  */
	      ret = write_inferior_memory (bp->pc,
					   fast_tracepoint_jump_shadow (bp),
					   bp->length);
	      if (ret != 0)
		{
		  /* Something went wrong, relink the jump.  */
		  *bp_link = prev_bp_link;

		  if (debug_threads)
		    fprintf (stderr,
			     "Failed to uninsert fast tracepoint jump "
			     "at 0x%s (%s) while deleting it.\n",
			     paddress (bp->pc), strerror (ret));
		  return ret;
		}

	      free (bp);
	    }

	  return 0;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }

  warning ("Could not find fast tracepoint jump in list.");
  return ENOENT;
}
304
305 void
306 inc_ref_fast_tracepoint_jump (struct fast_tracepoint_jump *jp)
307 {
308 jp->refcount++;
309 }
310
/* Install a fast tracepoint jump of LENGTH bytes (instruction INSN)
   at WHERE, or share an already existing one by bumping its
   reference count.  Returns the jump object, or NULL if the
   inferior's memory could not be accessed.  */

struct fast_tracepoint_jump *
set_fast_tracepoint_jump (CORE_ADDR where,
			  unsigned char *insn, ULONGEST length)
{
  struct process_info *proc = current_process ();
  struct fast_tracepoint_jump *jp;
  int err;

  /* We refcount fast tracepoint jumps.  Check if we already know
     about a jump at this address.  */
  jp = find_fast_tracepoint_jump_at (where);
  if (jp != NULL)
    {
      jp->refcount++;
      return jp;
    }

  /* We don't, so create a new object.  Double the length, because the
     flexible array member holds both the jump insn, and the
     shadow.  */
  jp = xcalloc (1, sizeof (*jp) + (length * 2));
  jp->pc = where;
  jp->length = length;
  memcpy (fast_tracepoint_jump_insn (jp), insn, length);
  jp->refcount = 1;

  /* Note that there can be trap breakpoints inserted in the same
     address range.  To access the original memory contents, we use
     `read_inferior_memory', which masks out breakpoints.  */
  err = read_inferior_memory (where,
			      fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to read shadow memory of"
		 " fast tracepoint at 0x%s (%s).\n",
		 paddress (where), strerror (err));
      free (jp);
      return NULL;
    }

  /* Link the jump in.  */
  jp->inserted = 1;
  jp->next = proc->fast_tracepoint_jumps;
  proc->fast_tracepoint_jumps = jp;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, on top of the
     buffer we pass it.  This works because we've already linked in
     the fast tracepoint jump above.  Also note that we need to pass
     the current shadow contents, because write_inferior_memory
     updates any shadow memory with what we pass here, and we want
     that to be a nop.  */
  err = write_inferior_memory (where, fast_tracepoint_jump_shadow (jp),
			       length);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to insert fast tracepoint jump at 0x%s (%s).\n",
		 paddress (where), strerror (err));

      /* Unlink it.  */
      proc->fast_tracepoint_jumps = jp->next;
      free (jp);

      return NULL;
    }

  return jp;
}
384
/* Temporarily remove the fast tracepoint jump at PC from the
   inferior's memory, without forgetting about it (the `inserted'
   flag is cleared so it can be reinserted later).  */

void
uninsert_fast_tracepoint_jumps_at (CORE_ADDR pc)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (pc);
  if (jp == NULL)
    {
      /* This can happen when we remove all breakpoints while handling
	 a step-over.  */
      if (debug_threads)
	fprintf (stderr,
		 "Could not find fast tracepoint jump at 0x%s "
		 "in list (uninserting).\n",
		 paddress (pc));
      return;
    }

  if (jp->inserted)
    {
      jp->inserted = 0;

      /* Since there can be trap breakpoints inserted in the same
	 address range, we use `write_inferior_memory', which takes
	 care of layering breakpoints on top of fast tracepoints, and
	 on top of the buffer we pass it.  This works because we've
	 already marked the fast tracepoint jump uninserted above.
	 Also note that we need to pass the current shadow contents,
	 because write_inferior_memory updates any shadow memory with
	 what we pass here, and we want that to be a nop.  */
      err = write_inferior_memory (jp->pc,
				   fast_tracepoint_jump_shadow (jp),
				   jp->length);
      if (err != 0)
	{
	  /* Restore the flag so a later reinsert is not fooled.  */
	  jp->inserted = 1;

	  if (debug_threads)
	    fprintf (stderr,
		     "Failed to uninsert fast tracepoint jump at 0x%s (%s).\n",
		     paddress (pc), strerror (err));
	}
    }
}
431
/* Put back in the inferior's memory a fast tracepoint jump at WHERE
   that was previously uninserted with
   uninsert_fast_tracepoint_jumps_at.  */

void
reinsert_fast_tracepoint_jumps_at (CORE_ADDR where)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (where);
  if (jp == NULL)
    {
      /* This can happen when we remove breakpoints when a tracepoint
	 hit causes a tracing stop, while handling a step-over.  */
      if (debug_threads)
	fprintf (stderr,
		 "Could not find fast tracepoint jump at 0x%s "
		 "in list (reinserting).\n",
		 paddress (where));
      return;
    }

  if (jp->inserted)
    error ("Jump already inserted at reinsert time.");

  jp->inserted = 1;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, and on top of
     the buffer we pass it.  This works because we've already marked
     the fast tracepoint jump inserted above.  Also note that we need
     to pass the current shadow contents, because
     write_inferior_memory updates any shadow memory with what we pass
     here, and we want that to be a nop.  */
  err = write_inferior_memory (where,
			       fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      /* The write failed; we are not inserted after all.  */
      jp->inserted = 0;

      if (debug_threads)
	fprintf (stderr,
		 "Failed to reinsert fast tracepoint jump at 0x%s (%s).\n",
		 paddress (where), strerror (err));
    }
}
476
477 struct breakpoint *
478 set_breakpoint_at (CORE_ADDR where, int (*handler) (CORE_ADDR))
479 {
480 struct process_info *proc = current_process ();
481 struct breakpoint *bp;
482 struct raw_breakpoint *raw;
483
484 raw = set_raw_breakpoint_at (where);
485
486 if (raw == NULL)
487 {
488 /* warn? */
489 return NULL;
490 }
491
492 bp = xcalloc (1, sizeof (struct breakpoint));
493 bp->type = other_breakpoint;
494
495 bp->raw = raw;
496 bp->handler = handler;
497
498 bp->next = proc->breakpoints;
499 proc->breakpoints = bp;
500
501 return bp;
502 }
503
/* Remove the raw breakpoint TODEL from PROC's list, restoring the
   original instruction bytes in the inferior if it is currently
   inserted, and free it.  Returns 0 on success, an errno value
   otherwise.  */

static int
delete_raw_breakpoint (struct process_info *proc, struct raw_breakpoint *todel)
{
  struct raw_breakpoint *bp, **bp_link;
  int ret;

  bp = proc->raw_breakpoints;
  bp_link = &proc->raw_breakpoints;

  while (bp)
    {
      if (bp == todel)
	{
	  if (bp->inserted)
	    {
	      /* Remember the old list head in case we need to undo
		 the unlink below.  */
	      struct raw_breakpoint *prev_bp_link = *bp_link;

	      *bp_link = bp->next;

	      /* Since there can be fast tracepoint jumps inserted in
		 the same address range, we use
		 `write_inferior_memory', which takes care of layering
		 breakpoints on top of fast tracepoints, and on top of
		 the buffer we pass it.  This works because we've
		 already unlinked the raw breakpoint above.  Also note
		 that we need to pass the current shadow contents,
		 because write_inferior_memory updates any shadow
		 memory with what we pass here, and we want that to be
		 a nop.  */
	      ret = write_inferior_memory (bp->pc, bp->old_data,
					   breakpoint_len);
	      if (ret != 0)
		{
		  /* Something went wrong, relink the breakpoint.  */
		  *bp_link = prev_bp_link;

		  if (debug_threads)
		    fprintf (stderr,
			     "Failed to uninsert raw breakpoint "
			     "at 0x%s (%s) while deleting it.\n",
			     paddress (bp->pc), strerror (ret));
		  return ret;
		}

	    }
	  else
	    *bp_link = bp->next;

	  free (bp);
	  return 0;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }

  warning ("Could not find raw breakpoint in list.");
  return ENOENT;
}
564
565 static int
566 release_breakpoint (struct process_info *proc, struct breakpoint *bp)
567 {
568 int newrefcount;
569 int ret;
570
571 newrefcount = bp->raw->refcount - 1;
572 if (newrefcount == 0)
573 {
574 ret = delete_raw_breakpoint (proc, bp->raw);
575 if (ret != 0)
576 return ret;
577 }
578 else
579 bp->raw->refcount = newrefcount;
580
581 free (bp);
582
583 return 0;
584 }
585
586 static int
587 delete_breakpoint_1 (struct process_info *proc, struct breakpoint *todel)
588 {
589 struct breakpoint *bp, **bp_link;
590 int err;
591
592 bp = proc->breakpoints;
593 bp_link = &proc->breakpoints;
594
595 while (bp)
596 {
597 if (bp == todel)
598 {
599 *bp_link = bp->next;
600
601 err = release_breakpoint (proc, bp);
602 if (err != 0)
603 return err;
604
605 bp = *bp_link;
606 return 0;
607 }
608 else
609 {
610 bp_link = &bp->next;
611 bp = *bp_link;
612 }
613 }
614
615 warning ("Could not find breakpoint in list.");
616 return ENOENT;
617 }
618
/* Delete the high level breakpoint TODEL from the current process.
   Returns 0 on success, an errno value otherwise.  */

int
delete_breakpoint (struct breakpoint *todel)
{
  return delete_breakpoint_1 (current_process (), todel);
}
625
626 static struct breakpoint *
627 find_gdb_breakpoint_at (CORE_ADDR where)
628 {
629 struct process_info *proc = current_process ();
630 struct breakpoint *bp;
631
632 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
633 if (bp->type == gdb_breakpoint && bp->raw->pc == where)
634 return bp;
635
636 return NULL;
637 }
638
/* Handle a Z0 packet: set a GDB breakpoint at WHERE.  Returns 0 on
   success, 1 if the target does not support software breakpoints, -1
   on failure.  */

int
set_gdb_breakpoint_at (CORE_ADDR where)
{
  struct breakpoint *bp;

  if (breakpoint_data == NULL)
    return 1;

  /* If we see GDB inserting a second breakpoint at the same address,
     then the first breakpoint must have disappeared due to a shared
     library unload.  On targets where the shared libraries are
     handled by userspace, like SVR4, for example, GDBserver can't
     tell if a library was loaded or unloaded.  Since we refcount
     breakpoints, if we didn't do this, we'd just increase the
     refcount of the previous breakpoint at this address, but the trap
     was not planted in the inferior anymore, thus the breakpoint
     would never be hit.  */
  bp = find_gdb_breakpoint_at (where);
  if (bp != NULL)
    {
      delete_gdb_breakpoint_at (where);

      /* Might as well validate all other breakpoints.  */
      validate_breakpoints ();
    }

  bp = set_breakpoint_at (where, NULL);
  if (bp == NULL)
    return -1;

  bp->type = gdb_breakpoint;
  return 0;
}
672
673 int
674 delete_gdb_breakpoint_at (CORE_ADDR addr)
675 {
676 struct breakpoint *bp;
677 int err;
678
679 if (breakpoint_data == NULL)
680 return 1;
681
682 bp = find_gdb_breakpoint_at (addr);
683 if (bp == NULL)
684 return -1;
685
686 err = delete_breakpoint (bp);
687 if (err)
688 return -1;
689
690 return 0;
691 }
692
693 int
694 gdb_breakpoint_here (CORE_ADDR where)
695 {
696 struct breakpoint *bp = find_gdb_breakpoint_at (where);
697
698 return (bp != NULL);
699 }
700
701 void
702 set_reinsert_breakpoint (CORE_ADDR stop_at)
703 {
704 struct breakpoint *bp;
705
706 bp = set_breakpoint_at (stop_at, NULL);
707 bp->type = reinsert_breakpoint;
708 }
709
/* Delete all single-step (reinsert) breakpoints of the current
   process, e.g., once a step-over finishes.  */

void
delete_reinsert_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->type == reinsert_breakpoint)
	{
	  /* Unlink, release, and continue from the element now at
	     this link slot.  */
	  *bp_link = bp->next;
	  release_breakpoint (proc, bp);
	  bp = *bp_link;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }
}
734
/* Temporarily remove the raw breakpoint BP from the inferior's
   memory, restoring the original bytes, without deleting the
   breakpoint object.  */

static void
uninsert_raw_breakpoint (struct raw_breakpoint *bp)
{
  if (bp->inserted)
    {
      int err;

      bp->inserted = 0;
      /* Since there can be fast tracepoint jumps inserted in the same
	 address range, we use `write_inferior_memory', which takes
	 care of layering breakpoints on top of fast tracepoints, and
	 on top of the buffer we pass it.  This works because we've
	 already marked the breakpoint uninserted above.  Also note
	 that we need to pass the current shadow contents, because
	 write_inferior_memory updates any shadow memory with what we
	 pass here, and we want that to be a nop.  */
      err = write_inferior_memory (bp->pc, bp->old_data,
				   breakpoint_len);
      if (err != 0)
	{
	  /* Restore the flag; the trap is still in the inferior.  */
	  bp->inserted = 1;

	  if (debug_threads)
	    fprintf (stderr,
		     "Failed to uninsert raw breakpoint at 0x%s (%s).\n",
		     paddress (bp->pc), strerror (err));
	}
    }
}
764
765 void
766 uninsert_breakpoints_at (CORE_ADDR pc)
767 {
768 struct raw_breakpoint *bp;
769
770 bp = find_raw_breakpoint_at (pc);
771 if (bp == NULL)
772 {
773 /* This can happen when we remove all breakpoints while handling
774 a step-over. */
775 if (debug_threads)
776 fprintf (stderr,
777 "Could not find breakpoint at 0x%s "
778 "in list (uninserting).\n",
779 paddress (pc));
780 return;
781 }
782
783 if (bp->inserted)
784 uninsert_raw_breakpoint (bp);
785 }
786
787 void
788 uninsert_all_breakpoints (void)
789 {
790 struct process_info *proc = current_process ();
791 struct raw_breakpoint *bp;
792
793 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
794 if (bp->inserted)
795 uninsert_raw_breakpoint (bp);
796 }
797
798 static void
799 reinsert_raw_breakpoint (struct raw_breakpoint *bp)
800 {
801 int err;
802
803 if (bp->inserted)
804 error ("Breakpoint already inserted at reinsert time.");
805
806 err = (*the_target->write_memory) (bp->pc, breakpoint_data,
807 breakpoint_len);
808 if (err == 0)
809 bp->inserted = 1;
810 else if (debug_threads)
811 fprintf (stderr,
812 "Failed to reinsert breakpoint at 0x%s (%s).\n",
813 paddress (bp->pc), strerror (err));
814 }
815
816 void
817 reinsert_breakpoints_at (CORE_ADDR pc)
818 {
819 struct raw_breakpoint *bp;
820
821 bp = find_raw_breakpoint_at (pc);
822 if (bp == NULL)
823 {
824 /* This can happen when we remove all breakpoints while handling
825 a step-over. */
826 if (debug_threads)
827 fprintf (stderr,
828 "Could not find raw breakpoint at 0x%s "
829 "in list (reinserting).\n",
830 paddress (pc));
831 return;
832 }
833
834 reinsert_raw_breakpoint (bp);
835 }
836
837 void
838 reinsert_all_breakpoints (void)
839 {
840 struct process_info *proc = current_process ();
841 struct raw_breakpoint *bp;
842
843 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
844 if (!bp->inserted)
845 reinsert_raw_breakpoint (bp);
846 }
847
/* See if any breakpoint claims ownership of STOP_PC, running its
   handler if so; a handler returning 1 causes its breakpoint to be
   deleted.  */

void
check_breakpoints (CORE_ADDR stop_pc)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->raw->pc == stop_pc)
	{
	  if (!bp->raw->inserted)
	    {
	      warning ("Hit a removed breakpoint?");
	      return;
	    }

	  /* The handler decides whether the breakpoint survives.  */
	  if (bp->handler != NULL && (*bp->handler) (stop_pc))
	    {
	      /* Unlink and release; continue from the element that
		 took this slot.  */
	      *bp_link = bp->next;

	      release_breakpoint (proc, bp);

	      bp = *bp_link;
	      continue;
	    }
	}

      bp_link = &bp->next;
      bp = *bp_link;
    }
}
882
883 void
884 set_breakpoint_data (const unsigned char *bp_data, int bp_len)
885 {
886 breakpoint_data = bp_data;
887 breakpoint_len = bp_len;
888 }
889
890 int
891 breakpoint_here (CORE_ADDR addr)
892 {
893 return (find_raw_breakpoint_at (addr) != NULL);
894 }
895
896 int
897 breakpoint_inserted_here (CORE_ADDR addr)
898 {
899 struct raw_breakpoint *bp;
900
901 bp = find_raw_breakpoint_at (addr);
902
903 return (bp != NULL && bp->inserted);
904 }
905
/* Check that the breakpoint instruction of BP is still present in
   the inferior.  If it is gone (e.g., the shared library containing
   it was unloaded), mark BP as uninserted and disabled.  Returns 1
   if the breakpoint is still in place, 0 otherwise.  */

static int
validate_inserted_breakpoint (struct raw_breakpoint *bp)
{
  unsigned char *buf;
  int err;

  gdb_assert (bp->inserted);

  buf = alloca (breakpoint_len);
  /* Read raw memory (not masked) and compare against the breakpoint
     instruction we planted.  */
  err = (*the_target->read_memory) (bp->pc, buf, breakpoint_len);
  if (err || memcmp (buf, breakpoint_data, breakpoint_len) != 0)
    {
      /* Tag it as gone.  */
      bp->inserted = 0;
      bp->shlib_disabled = 1;
      return 0;
    }

  return 1;
}
926
927 static void
928 delete_disabled_breakpoints (void)
929 {
930 struct process_info *proc = current_process ();
931 struct breakpoint *bp, *next;
932
933 for (bp = proc->breakpoints; bp != NULL; bp = next)
934 {
935 next = bp->next;
936 if (bp->raw->shlib_disabled)
937 delete_breakpoint_1 (proc, bp);
938 }
939 }
940
941 /* Check if breakpoints we inserted still appear to be inserted. They
942 may disappear due to a shared library unload, and worse, a new
943 shared library may be reloaded at the same address as the
944 previously unloaded one. If that happens, we should make sure that
945 the shadow memory of the old breakpoints isn't used when reading or
946 writing memory. */
947
void
validate_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp;

  /* First pass tags stale breakpoints as shlib_disabled...  */
  for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
    {
      if (bp->raw->inserted)
	validate_inserted_breakpoint (bp->raw);
    }

  /* ... second pass deletes them (safe to do now that we are no
     longer iterating).  */
  delete_disabled_breakpoints ();
}
962
/* Fix up the MEM_LEN bytes read from the inferior starting at
   MEM_ADDR into BUF: any bytes belonging to an inserted fast
   tracepoint jump or breakpoint are replaced with the shadow
   contents, so the client sees the original program bytes.  */

void
check_mem_read (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  /* Fast tracepoint jumps first; breakpoint shadows are layered on
     top below.  */
  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR bp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip jumps that don't overlap the read range.  */
      if (mem_addr >= bp_end)
	continue;
      if (jp->pc >= mem_end)
	continue;

      /* Intersect [jp->pc, bp_end) with [mem_addr, mem_end).  */
      start = jp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      if (jp->inserted)
	memcpy (buf + buf_offset,
		fast_tracepoint_jump_shadow (jp) + copy_offset,
		copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip breakpoints that don't overlap the read range.  */
      if (mem_addr >= bp_end)
	continue;
      if (bp->pc >= mem_end)
	continue;

      /* Intersect [bp->pc, bp_end) with [mem_addr, mem_end).  */
      start = bp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      if (bp->inserted)
	{
	  /* Only substitute the shadow if the trap is really still
	     there; otherwise tag the breakpoint for deletion.  */
	  if (validate_inserted_breakpoint (bp))
	    memcpy (buf + buf_offset, bp->old_data + copy_offset, copy_len);
	  else
	    disabled_one = 1;
	}
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
1036
/* Fix up a write of MYADDR (MEM_LEN bytes, destined for inferior
   address MEM_ADDR) into BUF: bytes overlapping an inserted fast
   tracepoint jump or breakpoint are redirected into the shadow
   buffers, while BUF keeps the jump/trap instruction bytes so the
   planted instructions are preserved in the inferior.  */

void
check_mem_write (CORE_ADDR mem_addr, unsigned char *buf,
		 const unsigned char *myaddr, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  /* First fast tracepoint jumps, then breakpoint traps on top.  */

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR jp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip jumps that don't overlap the write range.  */
      if (mem_addr >= jp_end)
	continue;
      if (jp->pc >= mem_end)
	continue;

      /* Intersect [jp->pc, jp_end) with [mem_addr, mem_end).  */
      start = jp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = jp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      /* Stash the client's bytes in the shadow, and keep the jump
	 instruction in what actually gets written.  */
      memcpy (fast_tracepoint_jump_shadow (jp) + copy_offset,
	      myaddr + buf_offset, copy_len);
      if (jp->inserted)
	memcpy (buf + buf_offset,
		fast_tracepoint_jump_insn (jp) + copy_offset, copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip breakpoints that don't overlap the write range.  */
      if (mem_addr >= bp_end)
	continue;
      if (bp->pc >= mem_end)
	continue;

      /* Intersect [bp->pc, bp_end) with [mem_addr, mem_end).  */
      start = bp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      /* Stash the client's bytes in the shadow, and keep the trap
	 instruction in what actually gets written — but only if the
	 trap is really still planted.  */
      memcpy (bp->old_data + copy_offset, myaddr + buf_offset, copy_len);
      if (bp->inserted)
	{
	  if (validate_inserted_breakpoint (bp))
	    memcpy (buf + buf_offset, breakpoint_data + copy_offset, copy_len);
	  else
	    disabled_one = 1;
	}
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
1115
1116 /* Delete all breakpoints, and un-insert them from the inferior. */
1117
1118 void
1119 delete_all_breakpoints (void)
1120 {
1121 struct process_info *proc = current_process ();
1122
1123 while (proc->breakpoints)
1124 delete_breakpoint_1 (proc, proc->breakpoints);
1125 }
1126
1127 /* Clear the "inserted" flag in all breakpoints. */
1128
1129 void
1130 mark_breakpoints_out (struct process_info *proc)
1131 {
1132 struct raw_breakpoint *raw_bp;
1133
1134 for (raw_bp = proc->raw_breakpoints; raw_bp != NULL; raw_bp = raw_bp->next)
1135 raw_bp->inserted = 0;
1136 }
1137
/* Release all breakpoints of PROC, but do not try to un-insert them
   from the inferior (used when the inferior is already gone).  */

void
free_all_breakpoints (struct process_info *proc)
{
  /* Clearing the inserted flags first means delete_breakpoint_1 will
     not try to write to the (gone) inferior.  */
  mark_breakpoints_out (proc);

  /* Note: use PROC explicitly instead of deferring to
     delete_all_breakpoints --- CURRENT_INFERIOR may already have been
     released when we get here.  There should be no call to
     current_process from here on.  */
  while (proc->breakpoints)
    delete_breakpoint_1 (proc, proc->breakpoints);
}