*** empty log message ***
[deliverable/binutils-gdb.git] / gdb / gdbserver / mem-break.c
1 /* Memory breakpoint operations for the remote server for GDB.
2 Copyright (C) 2002, 2003, 2005, 2007, 2008, 2009, 2010, 2011
3 Free Software Foundation, Inc.
4
5 Contributed by MontaVista Software.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23
24 const unsigned char *breakpoint_data;
25 int breakpoint_len;
26
27 #define MAX_BREAKPOINT_LEN 8
28
29 /* GDB will never try to install multiple breakpoints at the same
30 address. But, we need to keep track of internal breakpoints too,
31 and so we do need to be able to install multiple breakpoints at the
32 same address transparently. We keep track of two different, and
33 closely related structures. A raw breakpoint, which manages the
34 low level, close to the metal aspect of a breakpoint. It holds the
35 breakpoint address, and a buffer holding a copy of the instructions
36 that would be in memory had not been a breakpoint there (we call
37 that the shadow memory of the breakpoint). We occasionally need to
38 temporarilly uninsert a breakpoint without the client knowing about
39 it (e.g., to step over an internal breakpoint), so we keep an
40 `inserted' state associated with this low level breakpoint
41 structure. There can only be one such object for a given address.
42 Then, we have (a bit higher level) breakpoints. This structure
43 holds a callback to be called whenever a breakpoint is hit, a
44 high-level type, and a link to a low level raw breakpoint. There
45 can be many high-level breakpoints at the same address, and all of
46 them will point to the same raw breakpoint, which is reference
47 counted. */
48
/* The low level, physical, raw breakpoint.  */
struct raw_breakpoint
{
  /* Next raw breakpoint in the process's singly-linked list.  */
  struct raw_breakpoint *next;

  /* A reference count.  Each high level breakpoint referencing this
     raw breakpoint accounts for one reference.  */
  int refcount;

  /* The breakpoint's insertion address.  There can only be one raw
     breakpoint for a given PC.  */
  CORE_ADDR pc;

  /* The breakpoint's shadow memory: a copy of the instruction bytes
     that the trap instruction replaced.  Used to mask the trap out of
     memory reads, and to restore the original contents when the
     breakpoint is removed.  */
  unsigned char old_data[MAX_BREAKPOINT_LEN];

  /* Non-zero if this breakpoint is currently inserted in the
     inferior.  */
  int inserted;

  /* Non-zero if this breakpoint is currently disabled because we no
     longer detect it as inserted (e.g., a shared library unload made
     the trap bytes disappear).  */
  int shlib_disabled;
};
73
/* The type of a breakpoint.  */
enum bkpt_type
  {
    /* A GDB breakpoint, requested with a Z0 packet.  */
    gdb_breakpoint,

    /* A basic-software-single-step breakpoint, planted at the
       address(es) execution may resume at when stepping over an
       instruction.  */
    reinsert_breakpoint,

    /* Any other breakpoint type that doesn't require specific
       treatment goes here.  E.g., an event breakpoint.  */
    other_breakpoint,
  };
87
/* A high level (in gdbserver's perspective) breakpoint.  */
struct breakpoint
{
  /* Next breakpoint in the process's singly-linked list.  */
  struct breakpoint *next;

  /* The breakpoint's type.  */
  enum bkpt_type type;

  /* Link to this breakpoint's raw breakpoint.  This is always
     non-NULL.  Several high level breakpoints at the same address
     share one raw breakpoint (reference counted).  */
  struct raw_breakpoint *raw;

  /* Function to call when we hit this breakpoint.  If it returns 1,
     the breakpoint shall be deleted; if it returns 0, or if this
     callback is NULL, it will be left inserted.  */
  int (*handler) (CORE_ADDR);
};
105
106 static struct raw_breakpoint *
107 find_raw_breakpoint_at (CORE_ADDR where)
108 {
109 struct process_info *proc = current_process ();
110 struct raw_breakpoint *bp;
111
112 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
113 if (bp->pc == where)
114 return bp;
115
116 return NULL;
117 }
118
/* Return a raw breakpoint at WHERE, creating it and planting the trap
   instruction in the inferior if one doesn't already exist.  If a raw
   breakpoint is already set there, just grab another reference to it.
   Returns NULL if the inferior's memory could not be read or
   written.  */

static struct raw_breakpoint *
set_raw_breakpoint_at (CORE_ADDR where)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp;
  int err;

  if (breakpoint_data == NULL)
    error ("Target does not support breakpoints.");

  /* Raw breakpoints are shared: there can only be one per PC.  */
  bp = find_raw_breakpoint_at (where);
  if (bp != NULL)
    {
      bp->refcount++;
      return bp;
    }

  bp = xcalloc (1, sizeof (*bp));
  bp->pc = where;
  bp->refcount = 1;

  /* Save the shadow memory before planting the trap.  Note that there
     can be fast tracepoint jumps installed in the same memory range,
     so to get at the original memory, we need to use
     read_inferior_memory, which masks those out.  */
  err = read_inferior_memory (where, bp->old_data, breakpoint_len);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to read shadow memory of"
		 " breakpoint at 0x%s (%s).\n",
		 paddress (where), strerror (err));
      free (bp);
      return NULL;
    }

  /* Plant the trap instruction.  */
  err = (*the_target->write_memory) (where, breakpoint_data,
				     breakpoint_len);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to insert breakpoint at 0x%s (%s).\n",
		 paddress (where), strerror (err));
      free (bp);
      return NULL;
    }

  /* Link the breakpoint in.  */
  bp->inserted = 1;
  bp->next = proc->raw_breakpoints;
  proc->raw_breakpoints = bp;
  return bp;
}
173
174 /* Notice that breakpoint traps are always installed on top of fast
175 tracepoint jumps. This is even if the fast tracepoint is installed
176 at a later time compared to when the breakpoint was installed.
177 This means that a stopping breakpoint or tracepoint has higher
178 "priority". In turn, this allows having fast and slow tracepoints
179 (and breakpoints) at the same address behave correctly. */
180
181
182 /* A fast tracepoint jump. */
183
184 struct fast_tracepoint_jump
185 {
186 struct fast_tracepoint_jump *next;
187
188 /* A reference count. GDB can install more than one fast tracepoint
189 at the same address (each with its own action list, for
190 example). */
191 int refcount;
192
193 /* The fast tracepoint's insertion address. There can only be one
194 of these for a given PC. */
195 CORE_ADDR pc;
196
197 /* Non-zero if this fast tracepoint jump is currently inserted in
198 the inferior. */
199 int inserted;
200
201 /* The length of the jump instruction. */
202 int length;
203
204 /* A poor-man's flexible array member, holding both the jump
205 instruction to insert, and a copy of the instruction that would
206 be in memory had not been a jump there (the shadow memory of the
207 tracepoint jump). */
208 unsigned char insn_and_shadow[0];
209 };
210
211 /* Fast tracepoint FP's jump instruction to insert. */
212 #define fast_tracepoint_jump_insn(fp) \
213 ((fp)->insn_and_shadow + 0)
214
215 /* The shadow memory of fast tracepoint jump FP. */
216 #define fast_tracepoint_jump_shadow(fp) \
217 ((fp)->insn_and_shadow + (fp)->length)
218
219
220 /* Return the fast tracepoint jump set at WHERE. */
221
222 static struct fast_tracepoint_jump *
223 find_fast_tracepoint_jump_at (CORE_ADDR where)
224 {
225 struct process_info *proc = current_process ();
226 struct fast_tracepoint_jump *jp;
227
228 for (jp = proc->fast_tracepoint_jumps; jp != NULL; jp = jp->next)
229 if (jp->pc == where)
230 return jp;
231
232 return NULL;
233 }
234
235 int
236 fast_tracepoint_jump_here (CORE_ADDR where)
237 {
238 struct fast_tracepoint_jump *jp = find_fast_tracepoint_jump_at (where);
239
240 return (jp != NULL);
241 }
242
/* Drop one reference to fast tracepoint jump TODEL.  When the
   reference count drops to zero, remove the jump instruction from the
   inferior and free the object.  Returns 0 on success; an errno value
   if restoring the original memory failed (the jump is then relinked
   and kept alive); or ENOENT if TODEL is not in the current process's
   list.  */

int
delete_fast_tracepoint_jump (struct fast_tracepoint_jump *todel)
{
  struct fast_tracepoint_jump *bp, **bp_link;
  int ret;
  struct process_info *proc = current_process ();

  bp = proc->fast_tracepoint_jumps;
  bp_link = &proc->fast_tracepoint_jumps;

  while (bp)
    {
      if (bp == todel)
	{
	  if (--bp->refcount == 0)
	    {
	      /* Remember the unlinked node so we can relink it if the
		 memory write below fails.  */
	      struct fast_tracepoint_jump *prev_bp_link = *bp_link;

	      /* Unlink it.  */
	      *bp_link = bp->next;

	      /* Since there can be breakpoints inserted in the same
		 address range, we use `write_inferior_memory', which
		 takes care of layering breakpoints on top of fast
		 tracepoints, and on top of the buffer we pass it.
		 This works because we've already unlinked the fast
		 tracepoint jump above.  Also note that we need to
		 pass the current shadow contents, because
		 write_inferior_memory updates any shadow memory with
		 what we pass here, and we want that to be a nop.  */
	      ret = write_inferior_memory (bp->pc,
					   fast_tracepoint_jump_shadow (bp),
					   bp->length);
	      if (ret != 0)
		{
		  /* Something went wrong, relink the jump.  */
		  *bp_link = prev_bp_link;

		  if (debug_threads)
		    fprintf (stderr,
			     "Failed to uninsert fast tracepoint jump "
			     "at 0x%s (%s) while deleting it.\n",
			     paddress (bp->pc), strerror (ret));
		  return ret;
		}

	      free (bp);
	    }

	  return 0;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }

  warning ("Could not find fast tracepoint jump in list.");
  return ENOENT;
}
304
/* Install a fast tracepoint jump at WHERE, writing the LENGTH-byte
   jump instruction INSN into the inferior.  If a jump is already
   installed there, just grab another reference to it.  Returns the
   jump object, or NULL if the inferior's memory could not be read or
   written.  */

struct fast_tracepoint_jump *
set_fast_tracepoint_jump (CORE_ADDR where,
			  unsigned char *insn, ULONGEST length)
{
  struct process_info *proc = current_process ();
  struct fast_tracepoint_jump *jp;
  int err;

  /* We refcount fast tracepoint jumps.  Check if we already know
     about a jump at this address.  */
  jp = find_fast_tracepoint_jump_at (where);
  if (jp != NULL)
    {
      jp->refcount++;
      return jp;
    }

  /* We don't, so create a new object.  Double the length, because the
     flexible array member holds both the jump insn, and the
     shadow.  */
  jp = xcalloc (1, sizeof (*jp) + (length * 2));
  jp->pc = where;
  jp->length = length;
  memcpy (fast_tracepoint_jump_insn (jp), insn, length);
  jp->refcount = 1;

  /* Save the shadow memory before writing the jump.  Note that there
     can be trap breakpoints inserted in the same address range.  To
     access the original memory contents, we use
     `read_inferior_memory', which masks out breakpoints.  */
  err = read_inferior_memory (where,
			      fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to read shadow memory of"
		 " fast tracepoint at 0x%s (%s).\n",
		 paddress (where), strerror (err));
      free (jp);
      return NULL;
    }

  /* Link the jump in.  */
  jp->inserted = 1;
  jp->next = proc->fast_tracepoint_jumps;
  proc->fast_tracepoint_jumps = jp;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, on top of the
     buffer we pass it.  This works because we've already linked in
     the fast tracepoint jump above.  Also note that we need to pass
     the current shadow contents, because write_inferior_memory
     updates any shadow memory with what we pass here, and we want
     that to be a nop.  */
  err = write_inferior_memory (where, fast_tracepoint_jump_shadow (jp),
			       length);
  if (err != 0)
    {
      if (debug_threads)
	fprintf (stderr,
		 "Failed to insert fast tracepoint jump at 0x%s (%s).\n",
		 paddress (where), strerror (err));

      /* Unlink it.  */
      proc->fast_tracepoint_jumps = jp->next;
      free (jp);

      return NULL;
    }

  return jp;
}
378
/* Temporarily remove the fast tracepoint jump at PC from the
   inferior, keeping the bookkeeping object around so
   reinsert_fast_tracepoint_jumps_at can put it back later (e.g.,
   around a step-over).  */

void
uninsert_fast_tracepoint_jumps_at (CORE_ADDR pc)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (pc);
  if (jp == NULL)
    {
      /* This can happen when we remove all breakpoints while handling
	 a step-over.  */
      if (debug_threads)
	fprintf (stderr,
		 "Could not find fast tracepoint jump at 0x%s "
		 "in list (uninserting).\n",
		 paddress (pc));
      return;
    }

  if (jp->inserted)
    {
      jp->inserted = 0;

      /* Since there can be trap breakpoints inserted in the same
	 address range, we use `write_inferior_memory', which takes
	 care of layering breakpoints on top of fast tracepoints, and
	 on top of the buffer we pass it.  This works because we've
	 already marked the fast tracepoint jump uninserted above.
	 Also note that we need to pass the current shadow contents,
	 because write_inferior_memory updates any shadow memory with
	 what we pass here, and we want that to be a nop.  */
      err = write_inferior_memory (jp->pc,
				   fast_tracepoint_jump_shadow (jp),
				   jp->length);
      if (err != 0)
	{
	  /* The write failed; the jump is effectively still in
	     place, so flag it inserted again.  */
	  jp->inserted = 1;

	  if (debug_threads)
	    fprintf (stderr,
		     "Failed to uninsert fast tracepoint jump at 0x%s (%s).\n",
		     paddress (pc), strerror (err));
	}
    }
}
425
/* Re-install the previously uninserted fast tracepoint jump at WHERE
   in the inferior (the counterpart of
   uninsert_fast_tracepoint_jumps_at).  */

void
reinsert_fast_tracepoint_jumps_at (CORE_ADDR where)
{
  struct fast_tracepoint_jump *jp;
  int err;

  jp = find_fast_tracepoint_jump_at (where);
  if (jp == NULL)
    {
      /* This can happen when we remove breakpoints when a tracepoint
	 hit causes a tracing stop, while handling a step-over.  */
      if (debug_threads)
	fprintf (stderr,
		 "Could not find fast tracepoint jump at 0x%s "
		 "in list (reinserting).\n",
		 paddress (where));
      return;
    }

  if (jp->inserted)
    error ("Jump already inserted at reinsert time.");

  jp->inserted = 1;

  /* Since there can be trap breakpoints inserted in the same address
     range, we use `write_inferior_memory', which takes care of
     layering breakpoints on top of fast tracepoints, and on top of
     the buffer we pass it.  This works because we've already marked
     the fast tracepoint jump inserted above.  Also note that we need
     to pass the current shadow contents, because
     write_inferior_memory updates any shadow memory with what we pass
     here, and we want that to be a nop.  */
  err = write_inferior_memory (where,
			       fast_tracepoint_jump_shadow (jp), jp->length);
  if (err != 0)
    {
      /* The write failed, so the jump is not actually in place.  */
      jp->inserted = 0;

      if (debug_threads)
	fprintf (stderr,
		 "Failed to reinsert fast tracepoint jump at 0x%s (%s).\n",
		 paddress (where), strerror (err));
    }
}
470
471 struct breakpoint *
472 set_breakpoint_at (CORE_ADDR where, int (*handler) (CORE_ADDR))
473 {
474 struct process_info *proc = current_process ();
475 struct breakpoint *bp;
476 struct raw_breakpoint *raw;
477
478 raw = set_raw_breakpoint_at (where);
479
480 if (raw == NULL)
481 {
482 /* warn? */
483 return NULL;
484 }
485
486 bp = xcalloc (1, sizeof (struct breakpoint));
487 bp->type = other_breakpoint;
488
489 bp->raw = raw;
490 bp->handler = handler;
491
492 bp->next = proc->breakpoints;
493 proc->breakpoints = bp;
494
495 return bp;
496 }
497
/* Unlink raw breakpoint TODEL from PROC's list, restore the original
   memory contents in the inferior if it was inserted, and free it.
   Returns 0 on success; an errno value if the memory write failed
   (the breakpoint is then relinked and kept); or ENOENT if TODEL is
   not in the list.  */

static int
delete_raw_breakpoint (struct process_info *proc, struct raw_breakpoint *todel)
{
  struct raw_breakpoint *bp, **bp_link;
  int ret;

  bp = proc->raw_breakpoints;
  bp_link = &proc->raw_breakpoints;

  while (bp)
    {
      if (bp == todel)
	{
	  if (bp->inserted)
	    {
	      /* Remember the unlinked node so we can relink it if the
		 memory write below fails.  */
	      struct raw_breakpoint *prev_bp_link = *bp_link;

	      *bp_link = bp->next;

	      /* Since there can be fast tracepoint jumps inserted in
		 the same address range, we use
		 `write_inferior_memory', which takes care of layering
		 breakpoints on top of fast tracepoints, and on top of
		 the buffer we pass it.  This works because we've
		 already unlinked the raw breakpoint above.  Also note
		 that we need to pass the current shadow contents,
		 because write_inferior_memory updates any shadow
		 memory with what we pass here, and we want that to be
		 a nop.  */
	      ret = write_inferior_memory (bp->pc, bp->old_data,
					   breakpoint_len);
	      if (ret != 0)
		{
		  /* Something went wrong, relink the breakpoint.  */
		  *bp_link = prev_bp_link;

		  if (debug_threads)
		    fprintf (stderr,
			     "Failed to uninsert raw breakpoint "
			     "at 0x%s (%s) while deleting it.\n",
			     paddress (bp->pc), strerror (ret));
		  return ret;
		}

	    }
	  else
	    *bp_link = bp->next;

	  free (bp);
	  return 0;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }

  warning ("Could not find raw breakpoint in list.");
  return ENOENT;
}
558
559 static int
560 release_breakpoint (struct process_info *proc, struct breakpoint *bp)
561 {
562 int newrefcount;
563 int ret;
564
565 newrefcount = bp->raw->refcount - 1;
566 if (newrefcount == 0)
567 {
568 ret = delete_raw_breakpoint (proc, bp->raw);
569 if (ret != 0)
570 return ret;
571 }
572 else
573 bp->raw->refcount = newrefcount;
574
575 free (bp);
576
577 return 0;
578 }
579
580 static int
581 delete_breakpoint_1 (struct process_info *proc, struct breakpoint *todel)
582 {
583 struct breakpoint *bp, **bp_link;
584 int err;
585
586 bp = proc->breakpoints;
587 bp_link = &proc->breakpoints;
588
589 while (bp)
590 {
591 if (bp == todel)
592 {
593 *bp_link = bp->next;
594
595 err = release_breakpoint (proc, bp);
596 if (err != 0)
597 return err;
598
599 bp = *bp_link;
600 return 0;
601 }
602 else
603 {
604 bp_link = &bp->next;
605 bp = *bp_link;
606 }
607 }
608
609 warning ("Could not find breakpoint in list.");
610 return ENOENT;
611 }
612
/* Public wrapper: delete breakpoint TODEL from the current
   process.  */

int
delete_breakpoint (struct breakpoint *todel)
{
  return delete_breakpoint_1 (current_process (), todel);
}
619
620 static struct breakpoint *
621 find_gdb_breakpoint_at (CORE_ADDR where)
622 {
623 struct process_info *proc = current_process ();
624 struct breakpoint *bp;
625
626 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
627 if (bp->type == gdb_breakpoint && bp->raw->pc == where)
628 return bp;
629
630 return NULL;
631 }
632
/* Set a GDB (Z0 packet) breakpoint at WHERE.  Returns 0 on success,
   1 if the target doesn't support memory breakpoints, and -1 if
   inserting the breakpoint failed.  */

int
set_gdb_breakpoint_at (CORE_ADDR where)
{
  struct breakpoint *bp;

  if (breakpoint_data == NULL)
    return 1;

  /* If we see GDB inserting a second breakpoint at the same address,
     then the first breakpoint must have disappeared due to a shared
     library unload.  On targets where the shared libraries are
     handled by userspace, like SVR4, for example, GDBserver can't
     tell if a library was loaded or unloaded.  Since we refcount
     breakpoints, if we didn't do this, we'd just increase the
     refcount of the previous breakpoint at this address, but the trap
     was not planted in the inferior anymore, thus the breakpoint
     would never be hit.  */
  bp = find_gdb_breakpoint_at (where);
  if (bp != NULL)
    {
      delete_gdb_breakpoint_at (where);

      /* Might as well validate all other breakpoints.  */
      validate_breakpoints ();
    }

  bp = set_breakpoint_at (where, NULL);
  if (bp == NULL)
    return -1;

  /* set_breakpoint_at defaults to other_breakpoint; tag this one as
     GDB-requested so find_gdb_breakpoint_at can find it.  */
  bp->type = gdb_breakpoint;
  return 0;
}
666
667 int
668 delete_gdb_breakpoint_at (CORE_ADDR addr)
669 {
670 struct breakpoint *bp;
671 int err;
672
673 if (breakpoint_data == NULL)
674 return 1;
675
676 bp = find_gdb_breakpoint_at (addr);
677 if (bp == NULL)
678 return -1;
679
680 err = delete_breakpoint (bp);
681 if (err)
682 return -1;
683
684 return 0;
685 }
686
687 int
688 gdb_breakpoint_here (CORE_ADDR where)
689 {
690 struct breakpoint *bp = find_gdb_breakpoint_at (where);
691
692 return (bp != NULL);
693 }
694
695 void
696 set_reinsert_breakpoint (CORE_ADDR stop_at)
697 {
698 struct breakpoint *bp;
699
700 bp = set_breakpoint_at (stop_at, NULL);
701 bp->type = reinsert_breakpoint;
702 }
703
/* Delete all software-single-step (reinsert) breakpoints of the
   current process.  */

void
delete_reinsert_breakpoints (void)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->type == reinsert_breakpoint)
	{
	  /* Unlink, then release; BP_LINK keeps pointing at the link
	     slot so iteration continues correctly after removal.
	     NOTE(review): release_breakpoint's return value is
	     ignored here — a failed uninsert is silently dropped.  */
	  *bp_link = bp->next;
	  release_breakpoint (proc, bp);
	  bp = *bp_link;
	}
      else
	{
	  bp_link = &bp->next;
	  bp = *bp_link;
	}
    }
}
728
/* Temporarily remove raw breakpoint BP's trap from the inferior,
   keeping the bookkeeping object so reinsert_raw_breakpoint can put
   it back.  No-op if BP is not currently inserted.  */

static void
uninsert_raw_breakpoint (struct raw_breakpoint *bp)
{
  if (bp->inserted)
    {
      int err;

      bp->inserted = 0;
      /* Since there can be fast tracepoint jumps inserted in the same
	 address range, we use `write_inferior_memory', which takes
	 care of layering breakpoints on top of fast tracepoints, and
	 on top of the buffer we pass it.  This works because we've
	 already marked the breakpoint uninserted above.  Also note
	 that we need to pass the current shadow contents, because
	 write_inferior_memory updates any shadow memory with what we
	 pass here, and we want that to be a nop.  */
      err = write_inferior_memory (bp->pc, bp->old_data,
				   breakpoint_len);
      if (err != 0)
	{
	  /* The write failed; the trap is effectively still in
	     place.  */
	  bp->inserted = 1;

	  if (debug_threads)
	    fprintf (stderr,
		     "Failed to uninsert raw breakpoint at 0x%s (%s).\n",
		     paddress (bp->pc), strerror (err));
	}
    }
}
758
759 void
760 uninsert_breakpoints_at (CORE_ADDR pc)
761 {
762 struct raw_breakpoint *bp;
763
764 bp = find_raw_breakpoint_at (pc);
765 if (bp == NULL)
766 {
767 /* This can happen when we remove all breakpoints while handling
768 a step-over. */
769 if (debug_threads)
770 fprintf (stderr,
771 "Could not find breakpoint at 0x%s "
772 "in list (uninserting).\n",
773 paddress (pc));
774 return;
775 }
776
777 if (bp->inserted)
778 uninsert_raw_breakpoint (bp);
779 }
780
781 void
782 uninsert_all_breakpoints (void)
783 {
784 struct process_info *proc = current_process ();
785 struct raw_breakpoint *bp;
786
787 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
788 if (bp->inserted)
789 uninsert_raw_breakpoint (bp);
790 }
791
792 static void
793 reinsert_raw_breakpoint (struct raw_breakpoint *bp)
794 {
795 int err;
796
797 if (bp->inserted)
798 error ("Breakpoint already inserted at reinsert time.");
799
800 err = (*the_target->write_memory) (bp->pc, breakpoint_data,
801 breakpoint_len);
802 if (err == 0)
803 bp->inserted = 1;
804 else if (debug_threads)
805 fprintf (stderr,
806 "Failed to reinsert breakpoint at 0x%s (%s).\n",
807 paddress (bp->pc), strerror (err));
808 }
809
810 void
811 reinsert_breakpoints_at (CORE_ADDR pc)
812 {
813 struct raw_breakpoint *bp;
814
815 bp = find_raw_breakpoint_at (pc);
816 if (bp == NULL)
817 {
818 /* This can happen when we remove all breakpoints while handling
819 a step-over. */
820 if (debug_threads)
821 fprintf (stderr,
822 "Could not find raw breakpoint at 0x%s "
823 "in list (reinserting).\n",
824 paddress (pc));
825 return;
826 }
827
828 reinsert_raw_breakpoint (bp);
829 }
830
831 void
832 reinsert_all_breakpoints (void)
833 {
834 struct process_info *proc = current_process ();
835 struct raw_breakpoint *bp;
836
837 for (bp = proc->raw_breakpoints; bp != NULL; bp = bp->next)
838 if (!bp->inserted)
839 reinsert_raw_breakpoint (bp);
840 }
841
/* The inferior stopped at STOP_PC; run the handler of every high
   level breakpoint planted there, deleting any breakpoint whose
   handler returns non-zero.  */

void
check_breakpoints (CORE_ADDR stop_pc)
{
  struct process_info *proc = current_process ();
  struct breakpoint *bp, **bp_link;

  bp = proc->breakpoints;
  bp_link = &proc->breakpoints;

  while (bp)
    {
      if (bp->raw->pc == stop_pc)
	{
	  if (!bp->raw->inserted)
	    {
	      warning ("Hit a removed breakpoint?");
	      return;
	    }

	  /* A non-zero handler return means the breakpoint has served
	     its purpose; unlink and release it, then continue from
	     the same link slot.  */
	  if (bp->handler != NULL && (*bp->handler) (stop_pc))
	    {
	      *bp_link = bp->next;

	      release_breakpoint (proc, bp);

	      bp = *bp_link;
	      continue;
	    }
	}

      bp_link = &bp->next;
      bp = *bp_link;
    }
}
876
877 void
878 set_breakpoint_data (const unsigned char *bp_data, int bp_len)
879 {
880 breakpoint_data = bp_data;
881 breakpoint_len = bp_len;
882 }
883
884 int
885 breakpoint_here (CORE_ADDR addr)
886 {
887 return (find_raw_breakpoint_at (addr) != NULL);
888 }
889
890 int
891 breakpoint_inserted_here (CORE_ADDR addr)
892 {
893 struct raw_breakpoint *bp;
894
895 bp = find_raw_breakpoint_at (addr);
896
897 return (bp != NULL && bp->inserted);
898 }
899
/* Check whether the trap instruction of supposedly-inserted raw
   breakpoint BP is still present in the inferior.  If it is not
   (e.g., the shared library it lived in was unloaded), tag BP as gone
   and shlib-disabled.  Returns 1 if still inserted, 0 otherwise.  */

static int
validate_inserted_breakpoint (struct raw_breakpoint *bp)
{
  unsigned char *buf;
  int err;

  gdb_assert (bp->inserted);

  /* Read raw memory (not through read_inferior_memory) so we see the
     trap bytes themselves, if present.  */
  buf = alloca (breakpoint_len);
  err = (*the_target->read_memory) (bp->pc, buf, breakpoint_len);
  if (err || memcmp (buf, breakpoint_data, breakpoint_len) != 0)
    {
      /* Tag it as gone.  */
      bp->inserted = 0;
      bp->shlib_disabled = 1;
      return 0;
    }

  return 1;
}
920
921 static void
922 delete_disabled_breakpoints (void)
923 {
924 struct process_info *proc = current_process ();
925 struct breakpoint *bp, *next;
926
927 for (bp = proc->breakpoints; bp != NULL; bp = next)
928 {
929 next = bp->next;
930 if (bp->raw->shlib_disabled)
931 delete_breakpoint_1 (proc, bp);
932 }
933 }
934
935 /* Check if breakpoints we inserted still appear to be inserted. They
936 may disappear due to a shared library unload, and worse, a new
937 shared library may be reloaded at the same address as the
938 previously unloaded one. If that happens, we should make sure that
939 the shadow memory of the old breakpoints isn't used when reading or
940 writing memory. */
941
942 void
943 validate_breakpoints (void)
944 {
945 struct process_info *proc = current_process ();
946 struct breakpoint *bp;
947
948 for (bp = proc->breakpoints; bp != NULL; bp = bp->next)
949 {
950 if (bp->raw->inserted)
951 validate_inserted_breakpoint (bp->raw);
952 }
953
954 delete_disabled_breakpoints ();
955 }
956
/* Adjust a just-performed inferior memory read: BUF holds MEM_LEN
   bytes read starting at MEM_ADDR.  For any range overlapping an
   inserted fast tracepoint jump or breakpoint, replace the visible
   jump/trap bytes in BUF with the saved shadow contents, so the
   client never sees our instrumentation.  Breakpoints whose traps
   have vanished (shlib unload) are detected and deleted along the
   way.  */

void
check_mem_read (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR bp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip jumps entirely outside the read range.  */
      if (mem_addr >= bp_end)
	continue;
      if (jp->pc >= mem_end)
	continue;

      /* Clip the overlap to [start, end).  */
      start = jp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      if (jp->inserted)
	memcpy (buf + buf_offset,
		fast_tracepoint_jump_shadow (jp) + copy_offset,
		copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip breakpoints entirely outside the read range.  */
      if (mem_addr >= bp_end)
	continue;
      if (bp->pc >= mem_end)
	continue;

      /* Clip the overlap to [start, end).  */
      start = bp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      if (bp->inserted)
	{
	  if (validate_inserted_breakpoint (bp))
	    memcpy (buf + buf_offset, bp->old_data + copy_offset, copy_len);
	  else
	    disabled_one = 1;
	}
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
1030
/* Adjust an about-to-be-performed inferior memory write: BUF holds
   MEM_LEN bytes destined for MEM_ADDR.  For any range overlapping an
   inserted fast tracepoint jump or breakpoint, save the new bytes
   into the shadow memory and substitute the jump/trap bytes into BUF,
   so the instrumentation survives the write.  Breakpoints whose traps
   have vanished (shlib unload) are detected and deleted along the
   way.  */

void
check_mem_write (CORE_ADDR mem_addr, unsigned char *buf, int mem_len)
{
  struct process_info *proc = current_process ();
  struct raw_breakpoint *bp = proc->raw_breakpoints;
  struct fast_tracepoint_jump *jp = proc->fast_tracepoint_jumps;
  CORE_ADDR mem_end = mem_addr + mem_len;
  int disabled_one = 0;

  /* First fast tracepoint jumps, then breakpoint traps on top.  */

  for (; jp != NULL; jp = jp->next)
    {
      CORE_ADDR jp_end = jp->pc + jp->length;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip jumps entirely outside the write range.  */
      if (mem_addr >= jp_end)
	continue;
      if (jp->pc >= mem_end)
	continue;

      /* Clip the overlap to [start, end).  */
      start = jp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = jp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - jp->pc;
      buf_offset = start - mem_addr;

      /* Update the shadow with what the client wants in memory, then
	 make the actual write preserve the jump instruction.  */
      memcpy (fast_tracepoint_jump_shadow (jp) + copy_offset,
	      buf + buf_offset, copy_len);
      if (jp->inserted)
	memcpy (buf + buf_offset,
		fast_tracepoint_jump_insn (jp) + copy_offset, copy_len);
    }

  for (; bp != NULL; bp = bp->next)
    {
      CORE_ADDR bp_end = bp->pc + breakpoint_len;
      CORE_ADDR start, end;
      int copy_offset, copy_len, buf_offset;

      /* Skip breakpoints entirely outside the write range.  */
      if (mem_addr >= bp_end)
	continue;
      if (bp->pc >= mem_end)
	continue;

      /* Clip the overlap to [start, end).  */
      start = bp->pc;
      if (mem_addr > start)
	start = mem_addr;

      end = bp_end;
      if (end > mem_end)
	end = mem_end;

      copy_len = end - start;
      copy_offset = start - bp->pc;
      buf_offset = start - mem_addr;

      /* Update the shadow with what the client wants in memory, then
	 make the actual write preserve the trap instruction.  */
      memcpy (bp->old_data + copy_offset, buf + buf_offset, copy_len);
      if (bp->inserted)
	{
	  if (validate_inserted_breakpoint (bp))
	    memcpy (buf + buf_offset, breakpoint_data + copy_offset, copy_len);
	  else
	    disabled_one = 1;
	}
    }

  if (disabled_one)
    delete_disabled_breakpoints ();
}
1108
1109 /* Delete all breakpoints, and un-insert them from the inferior. */
1110
1111 void
1112 delete_all_breakpoints (void)
1113 {
1114 struct process_info *proc = current_process ();
1115
1116 while (proc->breakpoints)
1117 delete_breakpoint_1 (proc, proc->breakpoints);
1118 }
1119
1120 /* Clear the "inserted" flag in all breakpoints. */
1121
1122 void
1123 mark_breakpoints_out (struct process_info *proc)
1124 {
1125 struct raw_breakpoint *raw_bp;
1126
1127 for (raw_bp = proc->raw_breakpoints; raw_bp != NULL; raw_bp = raw_bp->next)
1128 raw_bp->inserted = 0;
1129 }
1130
1131 /* Release all breakpoints, but do not try to un-insert them from the
1132 inferior. */
1133
1134 void
1135 free_all_breakpoints (struct process_info *proc)
1136 {
1137 mark_breakpoints_out (proc);
1138
1139 /* Note: use PROC explicitly instead of deferring to
1140 delete_all_breakpoints --- CURRENT_INFERIOR may already have been
1141 released when we get here. There should be no call to
1142 current_process from here on. */
1143 while (proc->breakpoints)
1144 delete_breakpoint_1 (proc, proc->breakpoints);
1145 }
This page took 0.055944 seconds and 5 git commands to generate.