/* Caching code for GDB, the GNU debugger.

   Copyright (C) 1992-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "dcache.h"
#include "gdbcmd.h"
#include <string.h>
#include "gdbcore.h"
#include "target.h"
#include "inferior.h"
#include "splay-tree.h"

/* Commands with a prefix of `{set,show} dcache'.  */
static struct cmd_list_element *dcache_set_list = NULL;
static struct cmd_list_element *dcache_show_list = NULL;

/* The data cache could lead to incorrect results because it doesn't
   know about volatile variables, thus making it impossible to debug
   functions which use memory mapped I/O devices.  Set the nocache
   memory region attribute in those cases.

   In general the dcache speeds up performance.  Some speed improvement
   comes from the actual caching mechanism, but the major gain is in
   the reduction of the remote protocol overhead; instead of reading
   or writing a large area of memory in 4 byte requests, the cache
   bundles up the requests into LINE_SIZE chunks, reducing overhead
   significantly.  This is most useful when accessing a large amount
   of data, such as when performing a backtrace.

   The cache is a splay tree along with a linked list for replacement.
   Each block caches a LINE_SIZE area of memory.  Within each line we
   remember the address of the line (which must be a multiple of
   LINE_SIZE) and the actual data block.

   Lines are only allocated as needed, so DCACHE_SIZE really specifies the
   *maximum* number of lines in the cache.

   At present, the cache is write-through rather than writeback: as soon
   as data is written to the cache, it is also immediately written to
   the target.  Therefore, cache lines are never "dirty".  Whether a given
   line is valid or not depends on where it is stored in the dcache_struct;
   there is no per-block valid flag.  */

/* NOTE: Interaction of dcache and memory region attributes

   As there is no requirement that memory region attributes be aligned
   to or be a multiple of the dcache page size, dcache_read_line() and
   dcache_write_line() must break up the page by memory region.  If a
   chunk does not have the cache attribute set, an invalid memory type
   is set, etc., then the chunk is skipped.  Those chunks are handled
   in target_xfer_memory() (or target_xfer_memory_partial()).

   This doesn't occur very often.  The most common occurrence is when
   the last bit of the .text segment and the first bit of the .data
   segment fall within the same dcache page with a ro/cacheable memory
   region defined for the .text segment and a rw/non-cacheable memory
   region defined for the .data segment.  */

/* The maximum number of lines stored.  The total size of the cache is
   equal to DCACHE_SIZE times LINE_SIZE.  */
#define DCACHE_DEFAULT_SIZE 4096
static unsigned dcache_size = DCACHE_DEFAULT_SIZE;
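
/* For instance, with the default 4096 lines of 64 bytes each, the cache
   can hold at most 256 KiB of target memory.  */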

/* The default size of a cache line.  Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher.  */
#define DCACHE_DEFAULT_LINE_SIZE 64
static unsigned dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;

/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address.  */

#define LINE_SIZE_MASK(dcache)  ((dcache->line_size - 1))
#define XFORM(dcache, x)        ((x) & LINE_SIZE_MASK (dcache))
#define MASK(dcache, x)         ((x) & ~LINE_SIZE_MASK (dcache))
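
/* As a worked example: with the default 64-byte lines, an access to
   address 0x1007 lies on the line whose MASK address is 0x1000, and
   XFORM yields offset 0x07 into that line's data block.  */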

struct dcache_block
{
  /* For least-recently-allocated and free lists.  */
  struct dcache_block *prev;
  struct dcache_block *next;

  CORE_ADDR addr;       /* address of data */
  int refs;             /* # hits */
  gdb_byte data[1];     /* line_size bytes at given address */
};

struct dcache_struct
{
  splay_tree tree;
  struct dcache_block *oldest;  /* least-recently-allocated list.  */

  /* The free list is maintained identically to OLDEST to simplify
     the code: we only need one set of accessors.  */
  struct dcache_block *freelist;

  /* The number of in-use lines in the cache.  */
  int size;
  CORE_ADDR line_size;  /* current line_size.  */

  /* The ptid of last inferior to use cache or null_ptid.  */
  ptid_t ptid;
};
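
/* Type of function called by for_each_block for each block on a list.  */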

typedef void (block_func) (struct dcache_block *block, void *param);

static struct dcache_block *dcache_hit (DCACHE *dcache, CORE_ADDR addr);

static int dcache_read_line (DCACHE *dcache, struct dcache_block *db);

static struct dcache_block *dcache_alloc (DCACHE *dcache, CORE_ADDR addr);

static void dcache_info (char *exp, int tty);

void _initialize_dcache (void);

static int dcache_enabled_p = 0;  /* OBSOLETE */
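
/* Implement "show remotecache", noting that the flag is deprecated.  */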

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
                       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}

static DCACHE *last_cache; /* Used by info dcache.  */

/* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
   *BLIST is not updated (unless it was previously NULL of course).
   This is for the least-recently-allocated list's sake:
   BLIST points to the oldest block.
   ??? This makes for poor cache usage of the free list,
   but is it measurable?  */

static void
append_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (*blist)
    {
      block->next = *blist;
      block->prev = (*blist)->prev;
      block->prev->next = block;
      (*blist)->prev = block;
      /* We don't update *BLIST here to maintain the invariant that for the
         least-recently-allocated list *BLIST points to the oldest block.  */
    }
  else
    {
      block->next = block;
      block->prev = block;
      *blist = block;
    }
}

/* Remove BLOCK from circular block list BLIST.  */

static void
remove_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (block->next == block)
    {
      *blist = NULL;
    }
  else
    {
      block->next->prev = block->prev;
      block->prev->next = block->next;
      /* If we removed the block *BLIST points to, shift it to the next block
         to maintain the invariant that for the least-recently-allocated list
         *BLIST points to the oldest block.  */
      if (*blist == block)
        *blist = block->next;
    }
}

/* Iterate over all elements in BLIST, calling FUNC.
   PARAM is passed to FUNC.
   FUNC may remove the block it's passed, but only that block.  */

static void
for_each_block (struct dcache_block **blist, block_func *func, void *param)
{
  struct dcache_block *db;

  if (*blist == NULL)
    return;

  db = *blist;
  do
    {
      struct dcache_block *next = db->next;

      func (db, param);
      db = next;
    }
  while (*blist && db != *blist);
}

/* BLOCK_FUNC routine for dcache_free.  */

static void
free_block (struct dcache_block *block, void *param)
{
  xfree (block);
}

/* Free a data cache.  */

void
dcache_free (DCACHE *dcache)
{
  if (last_cache == dcache)
    last_cache = NULL;

  splay_tree_delete (dcache->tree);
  for_each_block (&dcache->oldest, free_block, NULL);
  for_each_block (&dcache->freelist, free_block, NULL);
  xfree (dcache);
}


/* BLOCK_FUNC function for dcache_invalidate.
   This doesn't remove the block from the oldest list on purpose.
   dcache_invalidate will do it later.  */

static void
invalidate_block (struct dcache_block *block, void *param)
{
  DCACHE *dcache = (DCACHE *) param;

  splay_tree_remove (dcache->tree, (splay_tree_key) block->addr);
  append_block (&dcache->freelist, block);
}

/* Free all the data cache blocks, thus discarding all cached data.  */

void
dcache_invalidate (DCACHE *dcache)
{
  for_each_block (&dcache->oldest, invalidate_block, dcache);

  dcache->oldest = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;

  if (dcache->line_size != dcache_line_size)
    {
      /* We've been asked to use a different line size.
         All of our freelist blocks are now the wrong size, so free them.  */

      for_each_block (&dcache->freelist, free_block, dcache);
      dcache->freelist = NULL;
      dcache->line_size = dcache_line_size;
    }
}

/* Invalidate the line associated with ADDR.  */

static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    {
      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
      remove_block (&dcache->oldest, db);
      append_block (&dcache->freelist, db);
      --dcache->size;
    }
}

/* If addr is present in the dcache, return the address of the block
   containing it.  Otherwise return NULL.  */

static struct dcache_block *
dcache_hit (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  splay_tree_node node = splay_tree_lookup (dcache->tree,
                                            (splay_tree_key) MASK (dcache, addr));

  if (!node)
    return NULL;

  db = (struct dcache_block *) node->value;
  db->refs++;
  return db;
}

/* Fill a cache line from target memory.
   The result is 1 for success, 0 if the (entire) cache line
   wasn't readable.  */

static int
dcache_read_line (DCACHE *dcache, struct dcache_block *db)
{
  CORE_ADDR memaddr;
  gdb_byte *myaddr;
  int len;
  int res;
  int reg_len;
  struct mem_region *region;

  len = dcache->line_size;
  memaddr = db->addr;
  myaddr = db->data;

  while (len > 0)
    {
      /* Don't overrun if this block is right at the end of the region.  */
      region = lookup_mem_region (memaddr);
      if (region->hi == 0 || memaddr + len < region->hi)
        reg_len = len;
      else
        reg_len = region->hi - memaddr;

      /* Skip non-readable regions.  The cache attribute can be ignored,
         since we may be loading this for a stack access.  */
      if (region->attrib.mode == MEM_WO)
        {
          memaddr += reg_len;
          myaddr += reg_len;
          len -= reg_len;
          continue;
        }

      res = target_read (&current_target, TARGET_OBJECT_RAW_MEMORY,
                         NULL, myaddr, memaddr, reg_len);
      if (res < reg_len)
        return 0;

      memaddr += res;
      myaddr += res;
      len -= res;
    }

  return 1;
}

/* Get a free cache block, put or keep it on the valid list,
   and return its address.  */

static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  if (dcache->size >= dcache_size)
    {
      /* Evict the least recently allocated line.  */
      db = dcache->oldest;
      remove_block (&dcache->oldest, db);

      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
    }
  else
    {
      db = dcache->freelist;
      if (db)
        remove_block (&dcache->freelist, db);
      else
        db = xmalloc (offsetof (struct dcache_block, data) +
                      dcache->line_size);

      dcache->size++;
    }

  db->addr = MASK (dcache, addr);
  db->refs = 0;

  /* Put DB at the end of the list, it's the newest.  */
  append_block (&dcache->oldest, db);

  splay_tree_insert (dcache->tree, (splay_tree_key) db->addr,
                     (splay_tree_value) db);

  return db;
}

/* Using the data cache DCACHE, store in *PTR the contents of the byte at
   address ADDR in the remote machine.

   Returns 1 for success, 0 for error.  */

static int
dcache_peek_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (!db)
    {
      db = dcache_alloc (dcache, addr);

      if (!dcache_read_line (dcache, db))
        return 0;
    }

  *ptr = db->data[XFORM (dcache, addr)];
  return 1;
}

/* Write the byte at PTR into ADDR in the data cache.

   The caller is responsible for also promptly writing the data
   through to target memory.

   If addr is not in cache, this function does nothing; writing to
   an area of memory which wasn't present in the cache doesn't cause
   it to be loaded in.

   Always return 1 (meaning success) to simplify dcache_xfer_memory.  */

static int
dcache_poke_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    db->data[XFORM (dcache, addr)] = *ptr;

  return 1;
}
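
/* Comparison function for the splay tree; keys are cache line addresses.
   Returns 1, 0 or -1 as A is greater than, equal to or less than B.  */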

static int
dcache_splay_tree_compare (splay_tree_key a, splay_tree_key b)
{
  if (a > b)
    return 1;
  else if (a == b)
    return 0;
  else
    return -1;
}

/* Allocate and initialize a data cache.  */

DCACHE *
dcache_init (void)
{
  DCACHE *dcache;

  dcache = (DCACHE *) xmalloc (sizeof (*dcache));

  dcache->tree = splay_tree_new (dcache_splay_tree_compare,
                                 NULL,
                                 NULL);

  dcache->oldest = NULL;
  dcache->freelist = NULL;
  dcache->size = 0;
  dcache->line_size = dcache_line_size;
  dcache->ptid = null_ptid;
  last_cache = dcache;

  return dcache;
}


/* Read or write LEN bytes from inferior memory at MEMADDR, transferring
   to or from debugger address MYADDR.  Write to inferior if SHOULD_WRITE is
   nonzero.

   Return the number of bytes actually transferred, or -1 if the
   transfer is not supported or otherwise fails.  A non-negative return
   value less than LEN indicates that no further transfer is possible.
   NOTE: This is different from the to_xfer_partial interface, in which
   positive values less than LEN mean further transfers may be possible.  */

int
dcache_xfer_memory (struct target_ops *ops, DCACHE *dcache,
                    CORE_ADDR memaddr, gdb_byte *myaddr,
                    int len, int should_write)
{
  int i;
  int res;
  int (*xfunc) (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr);

  xfunc = should_write ? dcache_poke_byte : dcache_peek_byte;

  /* If this is a different inferior from what we've recorded,
     flush the cache.  */

  if (! ptid_equal (inferior_ptid, dcache->ptid))
    {
      dcache_invalidate (dcache);
      dcache->ptid = inferior_ptid;
    }

  /* Do write-through first, so that if it fails, we don't write to
     the cache at all.  */

  if (should_write)
    {
      res = target_write (ops, TARGET_OBJECT_RAW_MEMORY,
                          NULL, myaddr, memaddr, len);
      if (res <= 0)
        return res;
      /* Update LEN to what was actually written.  */
      len = res;
    }

  for (i = 0; i < len; i++)
    {
      if (!xfunc (dcache, memaddr + i, myaddr + i))
        {
          /* That failed.  Discard its cache line so we don't have a
             partially read line.  */
          dcache_invalidate_line (dcache, memaddr + i);
          /* If we're writing, we still wrote LEN bytes.  */
          if (should_write)
            return len;
          else
            return i;
        }
    }

  return len;
}

/* FIXME: There would be some benefit to making the cache write-back and
   moving the writeback operation to a higher layer, as it could occur
   after a sequence of smaller writes have been completed (as when a stack
   frame is constructed for an inferior function call).  Note that only
   moving it up one level to target_xfer_memory[_partial]() is not
   sufficient since we want to coalesce memory transfers that are
   "logically" connected but not actually a single call to one of the
   memory transfer functions.  */

/* Just update any cache lines which are already present.  This is called
   by memory_xfer_partial in cases where the access would otherwise not go
   through the cache.  */

void
dcache_update (DCACHE *dcache, CORE_ADDR memaddr, gdb_byte *myaddr, int len)
{
  int i;

  for (i = 0; i < len; i++)
    dcache_poke_byte (dcache, memaddr + i, myaddr + i);
}
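
/* Print the contents of the cache line numbered INDEX in LAST_CACHE,
   counting lines from zero in increasing address order.  Helper for the
   "info dcache LINENUMBER" command.  */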

static void
dcache_print_line (int index)
{
  splay_tree_node n;
  struct dcache_block *db;
  int i, j;

  if (!last_cache)
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  n = splay_tree_min (last_cache->tree);

  for (i = index; i > 0; --i)
    {
      if (!n)
        break;
      n = splay_tree_successor (last_cache->tree, n->key);
    }

  if (!n)
    {
      printf_filtered (_("No such cache line exists.\n"));
      return;
    }

  db = (struct dcache_block *) n->value;

  printf_filtered (_("Line %d: address %s [%d hits]\n"),
                   index, paddress (target_gdbarch (), db->addr), db->refs);

  for (j = 0; j < last_cache->line_size; j++)
    {
      printf_filtered ("%02x ", db->data[j]);

      /* Print a newline every 16 bytes (48 characters).  */
      if ((j % 16 == 15) && (j != last_cache->line_size - 1))
        printf_filtered ("\n");
    }
  printf_filtered ("\n");
}
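
/* Implement the "info dcache" command.  With no argument, print the cache
   configuration and a one-line summary of every line currently cached;
   with a line number argument, dump the contents of that line.  */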

static void
dcache_info (char *exp, int tty)
{
  splay_tree_node n;
  int i, refcount;

  if (exp)
    {
      char *linestart;

      i = strtol (exp, &linestart, 10);
      if (linestart == exp || i < 0)
        {
          printf_filtered (_("Usage: info dcache [linenumber]\n"));
          return;
        }

      dcache_print_line (i);
      return;
    }

  printf_filtered (_("Dcache %u lines of %u bytes each.\n"),
                   dcache_size,
                   last_cache ? (unsigned) last_cache->line_size
                   : dcache_line_size);

  if (!last_cache || ptid_equal (last_cache->ptid, null_ptid))
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  printf_filtered (_("Contains data for %s\n"),
                   target_pid_to_str (last_cache->ptid));

  refcount = 0;

  n = splay_tree_min (last_cache->tree);
  i = 0;

  while (n)
    {
      struct dcache_block *db = (struct dcache_block *) n->value;

      printf_filtered (_("Line %d: address %s [%d hits]\n"),
                       i, paddress (target_gdbarch (), db->addr), db->refs);
      i++;
      refcount += db->refs;

      n = splay_tree_successor (last_cache->tree, n->key);
    }

  printf_filtered (_("Cache state: %d active lines, %d hits\n"), i, refcount);
}
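
/* Called when the "set dcache size" setting changes.  A size of zero is
   rejected, and any accepted change discards the current cache contents.  */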

static void
set_dcache_size (char *args, int from_tty,
                 struct cmd_list_element *c)
{
  if (dcache_size == 0)
    {
      dcache_size = DCACHE_DEFAULT_SIZE;
      error (_("Dcache size must be greater than 0."));
    }
  if (last_cache)
    dcache_invalidate (last_cache);
}
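
/* Called when the "set dcache line-size" setting changes.  The line size
   must be a power of 2 no smaller than 2; an invalid value is rejected,
   and any accepted change discards the current cache contents.  */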

static void
set_dcache_line_size (char *args, int from_tty,
                      struct cmd_list_element *c)
{
  if (dcache_line_size < 2
      || (dcache_line_size & (dcache_line_size - 1)) != 0)
    {
      unsigned d = dcache_line_size;
      dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;
      error (_("Invalid dcache line size: %u (must be power of 2)."), d);
    }
  if (last_cache)
    dcache_invalidate (last_cache);
}

static void
set_dcache_command (char *arg, int from_tty)
{
  printf_unfiltered (
    "\"set dcache\" must be followed by the name of a subcommand.\n");
  help_list (dcache_set_list, "set dcache ", -1, gdb_stdout);
}

static void
show_dcache_command (char *args, int from_tty)
{
  cmd_show_list (dcache_show_list, from_tty, "");
}

void
_initialize_dcache (void)
{
  add_setshow_boolean_cmd ("remotecache", class_support,
                           &dcache_enabled_p, _("\
Set cache use for remote targets."), _("\
Show cache use for remote targets."), _("\
This used to enable the data cache for remote targets.  The cache\n\
functionality is now controlled by the memory region system and the\n\
\"stack-cache\" flag; \"remotecache\" now does nothing and\n\
exists only for compatibility reasons."),
                           NULL,
                           show_dcache_enabled_p,
                           &setlist, &showlist);

  add_info ("dcache", dcache_info,
            _("\
Print information on the dcache performance.\n\
With no arguments, this command prints the cache configuration and a\n\
summary of each line in the cache.  Use \"info dcache <lineno>\" to dump\n\
the contents of a given line."));

  add_prefix_cmd ("dcache", class_obscure, set_dcache_command, _("\
Use this command to set the number of lines and the line size of the dcache."),
                  &dcache_set_list, "set dcache ", /*allow_unknown*/0, &setlist);
  add_prefix_cmd ("dcache", class_obscure, show_dcache_command, _("\
Show dcache settings."),
                  &dcache_show_list, "show dcache ", /*allow_unknown*/0, &showlist);

  add_setshow_zuinteger_cmd ("line-size", class_obscure,
                             &dcache_line_size, _("\
Set dcache line size in bytes (must be power of 2)."), _("\
Show dcache line size."),
                             NULL,
                             set_dcache_line_size,
                             NULL,
                             &dcache_set_list, &dcache_show_list);
  add_setshow_zuinteger_cmd ("size", class_obscure,
                             &dcache_size, _("\
Set number of dcache lines."), _("\
Show number of dcache lines."),
                             NULL,
                             set_dcache_size,
                             NULL,
                             &dcache_set_list, &dcache_show_list);
}