/* Caching code for GDB, the GNU debugger.

   Copyright (C) 1992-1993, 1995-1996, 1998-2001, 2003, 2007-2012 Free
   Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "dcache.h"
#include "gdbcmd.h"
#include "gdb_string.h"
#include "gdbcore.h"
#include "target.h"
#include "inferior.h"
#include "splay-tree.h"

/* Commands with a prefix of `{set,show} dcache'. */
static struct cmd_list_element *dcache_set_list = NULL;
static struct cmd_list_element *dcache_show_list = NULL;

/* The data cache could lead to incorrect results because it doesn't
   know about volatile variables, thus making it impossible to debug
   functions which use memory mapped I/O devices. Set the nocache
   memory region attribute in those cases.

   In general the dcache speeds up performance. Some speed improvement
   comes from the actual caching mechanism, but the major gain is in
   the reduction of the remote protocol overhead; instead of reading
   or writing a large area of memory in 4 byte requests, the cache
   bundles up the requests into LINE_SIZE chunks, reducing overhead
   significantly. This is most useful when accessing a large amount
   of data, such as when performing a backtrace.

   The cache is a splay tree along with a linked list for replacement.
   Each block caches a LINE_SIZE area of memory. Within each line we
   remember the address of the line (which must be a multiple of
   LINE_SIZE) and the actual data block.

   Lines are only allocated as needed, so DCACHE_SIZE really specifies the
   *maximum* number of lines in the cache.

   At present, the cache is write-through rather than writeback: as soon
   as data is written to the cache, it is also immediately written to
   the target. Therefore, cache lines are never "dirty". Whether a given
   line is valid or not depends on where it is stored in the dcache_struct;
   there is no per-block valid flag. */
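
/* For example, with the default 64-byte line size, a backtrace that
   reads 512 consecutive bytes of stack touches at most 9 cache lines,
   so it costs at most 9 line-sized target reads instead of 128
   separate 4 byte requests. */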

/* NOTE: Interaction of dcache and memory region attributes

   As there is no requirement that memory region attributes be aligned
   to or be a multiple of the dcache page size, dcache_read_line() and
   dcache_write_line() must break up the page by memory region. If a
   chunk does not have the cache attribute set, an invalid memory type
   is set, etc., then the chunk is skipped. Those chunks are handled
   in target_xfer_memory() (or target_xfer_memory_partial()).

   This doesn't occur very often. The most common occurrence is when
   the last bit of the .text segment and the first bit of the .data
   segment fall within the same dcache page with a ro/cacheable memory
   region defined for the .text segment and a rw/non-cacheable memory
   region defined for the .data segment. */
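
/* As an illustration: if a 64-byte line covers the last 16 bytes of a
   ro/cacheable .text region and the first 48 bytes of the following
   .data region, dcache_read_line () below fills it with two separate
   target reads, one per region chunk, skipping any chunk whose region
   is write-only. */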

/* The maximum number of lines stored. The total size of the cache is
   equal to DCACHE_SIZE times LINE_SIZE. */
#define DCACHE_DEFAULT_SIZE 4096
static unsigned dcache_size = DCACHE_DEFAULT_SIZE;

/* The default size of a cache line. Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher. */
#define DCACHE_DEFAULT_LINE_SIZE 64
static unsigned dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;
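
/* With the defaults above, the cache can therefore hold up to
   4096 lines * 64 bytes = 256 KiB of target memory. */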

/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address. */

#define LINE_SIZE_MASK(dcache) ((dcache->line_size - 1))
#define XFORM(dcache, x) ((x) & LINE_SIZE_MASK (dcache))
#define MASK(dcache, x) ((x) & ~LINE_SIZE_MASK (dcache))
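
/* A worked example, assuming the default line size of 64 (0x40):
   LINE_SIZE_MASK is then 0x3f, so for an address such as 0x1234567
   MASK yields 0x1234540 (the base address of the line) and XFORM
   yields 0x27 (the offset within the line). */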

struct dcache_block
{
  /* For least-recently-allocated and free lists. */
  struct dcache_block *prev;
  struct dcache_block *next;

  CORE_ADDR addr;        /* address of data */
  int refs;              /* # hits */
  gdb_byte data[1];      /* line_size bytes at given address */
};

struct dcache_struct
{
  splay_tree tree;
  struct dcache_block *oldest; /* least-recently-allocated list. */

  /* The free list is maintained identically to OLDEST to simplify
     the code: we only need one set of accessors. */
  struct dcache_block *freelist;

  /* The number of in-use lines in the cache. */
  int size;
  CORE_ADDR line_size;  /* current line_size. */

  /* The ptid of last inferior to use cache or null_ptid. */
  ptid_t ptid;
};

typedef void (block_func) (struct dcache_block *block, void *param);

static struct dcache_block *dcache_hit (DCACHE *dcache, CORE_ADDR addr);

static int dcache_read_line (DCACHE *dcache, struct dcache_block *db);

static struct dcache_block *dcache_alloc (DCACHE *dcache, CORE_ADDR addr);

static void dcache_info (char *exp, int tty);

void _initialize_dcache (void);

static int dcache_enabled_p = 0; /* OBSOLETE */

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
                       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}

static DCACHE *last_cache; /* Used by info dcache. */

/* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
   *BLIST is not updated (unless it was previously NULL of course).
   This is for the least-recently-allocated list's sake:
   BLIST points to the oldest block.
   ??? This makes for poor cache usage of the free list,
   but is it measurable? */

static void
append_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (*blist)
    {
      block->next = *blist;
      block->prev = (*blist)->prev;
      block->prev->next = block;
      (*blist)->prev = block;
      /* We don't update *BLIST here to maintain the invariant that for the
         least-recently-allocated list *BLIST points to the oldest block. */
    }
  else
    {
      block->next = block;
      block->prev = block;
      *blist = block;
    }
}
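
/* For illustration: appending blocks A, B and C to an empty list
   yields the circular order A -> B -> C -> A with *BLIST still
   pointing at A, the oldest; removing A via remove_block () below
   advances *BLIST to B. */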

/* Remove BLOCK from circular block list BLIST. */

static void
remove_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (block->next == block)
    {
      *blist = NULL;
    }
  else
    {
      block->next->prev = block->prev;
      block->prev->next = block->next;
      /* If we removed the block *BLIST points to, shift it to the next block
         to maintain the invariant that for the least-recently-allocated list
         *BLIST points to the oldest block. */
      if (*blist == block)
        *blist = block->next;
    }
}

/* Iterate over all elements in BLIST, calling FUNC.
   PARAM is passed to FUNC.
   FUNC may remove the block it's passed, but only that block. */

static void
for_each_block (struct dcache_block **blist, block_func *func, void *param)
{
  struct dcache_block *db;

  if (*blist == NULL)
    return;

  db = *blist;
  do
    {
      struct dcache_block *next = db->next;

      func (db, param);
      db = next;
    }
  while (*blist && db != *blist);
}

/* BLOCK_FUNC routine for dcache_free. */

static void
free_block (struct dcache_block *block, void *param)
{
  xfree (block);
}

/* Free a data cache. */

void
dcache_free (DCACHE *dcache)
{
  if (last_cache == dcache)
    last_cache = NULL;

  splay_tree_delete (dcache->tree);
  for_each_block (&dcache->oldest, free_block, NULL);
  for_each_block (&dcache->freelist, free_block, NULL);
  xfree (dcache);
}


/* BLOCK_FUNC function for dcache_invalidate.
   This doesn't remove the block from the oldest list on purpose.
   dcache_invalidate will do it later. */

static void
invalidate_block (struct dcache_block *block, void *param)
{
  DCACHE *dcache = (DCACHE *) param;

  splay_tree_remove (dcache->tree, (splay_tree_key) block->addr);
  append_block (&dcache->freelist, block);
}

/* Free all the data cache blocks, thus discarding all cached data. */

void
dcache_invalidate (DCACHE *dcache)
{
  for_each_block (&dcache->oldest, invalidate_block, dcache);

  dcache->oldest = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;

  if (dcache->line_size != dcache_line_size)
    {
      /* We've been asked to use a different line size.
         All of our freelist blocks are now the wrong size, so free them. */

      for_each_block (&dcache->freelist, free_block, dcache);
      dcache->freelist = NULL;
      dcache->line_size = dcache_line_size;
    }
}

/* Invalidate the line associated with ADDR. */

static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    {
      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
      remove_block (&dcache->oldest, db);
      append_block (&dcache->freelist, db);
      --dcache->size;
    }
}

/* If addr is present in the dcache, return the address of the block
   containing it. Otherwise return NULL. */

static struct dcache_block *
dcache_hit (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  splay_tree_node node = splay_tree_lookup (dcache->tree,
                                            (splay_tree_key) MASK (dcache, addr));

  if (!node)
    return NULL;

  db = (struct dcache_block *) node->value;
  db->refs++;
  return db;
}

/* Fill a cache line from target memory.
   The result is 1 for success, 0 if the (entire) cache line
   wasn't readable. */

static int
dcache_read_line (DCACHE *dcache, struct dcache_block *db)
{
  CORE_ADDR memaddr;
  gdb_byte *myaddr;
  int len;
  int res;
  int reg_len;
  struct mem_region *region;

  len = dcache->line_size;
  memaddr = db->addr;
  myaddr = db->data;

  while (len > 0)
    {
      /* Don't overrun if this block is right at the end of the region. */
      region = lookup_mem_region (memaddr);
      if (region->hi == 0 || memaddr + len < region->hi)
        reg_len = len;
      else
        reg_len = region->hi - memaddr;

      /* Skip non-readable regions. The cache attribute can be ignored,
         since we may be loading this for a stack access. */
      if (region->attrib.mode == MEM_WO)
        {
          memaddr += reg_len;
          myaddr += reg_len;
          len -= reg_len;
          continue;
        }

      res = target_read (&current_target, TARGET_OBJECT_RAW_MEMORY,
                         NULL, myaddr, memaddr, reg_len);
      if (res < reg_len)
        return 0;

      memaddr += res;
      myaddr += res;
      len -= res;
    }

  return 1;
}

/* Get a free cache block, put or keep it on the valid list,
   and return its address. */

static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  if (dcache->size >= dcache_size)
    {
      /* Evict the least recently allocated line. */
      db = dcache->oldest;
      remove_block (&dcache->oldest, db);

      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
    }
  else
    {
      db = dcache->freelist;
      if (db)
        remove_block (&dcache->freelist, db);
      else
        db = xmalloc (offsetof (struct dcache_block, data) +
                      dcache->line_size);

      dcache->size++;
    }

  db->addr = MASK (dcache, addr);
  db->refs = 0;

  /* Put DB at the end of the list, it's the newest. */
  append_block (&dcache->oldest, db);

  splay_tree_insert (dcache->tree, (splay_tree_key) db->addr,
                     (splay_tree_value) db);

  return db;
}
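
/* Note that replacement is in allocation order, not access order:
   once the cache is full (dcache_size lines in use), the next
   dcache_alloc () call recycles the line that was allocated first,
   no matter how many hits it has received since. */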

/* Using the data cache DCACHE, store in *PTR the contents of the byte at
   address ADDR in the remote machine.

   Returns 1 for success, 0 for error. */

static int
dcache_peek_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (!db)
    {
      db = dcache_alloc (dcache, addr);

      if (!dcache_read_line (dcache, db))
        return 0;
    }

  *ptr = db->data[XFORM (dcache, addr)];
  return 1;
}

/* Write the byte at PTR into ADDR in the data cache.

   The caller is responsible for also promptly writing the data
   through to target memory.

   If addr is not in cache, this function does nothing; writing to
   an area of memory which wasn't present in the cache doesn't cause
   it to be loaded in.

   Always return 1 (meaning success) to simplify dcache_xfer_memory. */

static int
dcache_poke_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    db->data[XFORM (dcache, addr)] = *ptr;

  return 1;
}

static int
dcache_splay_tree_compare (splay_tree_key a, splay_tree_key b)
{
  if (a > b)
    return 1;
  else if (a == b)
    return 0;
  else
    return -1;
}

/* Allocate and initialize a data cache. */

DCACHE *
dcache_init (void)
{
  DCACHE *dcache;

  dcache = (DCACHE *) xmalloc (sizeof (*dcache));

  dcache->tree = splay_tree_new (dcache_splay_tree_compare,
                                 NULL,
                                 NULL);

  dcache->oldest = NULL;
  dcache->freelist = NULL;
  dcache->size = 0;
  dcache->line_size = dcache_line_size;
  dcache->ptid = null_ptid;
  last_cache = dcache;

  return dcache;
}
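
/* A sketch of the typical lifecycle (for illustration; the actual
   callers live elsewhere, e.g. in target.c): create the cache with
   dcache_init (), route memory accesses through dcache_xfer_memory ()
   and dcache_update () below, call dcache_invalidate () whenever the
   cached data may have become stale, and dispose of the cache with
   dcache_free (). */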


/* Read or write LEN bytes from inferior memory at MEMADDR, transferring
   to or from debugger address MYADDR. Write to inferior if SHOULD_WRITE is
   nonzero.

   Return the number of bytes actually transferred, or -1 if the
   transfer is not supported or otherwise fails. Return of a non-negative
   value less than LEN indicates that no further transfer is possible.
   NOTE: This is different than the to_xfer_partial interface, in which
   positive values less than LEN mean further transfers may be possible. */
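/* For instance, if LEN is 1024 and the transfer hits an unreadable
   page after 512 bytes, the return value is 512 and the caller should
   not retry the remainder; under the to_xfer_partial convention the
   same value would instead invite another call for the rest. */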

int
dcache_xfer_memory (struct target_ops *ops, DCACHE *dcache,
                    CORE_ADDR memaddr, gdb_byte *myaddr,
                    int len, int should_write)
{
  int i;
  int res;
  int (*xfunc) (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr);

  xfunc = should_write ? dcache_poke_byte : dcache_peek_byte;

  /* If this is a different inferior from what we've recorded,
     flush the cache. */

  if (! ptid_equal (inferior_ptid, dcache->ptid))
    {
      dcache_invalidate (dcache);
      dcache->ptid = inferior_ptid;
    }

  /* Do write-through first, so that if it fails, we don't write to
     the cache at all. */

  if (should_write)
    {
      res = target_write (ops, TARGET_OBJECT_RAW_MEMORY,
                          NULL, myaddr, memaddr, len);
      if (res <= 0)
        return res;
      /* Update LEN to what was actually written. */
      len = res;
    }

  for (i = 0; i < len; i++)
    {
      if (!xfunc (dcache, memaddr + i, myaddr + i))
        {
          /* That failed. Discard its cache line so we don't have a
             partially read line. */
          dcache_invalidate_line (dcache, memaddr + i);
          /* If we're writing, we still wrote LEN bytes. */
          if (should_write)
            return len;
          else
            return i;
        }
    }

  return len;
}

/* FIXME: There would be some benefit to making the cache write-back and
   moving the writeback operation to a higher layer, as it could occur
   after a sequence of smaller writes have been completed (as when a stack
   frame is constructed for an inferior function call). Note that only
   moving it up one level to target_xfer_memory[_partial]() is not
   sufficient since we want to coalesce memory transfers that are
   "logically" connected but not actually a single call to one of the
   memory transfer functions. */

/* Just update any cache lines which are already present. This is called
   by memory_xfer_partial in cases where the access would otherwise not go
   through the cache. */

void
dcache_update (DCACHE *dcache, CORE_ADDR memaddr, gdb_byte *myaddr, int len)
{
  int i;

  for (i = 0; i < len; i++)
    dcache_poke_byte (dcache, memaddr + i, myaddr + i);
}

static void
dcache_print_line (int index)
{
  splay_tree_node n;
  struct dcache_block *db;
  int i, j;

  if (!last_cache)
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  n = splay_tree_min (last_cache->tree);

  for (i = index; i > 0; --i)
    {
      if (!n)
        break;
      n = splay_tree_successor (last_cache->tree, n->key);
    }

  if (!n)
    {
      printf_filtered (_("No such cache line exists.\n"));
      return;
    }

  db = (struct dcache_block *) n->value;

  printf_filtered (_("Line %d: address %s [%d hits]\n"),
                   index, paddress (target_gdbarch (), db->addr), db->refs);

  for (j = 0; j < last_cache->line_size; j++)
    {
      printf_filtered ("%02x ", db->data[j]);

      /* Print a newline every 16 bytes (48 characters). */
      if ((j % 16 == 15) && (j != last_cache->line_size - 1))
        printf_filtered ("\n");
    }
  printf_filtered ("\n");
}

static void
dcache_info (char *exp, int tty)
{
  splay_tree_node n;
  int i, refcount;

  if (exp)
    {
      char *linestart;

      i = strtol (exp, &linestart, 10);
      if (linestart == exp || i < 0)
        {
          printf_filtered (_("Usage: info dcache [linenumber]\n"));
          return;
        }

      dcache_print_line (i);
      return;
    }

  printf_filtered (_("Dcache %u lines of %u bytes each.\n"),
                   dcache_size,
                   last_cache ? (unsigned) last_cache->line_size
                   : dcache_line_size);

  if (!last_cache || ptid_equal (last_cache->ptid, null_ptid))
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  printf_filtered (_("Contains data for %s\n"),
                   target_pid_to_str (last_cache->ptid));

  refcount = 0;

  n = splay_tree_min (last_cache->tree);
  i = 0;

  while (n)
    {
      struct dcache_block *db = (struct dcache_block *) n->value;

      printf_filtered (_("Line %d: address %s [%d hits]\n"),
                       i, paddress (target_gdbarch (), db->addr), db->refs);
      i++;
      refcount += db->refs;

      n = splay_tree_successor (last_cache->tree, n->key);
    }

  printf_filtered (_("Cache state: %d active lines, %d hits\n"), i, refcount);
}

static void
set_dcache_size (char *args, int from_tty,
                 struct cmd_list_element *c)
{
  if (dcache_size == 0)
    {
      dcache_size = DCACHE_DEFAULT_SIZE;
      error (_("Dcache size must be greater than 0."));
    }
  if (last_cache)
    dcache_invalidate (last_cache);
}

static void
set_dcache_line_size (char *args, int from_tty,
                      struct cmd_list_element *c)
{
  if (dcache_line_size < 2
      || (dcache_line_size & (dcache_line_size - 1)) != 0)
    {
      unsigned d = dcache_line_size;
      dcache_line_size = DCACHE_DEFAULT_LINE_SIZE;
      error (_("Invalid dcache line size: %u (must be power of 2)."), d);
    }
  if (last_cache)
    dcache_invalidate (last_cache);
}

static void
set_dcache_command (char *arg, int from_tty)
{
  printf_unfiltered (
    "\"set dcache\" must be followed by the name of a subcommand.\n");
  help_list (dcache_set_list, "set dcache ", -1, gdb_stdout);
}

static void
show_dcache_command (char *args, int from_tty)
{
  cmd_show_list (dcache_show_list, from_tty, "");
}

void
_initialize_dcache (void)
{
  add_setshow_boolean_cmd ("remotecache", class_support,
                           &dcache_enabled_p, _("\
Set cache use for remote targets."), _("\
Show cache use for remote targets."), _("\
This used to enable the data cache for remote targets. The cache\n\
functionality is now controlled by the memory region system and the\n\
\"stack-cache\" flag; \"remotecache\" now does nothing and\n\
exists only for compatibility reasons."),
                           NULL,
                           show_dcache_enabled_p,
                           &setlist, &showlist);

  add_info ("dcache", dcache_info,
            _("\
Print information on the dcache performance.\n\
With no arguments, this command prints the cache configuration and a\n\
summary of each line in the cache. Use \"info dcache <lineno>\" to dump\n\
the contents of a given line."));

  add_prefix_cmd ("dcache", class_obscure, set_dcache_command, _("\
Use this command to set the number of lines in dcache and the line-size."),
                  &dcache_set_list, "set dcache ", /*allow_unknown*/0, &setlist);
  add_prefix_cmd ("dcache", class_obscure, show_dcache_command, _("\
Show dcache settings."),
                  &dcache_show_list, "show dcache ", /*allow_unknown*/0, &showlist);

  add_setshow_uinteger_cmd ("line-size", class_obscure,
                            &dcache_line_size, _("\
Set dcache line size in bytes (must be power of 2)."), _("\
Show dcache line size."),
                            NULL,
                            set_dcache_line_size,
                            NULL,
                            &dcache_set_list, &dcache_show_list);
  add_setshow_uinteger_cmd ("size", class_obscure,
                            &dcache_size, _("\
Set number of dcache lines."), _("\
Show number of dcache lines."),
                            NULL,
                            set_dcache_size,
                            NULL,
                            &dcache_set_list, &dcache_show_list);
}