/* Caching code for GDB, the GNU debugger.

   Copyright (C) 1992, 1993, 1995, 1996, 1998, 1999, 2000, 2001, 2003, 2007,
   2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "dcache.h"
#include "gdbcmd.h"
#include "gdb_string.h"
#include "gdbcore.h"
#include "target.h"
#include "inferior.h"
#include "splay-tree.h"

/* The data cache could lead to incorrect results because it doesn't
   know about volatile variables, thus making it impossible to debug
   functions which use memory mapped I/O devices.  Set the nocache
   memory region attribute in those cases.

   In general the dcache speeds up performance.  Some speed improvement
   comes from the actual caching mechanism, but the major gain is in
   the reduction of the remote protocol overhead; instead of reading
   or writing a large area of memory in 4 byte requests, the cache
   bundles up the requests into LINE_SIZE chunks, reducing overhead
   significantly.  This is most useful when accessing a large amount
   of data, such as when performing a backtrace.

   The cache is a splay tree along with a linked list for replacement.
   Each block caches a LINE_SIZE area of memory.  Within each line we
   remember the address of the line (which must be a multiple of
   LINE_SIZE) and the actual data block.

   Lines are only allocated as needed, so DCACHE_SIZE really specifies the
   *maximum* number of lines in the cache.

   At present, the cache is write-through rather than writeback: as soon
   as data is written to the cache, it is also immediately written to
   the target.  Therefore, cache lines are never "dirty".  Whether a given
   line is valid or not depends on where it is stored in the dcache_struct;
   there is no per-block valid flag.  */

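/* Overview of how the pieces below fit together (a summary of this
   file): reads enter through dcache_xfer_memory and are satisfied one
   byte at a time by dcache_peek_byte, which looks up the containing
   line with dcache_hit and, on a miss, allocates it with dcache_alloc
   and fills it from the target with dcache_read_line.  Writes are
   written through to the target first and then mirrored into any
   lines already present via dcache_poke_byte; a write miss does not
   allocate a line.  */
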
/* NOTE: Interaction of dcache and memory region attributes

   As there is no requirement that memory region attributes be aligned
   to or be a multiple of the dcache page size, dcache_read_line() and
   dcache_write_line() must break up the page by memory region.  If a
   chunk does not have the cache attribute set, an invalid memory type
   is set, etc., then the chunk is skipped.  Those chunks are handled
   in target_xfer_memory() (or target_xfer_memory_partial()).

   This doesn't occur very often.  The most common occurrence is when
   the last bit of the .text segment and the first bit of the .data
   segment fall within the same dcache page with a ro/cacheable memory
   region defined for the .text segment and a rw/non-cacheable memory
   region defined for the .data segment.  */

/* The maximum number of lines stored.  The total size of the cache is
   equal to DCACHE_SIZE times LINE_SIZE.  */
#define DCACHE_SIZE 4096

/* The size of a cache line.  Smaller values reduce the time taken to
   read a single byte and make the cache more granular, but increase
   overhead and reduce the effectiveness of the cache as a prefetcher.  */
#define LINE_SIZE_POWER 6
#define LINE_SIZE (1 << LINE_SIZE_POWER)

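/* With the defaults above, a full cache therefore holds at most
   DCACHE_SIZE * LINE_SIZE = 4096 * 64 bytes = 256 KiB of target data,
   plus per-block bookkeeping.  */
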
/* Each cache block holds LINE_SIZE bytes of data
   starting at a multiple-of-LINE_SIZE address.  */

#define LINE_SIZE_MASK  ((LINE_SIZE - 1))
#define XFORM(x)        ((x) & LINE_SIZE_MASK)
#define MASK(x)         ((x) & ~LINE_SIZE_MASK)

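/* For example, with the default LINE_SIZE of 64: for an address of
   0x1234, MASK yields the line's base address 0x1200 and XFORM yields
   0x34, the offset of that byte within the line's data[] array.  */
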
struct dcache_block
{
  /* for least-recently-allocated and free lists */
  struct dcache_block *prev;
  struct dcache_block *next;

  CORE_ADDR addr;             /* address of data */
  gdb_byte data[LINE_SIZE];   /* bytes at given address */
  int refs;                   /* # hits */
};

struct dcache_struct
{
  splay_tree tree;
  struct dcache_block *oldest;  /* least-recently-allocated list */

  /* The free list is maintained identically to OLDEST to simplify
     the code: we only need one set of accessors.  */
  struct dcache_block *freelist;

  /* The number of in-use lines in the cache.  */
  int size;

  /* The ptid of last inferior to use cache or null_ptid.  */
  ptid_t ptid;
};

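/* As noted above there is no per-block valid flag: a line holds valid
   data exactly when its block is present in TREE and on the OLDEST
   list, while blocks on FREELIST are just allocated storage awaiting
   reuse.  */
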
typedef void (block_func) (struct dcache_block *block, void *param);

static struct dcache_block *dcache_hit (DCACHE *dcache, CORE_ADDR addr);

static int dcache_read_line (DCACHE *dcache, struct dcache_block *db);

static struct dcache_block *dcache_alloc (DCACHE *dcache, CORE_ADDR addr);

static void dcache_info (char *exp, int tty);

void _initialize_dcache (void);

static int dcache_enabled_p = 0; /* OBSOLETE */

static void
show_dcache_enabled_p (struct ui_file *file, int from_tty,
                       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Deprecated remotecache flag is %s.\n"), value);
}

static DCACHE *last_cache; /* Used by info dcache */

/* Add BLOCK to circular block list BLIST, behind the block at *BLIST.
   *BLIST is not updated (unless it was previously NULL of course).
   This is for the least-recently-allocated list's sake:
   BLIST points to the oldest block.
   ??? This makes for poor cache usage of the free list,
   but is it measurable?  */

static void
append_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (*blist)
    {
      block->next = *blist;
      block->prev = (*blist)->prev;
      block->prev->next = block;
      (*blist)->prev = block;
      /* We don't update *BLIST here to maintain the invariant that for the
         least-recently-allocated list *BLIST points to the oldest block.  */
    }
  else
    {
      block->next = block;
      block->prev = block;
      *blist = block;
    }
}

/* Remove BLOCK from circular block list BLIST.  */

static void
remove_block (struct dcache_block **blist, struct dcache_block *block)
{
  if (block->next == block)
    {
      *blist = NULL;
    }
  else
    {
      block->next->prev = block->prev;
      block->prev->next = block->next;
      /* If we removed the block *BLIST points to, shift it to the next block
         to maintain the invariant that for the least-recently-allocated list
         *BLIST points to the oldest block.  */
      if (*blist == block)
        *blist = block->next;
    }
}

/* Iterate over all elements in BLIST, calling FUNC.
   PARAM is passed to FUNC.
   FUNC may remove the block it's passed, but only that block.  */

static void
for_each_block (struct dcache_block **blist, block_func *func, void *param)
{
  struct dcache_block *db;

  if (*blist == NULL)
    return;

  db = *blist;
  do
    {
      struct dcache_block *next = db->next;

      func (db, param);
      db = next;
    }
  while (*blist && db != *blist);
}

/* BLOCK_FUNC function for dcache_invalidate.
   This doesn't remove the block from the oldest list on purpose.
   dcache_invalidate will do it later.  */

static void
invalidate_block (struct dcache_block *block, void *param)
{
  DCACHE *dcache = (DCACHE *) param;

  splay_tree_remove (dcache->tree, (splay_tree_key) block->addr);
  append_block (&dcache->freelist, block);
}

/* Free all the data cache blocks, thus discarding all cached data.  */

void
dcache_invalidate (DCACHE *dcache)
{
  for_each_block (&dcache->oldest, invalidate_block, dcache);

  dcache->oldest = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;
}

/* Invalidate the line associated with ADDR.  */

static void
dcache_invalidate_line (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    {
      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
      remove_block (&dcache->oldest, db);
      append_block (&dcache->freelist, db);
      --dcache->size;
    }
}

/* If addr is present in the dcache, return the address of the block
   containing it.  Otherwise return NULL.  */

static struct dcache_block *
dcache_hit (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  splay_tree_node node = splay_tree_lookup (dcache->tree,
                                            (splay_tree_key) MASK (addr));

  if (!node)
    return NULL;

  db = (struct dcache_block *) node->value;
  db->refs++;
  return db;
}

/* Fill a cache line from target memory.
   The result is 1 for success, 0 if the (entire) cache line
   wasn't readable.  */

static int
dcache_read_line (DCACHE *dcache, struct dcache_block *db)
{
  CORE_ADDR memaddr;
  gdb_byte *myaddr;
  int len;
  int res;
  int reg_len;
  struct mem_region *region;

  len = LINE_SIZE;
  memaddr = db->addr;
  myaddr = db->data;

  while (len > 0)
    {
      /* Don't overrun if this block is right at the end of the region.  */
      region = lookup_mem_region (memaddr);
      if (region->hi == 0 || memaddr + len < region->hi)
        reg_len = len;
      else
        reg_len = region->hi - memaddr;

      /* Skip non-readable regions.  The cache attribute can be ignored,
         since we may be loading this for a stack access.  */
      if (region->attrib.mode == MEM_WO)
        {
          memaddr += reg_len;
          myaddr += reg_len;
          len -= reg_len;
          continue;
        }

      res = target_read (&current_target, TARGET_OBJECT_RAW_MEMORY,
                         NULL, myaddr, memaddr, reg_len);
      if (res < reg_len)
        return 0;

      memaddr += res;
      myaddr += res;
      len -= res;
    }

  return 1;
}

/* Get a free cache block, put or keep it on the valid list,
   and return its address.  */

static struct dcache_block *
dcache_alloc (DCACHE *dcache, CORE_ADDR addr)
{
  struct dcache_block *db;

  if (dcache->size >= DCACHE_SIZE)
    {
      /* Evict the least recently allocated line.  */
      db = dcache->oldest;
      remove_block (&dcache->oldest, db);

      splay_tree_remove (dcache->tree, (splay_tree_key) db->addr);
    }
  else
    {
      db = dcache->freelist;
      if (db)
        remove_block (&dcache->freelist, db);
      else
        db = xmalloc (sizeof (struct dcache_block));

      dcache->size++;
    }

  db->addr = MASK (addr);
  db->refs = 0;

  /* Put DB at the end of the list, it's the newest.  */
  append_block (&dcache->oldest, db);

  splay_tree_insert (dcache->tree, (splay_tree_key) db->addr,
                     (splay_tree_value) db);

  return db;
}

/* Using the data cache DCACHE, store in *PTR the contents of the byte at
   address ADDR in the remote machine.

   Returns 1 for success, 0 for error.  */

static int
dcache_peek_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (!db)
    {
      db = dcache_alloc (dcache, addr);

      if (!dcache_read_line (dcache, db))
        return 0;
    }

  *ptr = db->data[XFORM (addr)];
  return 1;
}

/* Write the byte at PTR into ADDR in the data cache.

   The caller is responsible for also promptly writing the data
   through to target memory.

   If addr is not in cache, this function does nothing; writing to
   an area of memory which wasn't present in the cache doesn't cause
   it to be loaded in.

   Always return 1 (meaning success) to simplify dcache_xfer_memory.  */

static int
dcache_poke_byte (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr)
{
  struct dcache_block *db = dcache_hit (dcache, addr);

  if (db)
    db->data[XFORM (addr)] = *ptr;

  return 1;
}

static int
dcache_splay_tree_compare (splay_tree_key a, splay_tree_key b)
{
  if (a > b)
    return 1;
  else if (a == b)
    return 0;
  else
    return -1;
}

/* Allocate and initialize a data cache.  */

DCACHE *
dcache_init (void)
{
  DCACHE *dcache;

  dcache = (DCACHE *) xmalloc (sizeof (*dcache));

  dcache->tree = splay_tree_new (dcache_splay_tree_compare,
                                 NULL,
                                 NULL);

  dcache->oldest = NULL;
  dcache->freelist = NULL;
  dcache->size = 0;
  dcache->ptid = null_ptid;
  last_cache = dcache;

  return dcache;
}

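/* Sketch of the expected lifecycle (illustrative only; in GDB the
   target layer owns the cache): allocate with dcache_init, route
   memory accesses through dcache_xfer_memory (and dcache_update for
   accesses that bypass the cache), call dcache_invalidate whenever
   cached target memory may have become stale, and release everything
   with dcache_free.  */
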
/* BLOCK_FUNC routine for dcache_free.  */

static void
free_block (struct dcache_block *block, void *param)
{
  free (block);
}

/* Free a data cache.  */

void
dcache_free (DCACHE *dcache)
{
  if (last_cache == dcache)
    last_cache = NULL;

  splay_tree_delete (dcache->tree);
  for_each_block (&dcache->oldest, free_block, NULL);
  for_each_block (&dcache->freelist, free_block, NULL);
  xfree (dcache);
}

/* Read or write LEN bytes from inferior memory at MEMADDR, transferring
   to or from debugger address MYADDR.  Write to inferior if SHOULD_WRITE is
   nonzero.

   Return the number of bytes actually transferred, or -1 if the
   transfer is not supported or otherwise fails.  Return of a non-negative
   value less than LEN indicates that no further transfer is possible.
   NOTE: This is different than the to_xfer_partial interface, in which
   positive values less than LEN mean further transfers may be possible.  */

int
dcache_xfer_memory (struct target_ops *ops, DCACHE *dcache,
                    CORE_ADDR memaddr, gdb_byte *myaddr,
                    int len, int should_write)
{
  int i;
  int res;
  int (*xfunc) (DCACHE *dcache, CORE_ADDR addr, gdb_byte *ptr);

  xfunc = should_write ? dcache_poke_byte : dcache_peek_byte;

  /* If this is a different inferior from what we've recorded,
     flush the cache.  */

  if (! ptid_equal (inferior_ptid, dcache->ptid))
    {
      dcache_invalidate (dcache);
      dcache->ptid = inferior_ptid;
    }

  /* Do write-through first, so that if it fails, we don't write to
     the cache at all.  */

  if (should_write)
    {
      res = target_write (ops, TARGET_OBJECT_RAW_MEMORY,
                          NULL, myaddr, memaddr, len);
      if (res <= 0)
        return res;
      /* Update LEN to what was actually written.  */
      len = res;
    }

  for (i = 0; i < len; i++)
    {
      if (!xfunc (dcache, memaddr + i, myaddr + i))
        {
          /* That failed.  Discard its cache line so we don't have a
             partially read line.  */
          dcache_invalidate_line (dcache, memaddr + i);
          /* If we're writing, we still wrote LEN bytes.  */
          if (should_write)
            return len;
          else
            return i;
        }
    }

  return len;
}

/* FIXME: There would be some benefit to making the cache write-back and
   moving the writeback operation to a higher layer, as it could occur
   after a sequence of smaller writes have been completed (as when a stack
   frame is constructed for an inferior function call).  Note that only
   moving it up one level to target_xfer_memory[_partial]() is not
   sufficient since we want to coalesce memory transfers that are
   "logically" connected but not actually a single call to one of the
   memory transfer functions.  */

/* Just update any cache lines which are already present.  This is called
   by memory_xfer_partial in cases where the access would otherwise not go
   through the cache.  */

void
dcache_update (DCACHE *dcache, CORE_ADDR memaddr, gdb_byte *myaddr, int len)
{
  int i;

  for (i = 0; i < len; i++)
    dcache_poke_byte (dcache, memaddr + i, myaddr + i);
}

static void
dcache_print_line (int index)
{
  splay_tree_node n;
  struct dcache_block *db;
  int i, j;

  if (!last_cache)
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  n = splay_tree_min (last_cache->tree);

  for (i = index; i > 0; --i)
    {
      if (!n)
        break;
      n = splay_tree_successor (last_cache->tree, n->key);
    }

  if (!n)
    {
      printf_filtered (_("No such cache line exists.\n"));
      return;
    }

  db = (struct dcache_block *) n->value;

  printf_filtered (_("Line %d: address %s [%d hits]\n"),
                   index, paddress (target_gdbarch, db->addr), db->refs);

  for (j = 0; j < LINE_SIZE; j++)
    {
      printf_filtered ("%02x ", db->data[j]);

      /* Print a newline every 16 bytes (48 characters).  */
      if ((j % 16 == 15) && (j != LINE_SIZE - 1))
        printf_filtered ("\n");
    }
  printf_filtered ("\n");
}

static void
dcache_info (char *exp, int tty)
{
  splay_tree_node n;
  int i, refcount;

  if (exp)
    {
      char *linestart;

      i = strtol (exp, &linestart, 10);
      if (linestart == exp || i < 0)
        {
          printf_filtered (_("Usage: info dcache [linenumber]\n"));
          return;
        }

      dcache_print_line (i);
      return;
    }

  printf_filtered (_("Dcache line width %d, maximum size %d\n"),
                   LINE_SIZE, DCACHE_SIZE);

  if (!last_cache || ptid_equal (last_cache->ptid, null_ptid))
    {
      printf_filtered (_("No data cache available.\n"));
      return;
    }

  printf_filtered (_("Contains data for %s\n"),
                   target_pid_to_str (last_cache->ptid));

  refcount = 0;

  n = splay_tree_min (last_cache->tree);
  i = 0;

  while (n)
    {
      struct dcache_block *db = (struct dcache_block *) n->value;

      printf_filtered (_("Line %d: address %s [%d hits]\n"),
                       i, paddress (target_gdbarch, db->addr), db->refs);
      i++;
      refcount += db->refs;

      n = splay_tree_successor (last_cache->tree, n->key);
    }

  printf_filtered (_("Cache state: %d active lines, %d hits\n"), i, refcount);
}

void
_initialize_dcache (void)
{
  add_setshow_boolean_cmd ("remotecache", class_support,
                           &dcache_enabled_p, _("\
Set cache use for remote targets."), _("\
Show cache use for remote targets."), _("\
This used to enable the data cache for remote targets.  The cache\n\
functionality is now controlled by the memory region system and the\n\
\"stack-cache\" flag; \"remotecache\" now does nothing and\n\
exists only for compatibility reasons."),
                           NULL,
                           show_dcache_enabled_p,
                           &setlist, &showlist);

  add_info ("dcache", dcache_info,
            _("\
Print information on the dcache performance.\n\
With no arguments, this command prints the cache configuration and a\n\
summary of each line in the cache.  Use \"info dcache <lineno>\" to dump\n\
the contents of a given line."));
}