/* frv cache model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "libiberty.h"
#include "sim-main.h"
#include "cache.h"
#include "bfd.h"
void
frv_cache_init (SIM_CPU *cpu, FRV_CACHE *cache)
{
  int elements;
  int i, j;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 2;
      if (cache->line_size == 0)
        cache->line_size = 32;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    case bfd_mach_fr550:
      if (cache->configured_sets == 0)
        cache->configured_sets = 128;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    default:
      if (cache->configured_sets == 0)
        cache->configured_sets = 64;
      if (cache->configured_ways == 0)
        cache->configured_ways = 4;
      if (cache->line_size == 0)
        cache->line_size = 64;
      if (cache->memory_latency == 0)
        cache->memory_latency = 20;
      break;
    }

  frv_cache_reconfigure (cpu, cache);

  /* First allocate the cache storage based on the given dimensions.  */
  elements = cache->sets * cache->ways;
  cache->tag_storage = (FRV_CACHE_TAG *)
    zalloc (elements * sizeof (*cache->tag_storage));
  cache->data_storage = (char *) xmalloc (elements * cache->line_size);

  /* Initialize the pipelines and status buffers.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      cache->pipeline[i].requests = NULL;
      cache->pipeline[i].status.flush.valid = 0;
      cache->pipeline[i].status.return_buffer.valid = 0;
      cache->pipeline[i].status.return_buffer.data
        = (char *) xmalloc (cache->line_size);
      for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
        cache->pipeline[i].stages[j].request = NULL;
    }
  cache->BARS.valid = 0;
  cache->NARS.valid = 0;

  /* Now set the cache state.  */
  cache->cpu = cpu;
  cache->statistics.accesses = 0;
  cache->statistics.hits = 0;
}
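
/* For reference, the default geometry selected above (64 sets, 4 ways,
   64-byte lines) gives a 64 * 4 * 64 = 16KB cache; the fr400 settings
   (128 sets, 2 ways, 32-byte lines) likewise come to 8KB.  One
   FRV_CACHE_TAG is allocated per line, zero-initialized by zalloc so that
   all lines start out invalid, unlocked and least recently used.  */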

void
frv_cache_term (FRV_CACHE *cache)
{
  /* Free the cache storage.  */
  free (cache->tag_storage);
  free (cache->data_storage);
  free (cache->pipeline[LS].status.return_buffer.data);
  free (cache->pipeline[LD].status.return_buffer.data);
}

/* Reset the cache configuration based on registers in the cpu.  */
void
frv_cache_reconfigure (SIM_CPU *current_cpu, FRV_CACHE *cache)
{
  int ihsr8;
  int icdm;
  SIM_DESC sd;

  /* Set defaults for fields which are not initialized.  */
  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          ihsr8 = GET_IHSR8 ();
          icdm = GET_IHSR8_ICDM (ihsr8);
          /* If IHSR8.ICDM is set, then the cache becomes a one way cache.  */
          if (icdm)
            {
              cache->sets = cache->sets * cache->ways;
              cache->ways = 1;
              break;
            }
        }
      /* fall through */
    default:
      /* Set the cache to its original settings.  */
      cache->sets = cache->configured_sets;
      cache->ways = cache->configured_ways;
      break;
    }
}

/* Determine whether the given cache is enabled.  */
int
frv_cache_enabled (FRV_CACHE *cache)
{
  SIM_CPU *current_cpu = cache->cpu;
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0) && cache == CPU_INSN_CACHE (current_cpu))
    return 1;
  if (GET_HSR0_DCE (hsr0) && cache == CPU_DATA_CACHE (current_cpu))
    return 1;
  return 0;
}

/* Determine whether the given address is a RAM access, assuming that
   HSR0.RME is set.  */
static int
ram_access (FRV_CACHE *cache, USI address)
{
  int ihsr8;
  int cwe;
  USI start, end, way_size;
  SIM_CPU *current_cpu = cache->cpu;
  SIM_DESC sd = CPU_STATE (current_cpu);

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr550:
      /* IHSR8.DCWE or IHSR8.ICWE determines which ways get RAM access.  */
      ihsr8 = GET_IHSR8 ();
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          start = 0xfe000000;
          end = 0xfe008000;
          cwe = GET_IHSR8_ICWE (ihsr8);
        }
      else
        {
          start = 0xfe400000;
          end = 0xfe408000;
          cwe = GET_IHSR8_DCWE (ihsr8);
        }
      way_size = (end - start) / 4;
      end -= way_size * cwe;
      return address >= start && address < end;
    default:
      break;
    }

  return 1; /* RAM access */
}
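
/* Worked example of the arithmetic above, for the fr550 data cache: the
   window 0xfe400000..0xfe408000 is 32KB, so way_size is 8KB; a DCWE value
   of 2 then shrinks the window by two way-sized blocks, leaving only
   0xfe400000..0xfe404000 to be treated as a RAM access.  */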

/* Determine whether the given address should be accessed without using
   the cache.  */
static int
non_cache_access (FRV_CACHE *cache, USI address)
{
  int hsr0;
  SIM_DESC sd;
  SIM_CPU *current_cpu = cache->cpu;

  sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      if (address >= 0xff000000
          || (address >= 0xfe000000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      break;
    case bfd_mach_fr550:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe007fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe407fff)
        return 1; /* non-cache access */
      break;
    default:
      if (address >= 0xff000000
          || (address >= 0xfeff0000 && address <= 0xfeffffff))
        return 1; /* non-cache access */
      if (cache == CPU_INSN_CACHE (current_cpu))
        {
          if (address >= 0xfe000000 && address <= 0xfe003fff)
            return 1; /* non-cache access */
        }
      else if (address >= 0xfe400000 && address <= 0xfe403fff)
        return 1; /* non-cache access */
      break;
    }

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_RME (hsr0))
    return ram_access (cache, address);

  return 0; /* cache-access */
}
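
/* Summarizing the map above: addresses at or above 0xff000000 and the
   0xfeff0000..0xfeffffff region always bypass the cache, as do the
   machine-specific insn RAM (0xfe000000...) and data RAM (0xfe400000...)
   windows, whose sizes vary by machine; everything else is a cache access
   unless HSR0.RME routes it through ram_access above.  */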

/* Find the cache line corresponding to the given address.
   If it is found then 'return_tag' is set to point to the tag for that line
   and 1 is returned.
   If it is not found, 'return_tag' is set to point to the tag for the least
   recently used line and 0 is returned.  */
static int
get_tag (FRV_CACHE *cache, SI address, FRV_CACHE_TAG **return_tag)
{
  int set;
  int way;
  int bits;
  USI tag;
  FRV_CACHE_TAG *found;
  FRV_CACHE_TAG *available;

  ++cache->statistics.accesses;

  /* First calculate which set this address will fall into.  Do this by
     shifting out the bits representing the offset within the line and
     then keeping enough bits to index the set.  */
  set = address & ~(cache->line_size - 1);
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set >>= 1;
  set &= (cache->sets - 1);

  /* Now search the set for a valid tag which matches this address.  At the
     same time make note of the least recently used tag, which we will return
     if no match is found.  */
  available = NULL;
  tag = CACHE_ADDRESS_TAG (cache, address);
  for (way = 0; way < cache->ways; ++way)
    {
      found = CACHE_TAG (cache, set, way);
      /* This tag is available as the least recently used if it is the
         least recently used seen so far and it is not locked.  */
      if (! found->locked && (available == NULL || available->lru > found->lru))
        available = found;
      if (found->valid && found->tag == tag)
        {
          *return_tag = found;
          ++cache->statistics.hits;
          return 1; /* found it */
        }
    }

  *return_tag = available;
  return 0; /* not found */
}
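
/* A worked example of the indexing in get_tag, assuming the default
   geometry (64 sets, 64-byte lines): for address 0x1234 the low 6 bits
   (0x34) are the offset within the line, the set index is
   (0x1234 >> 6) & 63 == 72 & 63 == 8, and CACHE_ADDRESS_TAG keeps the
   remaining high-order bits for the match against tag->tag.  */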

/* Write the given data out to memory.  */
static void
write_data_to_memory (FRV_CACHE *cache, SI address, char *data, int length)
{
  SIM_CPU *cpu = cache->cpu;
  IADDR pc = CPU_PC_GET (cpu);
  int write_index = 0;

  switch (length)
    {
    case 1:
    default:
      PROFILE_COUNT_WRITE (cpu, address, MODE_QI);
      break;
    case 2:
      PROFILE_COUNT_WRITE (cpu, address, MODE_HI);
      break;
    case 4:
      PROFILE_COUNT_WRITE (cpu, address, MODE_SI);
      break;
    case 8:
      PROFILE_COUNT_WRITE (cpu, address, MODE_DI);
      break;
    }

  for (write_index = 0; write_index < length; ++write_index)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      sim_core_write_unaligned_1 (cpu, pc, write_map, address + write_index,
                                  data[write_index]);
    }
}

/* Write a cache line out to memory.  */
static void
write_line_to_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  SI address = tag->tag;
  int set = CACHE_TAG_SET_NUMBER (cache, tag);
  int bits;
  for (bits = cache->line_size - 1; bits != 0; bits >>= 1)
    set <<= 1;
  address |= set;
  write_data_to_memory (cache, address, tag->line, cache->line_size);
}
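
/* Note on the reconstruction above: tag->tag holds the address with the
   set and offset bits cleared (as produced by CACHE_ADDRESS_TAG), so
   OR-ing the set number, shifted left past the offset bits, back in
   recovers the base address of the line; the offset bits remain zero.  */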

static void
read_data_from_memory (SIM_CPU *current_cpu, SI address, char *buffer,
                       int length)
{
  PCADDR pc = CPU_PC_GET (current_cpu);
  int i;
  PROFILE_COUNT_READ (current_cpu, address, MODE_QI);
  for (i = 0; i < length; ++i)
    {
      /* TODO: Better way to copy memory than a byte at a time?  */
      buffer[i] = sim_core_read_unaligned_1 (current_cpu, pc, read_map,
                                             address + i);
    }
}

/* Fill the given cache line from memory.  */
static void
fill_line_from_memory (FRV_CACHE *cache, FRV_CACHE_TAG *tag, SI address)
{
  int line_alignment;
  SI read_address;
  SIM_CPU *current_cpu = cache->cpu;

  /* If this line is already valid and the cache is in copy-back mode, then
     write this line to memory before refilling it.
     Check the dirty bit first, since it is less likely to be set.  */
  if (tag->dirty && tag->valid)
    {
      int hsr0 = GET_HSR0 ();
      if (GET_HSR0_CBM (hsr0))
        write_line_to_memory (cache, tag);
    }
  else if (tag->line == NULL)
    {
      int line_index = tag - cache->tag_storage;
      tag->line = cache->data_storage + (line_index * cache->line_size);
    }

  line_alignment = cache->line_size - 1;
  read_address = address & ~line_alignment;
  read_data_from_memory (current_cpu, read_address, tag->line,
                         cache->line_size);
  tag->tag = CACHE_ADDRESS_TAG (cache, address);
  tag->valid = 1;
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_most_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru > tag->lru)
        --item->lru;
      ++item;
    }
  tag->lru = cache->ways; /* Mark as most recently used.  */
}

/* Update the LRU information for the tags in the same set as the given tag.  */
static void
set_least_recently_used (FRV_CACHE *cache, FRV_CACHE_TAG *tag)
{
  /* All tags in the same set are contiguous, so find the beginning of the
     set by aligning to the size of a set.  */
  FRV_CACHE_TAG *item = cache->tag_storage + CACHE_TAG_SET_START (cache, tag);
  FRV_CACHE_TAG *limit = item + cache->ways;

  while (item < limit)
    {
      if (item->lru != 0 && item->lru < tag->lru)
        ++item->lru;
      ++item;
    }
  tag->lru = 0; /* Mark as least recently used.  */
}
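
/* In this LRU scheme each tag in a set carries a rank from 0 (least
   recently used) up to cache->ways (most recently used).
   set_most_recently_used promotes a tag to the top rank and demotes every
   tag that outranked it; set_least_recently_used does the reverse.
   get_tag picks its victim as the unlocked tag with the lowest rank.  */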

/* Find the line containing the given address and load it if it is not
   already loaded.
   Returns the tag of the requested line.  */
static FRV_CACHE_TAG *
find_or_retrieve_cache_line (FRV_CACHE *cache, SI address)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found = get_tag (cache, address, &tag);

  /* Fill the line from memory, if it is not valid.  */
  if (! found)
    {
      /* The tag could be NULL if all ways in the set were in use and
         locked.  */
      if (tag == NULL)
        return tag;

      fill_line_from_memory (cache, tag, address);
      tag->dirty = 0;
    }

  /* Update the LRU information for the tags in this set.  */
  set_most_recently_used (cache, tag);

  return tag;
}

static void
copy_line_to_return_buffer (FRV_CACHE *cache, int pipe, FRV_CACHE_TAG *tag,
                            SI address)
{
  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  memcpy (cache->pipeline[pipe].status.return_buffer.data,
          tag->line, cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address
    = address & ~(cache->line_size - 1);
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
copy_memory_to_return_buffer (FRV_CACHE *cache, int pipe, SI address)
{
  address &= ~(cache->line_size - 1);
  read_data_from_memory (cache->cpu, address,
                         cache->pipeline[pipe].status.return_buffer.data,
                         cache->line_size);
  cache->pipeline[pipe].status.return_buffer.address = address;
  cache->pipeline[pipe].status.return_buffer.valid = 1;
}

static void
set_return_buffer_reqno (FRV_CACHE *cache, int pipe, unsigned reqno)
{
  cache->pipeline[pipe].status.return_buffer.reqno = reqno;
}

/* Read data from the given cache.
   Returns the number of cycles required to obtain the data.  */
int
frv_cache_read (FRV_CACHE *cache, int pipe, SI address)
{
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      return 1;
    }

  tag = find_or_retrieve_cache_line (cache, address);

  if (tag == NULL)
    return 0; /* Indicate non-cache-access.  */

  /* A cache line was available for the data.
     Copy the data from the cache line to the output buffer.  */
  copy_line_to_return_buffer (cache, pipe, tag, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Writes data through the given cache.
   The data is assumed to be in target endian order.
   Returns the number of cycles required to write the data.  */
int
frv_cache_write (FRV_CACHE *cache, SI address, char *data, unsigned length)
{
  int copy_back;

  /* See if this data is already in the cache.  */
  SIM_CPU *current_cpu = cache->cpu;
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return 1;
    }

  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  copy_back = GET_HSR0_CBM (hsr0);
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        fill_line_from_memory (cache, tag, address);
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }

  return 1; /* TODO - number of cycles unknown */
}
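
/* Summary of the write policy above: a hit always updates the cache line;
   a miss allocates a line (fill-on-write) only in copy-back mode.  In
   write-through mode, or when every way in the set is locked, the data
   also goes straight to memory and any line touched is left clean.  */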

/* Preload the cache line containing the given address.  Lock the
   data if requested.
   Returns the number of cycles required to load the data.  */
int
frv_cache_preload (FRV_CACHE *cache, SI address, USI length, int lock)
{
  int offset;
  int lines;

  if (non_cache_access (cache, address))
    return 1;

  /* Preload at least 1 line.  */
  if (length == 0)
    length = 1;

  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;

  /* Careful with this loop -- length is unsigned.  */
  for (/**/; lines > 0; --lines)
    {
      FRV_CACHE_TAG *tag = find_or_retrieve_cache_line (cache, address);
      if (lock && tag != NULL)
        tag->locked = 1;
      address += cache->line_size;
    }

  return 1; /* TODO - number of cycles unknown */
}

/* Unlock the cache line containing the given address.
   Returns the number of cycles required to unlock the line.  */
int
frv_cache_unlock (FRV_CACHE *cache, SI address)
{
  FRV_CACHE_TAG *tag;
  int found;

  if (non_cache_access (cache, address))
    return 1;

  found = get_tag (cache, address, &tag);

  if (found)
    tag->locked = 0;

  return 1; /* TODO - number of cycles unknown */
}

static void
invalidate_return_buffer (FRV_CACHE *cache, SI address)
{
  /* If this address is in one of the return buffers, then invalidate that
     return buffer.  */
  address &= ~(cache->line_size - 1);
  if (address == cache->pipeline[LS].status.return_buffer.address)
    cache->pipeline[LS].status.return_buffer.valid = 0;
  if (address == cache->pipeline[LD].status.return_buffer.address)
    cache->pipeline[LD].status.return_buffer.valid = 0;
}

/* Invalidate the cache line containing the given address.  Flush the
   data if requested.
   Returns the number of cycles required to invalidate the line.  */
int
frv_cache_invalidate (FRV_CACHE *cache, SI address, int flush)
{
  /* See if this data is already in the cache.  */
  FRV_CACHE_TAG *tag;
  int found;

  /* Check for non-cache access.  This operation is still performed even if
     the cache is not currently enabled.  */
  if (non_cache_access (cache, address))
    return 1;

  /* If the line is found, invalidate it.  If a flush is requested, then flush
     it if it is dirty.  */
  found = get_tag (cache, address, &tag);
  if (found)
    {
      SIM_CPU *cpu;
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->dirty && flush)
        write_line_to_memory (cache, tag);
      set_least_recently_used (cache, tag);
      tag->valid = 0;
      tag->locked = 0;

      /* If this is the insn cache, then flush the cpu's scache as well.  */
      cpu = cache->cpu;
      if (cache == CPU_INSN_CACHE (cpu))
        scache_flush_cpu (cpu);
    }

  invalidate_return_buffer (cache, address);

  return 1; /* TODO - number of cycles unknown */
}

/* Invalidate the entire cache.  Flush the data if requested.  */
int
frv_cache_invalidate_all (FRV_CACHE *cache, int flush)
{
  int elements = cache->sets * cache->ways;
  FRV_CACHE_TAG *tag = cache->tag_storage;
  SIM_CPU *cpu;
  int i;

  for (i = 0; i < elements; ++i, ++tag)
    {
      /* If a flush is requested, then flush it if it is dirty.  */
      if (tag->valid && tag->dirty && flush)
        write_line_to_memory (cache, tag);
      tag->valid = 0;
      tag->locked = 0;
    }

  /* If this is the insn cache, then flush the cpu's scache as well.  */
  cpu = cache->cpu;
  if (cache == CPU_INSN_CACHE (cpu))
    scache_flush_cpu (cpu);

  /* Invalidate both return buffers.  */
  cache->pipeline[LS].status.return_buffer.valid = 0;
  cache->pipeline[LD].status.return_buffer.valid = 0;

  return 1; /* TODO - number of cycles unknown */
}

/* ---------------------------------------------------------------------------
   Functions for operating the cache in cycle accurate mode.
   ------------------------------------------------------------------------- */
/* Convert a VLIW slot to a cache pipeline index.  */
static int
convert_slot_to_index (int slot)
{
  switch (slot)
    {
    case UNIT_I0:
    case UNIT_C:
      return LS;
    case UNIT_I1:
      return LD;
    default:
      abort ();
    }
  return 0;
}

/* Allocate free chains of cache requests.  */
#define FREE_CHAIN_SIZE 16
static FRV_CACHE_REQUEST *frv_cache_request_free_chain = NULL;
static FRV_CACHE_REQUEST *frv_store_request_free_chain = NULL;

static void
allocate_new_cache_requests (void)
{
  int i;
  frv_cache_request_free_chain = xmalloc (FREE_CHAIN_SIZE
                                          * sizeof (FRV_CACHE_REQUEST));
  for (i = 0; i < FREE_CHAIN_SIZE - 1; ++i)
    {
      frv_cache_request_free_chain[i].next
        = & frv_cache_request_free_chain[i + 1];
    }

  frv_cache_request_free_chain[FREE_CHAIN_SIZE - 1].next = NULL;
}

/* Return the next free request in the queue for the given cache pipeline.  */
static FRV_CACHE_REQUEST *
new_cache_request (void)
{
  FRV_CACHE_REQUEST *req;

  /* Allocate new elements for the free chain if necessary.  */
  if (frv_cache_request_free_chain == NULL)
    allocate_new_cache_requests ();

  req = frv_cache_request_free_chain;
  frv_cache_request_free_chain = req->next;

  return req;
}

/* Return the given cache request to the free chain.  */
static void
free_cache_request (FRV_CACHE_REQUEST *req)
{
  if (req->kind == req_store)
    {
      req->next = frv_store_request_free_chain;
      frv_store_request_free_chain = req;
    }
  else
    {
      req->next = frv_cache_request_free_chain;
      frv_cache_request_free_chain = req;
    }
}

/* Search the free chain for an existing store request with a buffer of the
   requested size.  */
static FRV_CACHE_REQUEST *
new_store_request (int length)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *req;
  for (req = frv_store_request_free_chain; req != NULL; req = req->next)
    {
      if (req->u.store.length == length)
        break;
      prev = req;
    }
  if (req != NULL)
    {
      if (prev == NULL)
        frv_store_request_free_chain = req->next;
      else
        prev->next = req->next;
      return req;
    }

  /* No existing request buffer was found, so make a new one.  */
  req = new_cache_request ();
  req->kind = req_store;
  req->u.store.data = xmalloc (length);
  req->u.store.length = length;
  return req;
}

/* Remove the given request from the given pipeline.  */
static void
pipeline_remove_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *next = request->next;
  FRV_CACHE_REQUEST *prev = request->prev;

  if (prev == NULL)
    p->requests = next;
  else
    prev->next = next;

  if (next != NULL)
    next->prev = prev;
}

/* Add the given request to the given pipeline.  */
static void
pipeline_add_request (FRV_CACHE_PIPELINE *p, FRV_CACHE_REQUEST *request)
{
  FRV_CACHE_REQUEST *prev = NULL;
  FRV_CACHE_REQUEST *item;

  /* Add the request in priority order.  0 is the highest priority.  */
  for (item = p->requests; item != NULL; item = item->next)
    {
      if (item->priority > request->priority)
        break;
      prev = item;
    }

  request->next = item;
  request->prev = prev;
  if (prev == NULL)
    p->requests = request;
  else
    prev->next = request;
  if (item != NULL)
    item->prev = request;
}

/* Requeue the request in the last stage of the given pipeline.  */
static void
pipeline_requeue_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_STAGE *stage = & p->stages[LAST_STAGE];
  FRV_CACHE_REQUEST *req = stage->request;
  stage->request = NULL;
  pipeline_add_request (p, req);
}

/* Return a priority lower than the lowest one in this cache pipeline.
   0 is the highest priority.  */
static int
next_priority (FRV_CACHE *cache, FRV_CACHE_PIPELINE *pipeline)
{
  int i;
  int pipe;
  int lowest = 0;
  FRV_CACHE_REQUEST *req;

  /* Check the priorities of any queued items.  */
  for (req = pipeline->requests; req != NULL; req = req->next)
    if (req->priority > lowest)
      lowest = req->priority;

  /* Check the priorities of items in the pipeline stages.  */
  for (i = FIRST_STAGE; i < FRV_CACHE_STAGES; ++i)
    {
      FRV_CACHE_STAGE *stage = & pipeline->stages[i];
      if (stage->request != NULL && stage->request->priority > lowest)
        lowest = stage->request->priority;
    }

  /* Check the priorities of load requests waiting in WAR.  These are one
     higher than the request that spawned them.  */
  for (i = 0; i < NUM_WARS; ++i)
    {
      FRV_CACHE_WAR *war = & pipeline->WAR[i];
      if (war->valid && war->priority > lowest)
        lowest = war->priority + 1;
    }

  /* Check the priorities of any BARS or NARS associated with this pipeline.
     These are one higher than the request that spawned them.  */
  pipe = pipeline - cache->pipeline;
  if (cache->BARS.valid && cache->BARS.pipe == pipe
      && cache->BARS.priority > lowest)
    lowest = cache->BARS.priority + 1;
  if (cache->NARS.valid && cache->NARS.pipe == pipe
      && cache->NARS.priority > lowest)
    lowest = cache->NARS.priority + 1;

  /* Return a priority 2 lower than the lowest found.  This allows a WAR
     request to be generated with a priority greater than this but less than
     the next higher priority request.  */
  return lowest + 2;
}
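
/* Numeric example, since 0 is the highest priority: if the largest
   (i.e. lowest-priority) value found above is 10, the new request gets
   12, and a WAR request it may later spawn gets 12 - 1 == 11 (see
   wait_in_WAR) -- ahead of its parent but still behind every existing
   request.  */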

static void
add_WAR_request (FRV_CACHE_PIPELINE* pipeline, FRV_CACHE_WAR *war)
{
  /* Add the WAR request to the indexed pipeline.  */
  FRV_CACHE_REQUEST *req = new_cache_request ();
  req->kind = req_WAR;
  req->reqno = war->reqno;
  req->priority = war->priority;
  req->address = war->address;
  req->u.WAR.preload = war->preload;
  req->u.WAR.lock = war->lock;
  pipeline_add_request (pipeline, req);
}

/* Remove the next request from the given pipeline and return it.  */
static FRV_CACHE_REQUEST *
pipeline_next_request (FRV_CACHE_PIPELINE *p)
{
  FRV_CACHE_REQUEST *first = p->requests;
  if (first != NULL)
    pipeline_remove_request (p, first);
  return first;
}

/* Return the request which is at the given stage of the given pipeline.  */
static FRV_CACHE_REQUEST *
pipeline_stage_request (FRV_CACHE_PIPELINE *p, int stage)
{
  return p->stages[stage].request;
}

static void
advance_pipelines (FRV_CACHE *cache)
{
  int stage;
  int pipe;
  FRV_CACHE_PIPELINE *pipelines = cache->pipeline;

  /* Free the final stage requests.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req = pipelines[pipe].stages[LAST_STAGE].request;
      if (req != NULL)
        free_cache_request (req);
    }

  /* Shuffle the requests along the pipeline.  */
  for (stage = LAST_STAGE; stage > FIRST_STAGE; --stage)
    {
      for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
        pipelines[pipe].stages[stage] = pipelines[pipe].stages[stage - 1];
    }

  /* Add a new request to the pipeline.  */
  for (pipe = 0; pipe < FRV_CACHE_PIPELINES; ++pipe)
    pipelines[pipe].stages[FIRST_STAGE].request
      = pipeline_next_request (& pipelines[pipe]);
}

/* Handle a request for a load from the given address.  */
void
frv_cache_request_load (FRV_CACHE *cache, unsigned reqno, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the load request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_load;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

void
frv_cache_request_store (FRV_CACHE *cache, SI address,
                         int slot, char *data, unsigned length)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the store request to the indexed pipeline.  */
  req = new_store_request (length);
  req->kind = req_store;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.store.length = length;
  memcpy (req->u.store.data, data, length);

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to invalidate the cache line containing the given address.
   Flush the data if requested.  */
void
frv_cache_request_invalidate (FRV_CACHE *cache, unsigned reqno, SI address,
                              int slot, int all, int flush)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the invalidate request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_invalidate;
  req->reqno = reqno;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.invalidate.all = all;
  req->u.invalidate.flush = flush;

  pipeline_add_request (pipeline, req);
}

/* Handle a request to preload the cache line containing the given address.  */
void
frv_cache_request_preload (FRV_CACHE *cache, SI address,
                           int slot, int length, int lock)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the preload request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_preload;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;
  req->u.preload.length = length;
  req->u.preload.lock = lock;

  pipeline_add_request (pipeline, req);
  invalidate_return_buffer (cache, address);
}

/* Handle a request to unlock the cache line containing the given address.  */
void
frv_cache_request_unlock (FRV_CACHE *cache, SI address, int slot)
{
  FRV_CACHE_REQUEST *req;

  /* slot is a UNIT_*.  Convert it to a cache pipeline index.  */
  int pipe = convert_slot_to_index (slot);
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Add the unlock request to the indexed pipeline.  */
  req = new_cache_request ();
  req->kind = req_unlock;
  req->reqno = NO_REQNO;
  req->priority = next_priority (cache, pipeline);
  req->address = address;

  pipeline_add_request (pipeline, req);
}

/* Check whether this address interferes with a pending request of
   higher priority.  */
static int
address_interference (FRV_CACHE *cache, SI address, FRV_CACHE_REQUEST *req,
                      int pipe)
{
  int i, j;
  int line_mask = ~(cache->line_size - 1);
  int other_pipe;
  int priority = req->priority;
  FRV_CACHE_REQUEST *other_req;
  SI other_address;
  SI all_address;

  address &= line_mask;
  all_address = -1 & line_mask;

  /* Check for collisions in the queue for this pipeline.  */
  for (other_req = cache->pipeline[pipe].requests;
       other_req != NULL;
       other_req = other_req->next)
    {
      other_address = other_req->address & line_mask;
      if ((address == other_address || address == all_address)
          && priority > other_req->priority)
        return 1;
    }

  /* Check for a collision in the other pipeline.  */
  other_pipe = pipe ^ 1;
  other_req = cache->pipeline[other_pipe].stages[LAST_STAGE].request;
  if (other_req != NULL)
    {
      other_address = other_req->address & line_mask;
      if (address == other_address || address == all_address)
        return 1;
    }

  /* Check for a collision with load requests waiting in WAR.  */
  for (i = LS; i < FRV_CACHE_PIPELINES; ++i)
    {
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & cache->pipeline[i].WAR[j];
          if (war->valid
              && (address == (war->address & line_mask)
                  || address == all_address)
              && priority > war->priority)
            return 1;
        }
      /* If this is not a WAR request, then yield to any WAR requests in
         either pipeline.  */
      if (req->kind != req_WAR)
        {
          for (j = FIRST_STAGE; j < FRV_CACHE_STAGES; ++j)
            {
              other_req = cache->pipeline[i].stages[j].request;
              if (other_req != NULL && other_req->kind == req_WAR)
                return 1;
            }
        }
    }

  /* Check for a collision with load requests waiting in ARS.  */
  if (cache->BARS.valid
      && (address == (cache->BARS.address & line_mask)
          || address == all_address)
      && priority > cache->BARS.priority)
    return 1;
  if (cache->NARS.valid
      && (address == (cache->NARS.address & line_mask)
          || address == all_address)
      && priority > cache->NARS.priority)
    return 1;

  return 0;
}

/* Wait for a free WAR register in BARS or NARS.  */
static void
wait_for_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  if (! cache->BARS.valid)
    {
      cache->BARS.pipe = pipe;
      cache->BARS.reqno = req->reqno;
      cache->BARS.address = req->address;
      cache->BARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->BARS.preload = 0;
          cache->BARS.lock = 0;
          break;
        case req_store:
          cache->BARS.preload = 1;
          cache->BARS.lock = 0;
          break;
        case req_preload:
          cache->BARS.preload = 1;
          cache->BARS.lock = req->u.preload.lock;
          break;
        }
      cache->BARS.valid = 1;
      return;
    }
  if (! cache->NARS.valid)
    {
      cache->NARS.pipe = pipe;
      cache->NARS.reqno = req->reqno;
      cache->NARS.address = req->address;
      cache->NARS.priority = req->priority - 1;
      switch (req->kind)
        {
        case req_load:
          cache->NARS.preload = 0;
          cache->NARS.lock = 0;
          break;
        case req_store:
          cache->NARS.preload = 1;
          cache->NARS.lock = 0;
          break;
        case req_preload:
          cache->NARS.preload = 1;
          cache->NARS.lock = req->u.preload.lock;
          break;
        }
      cache->NARS.valid = 1;
      return;
    }
  /* All wait registers are busy, so resubmit this request.  */
  pipeline_requeue_request (pipeline);
}

/* Find a free WAR register and wait for memory to fetch the data.  */
static void
wait_in_WAR (FRV_CACHE* cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int war;
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];

  /* Find a free WAR to hold this request.  */
  for (war = 0; war < NUM_WARS; ++war)
    if (! pipeline->WAR[war].valid)
      break;
  if (war >= NUM_WARS)
    {
      wait_for_WAR (cache, pipe, req);
      return;
    }

  pipeline->WAR[war].address = req->address;
  pipeline->WAR[war].reqno = req->reqno;
  pipeline->WAR[war].priority = req->priority - 1;
  pipeline->WAR[war].latency = cache->memory_latency + 1;
  switch (req->kind)
    {
    case req_load:
      pipeline->WAR[war].preload = 0;
      pipeline->WAR[war].lock = 0;
      break;
    case req_store:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = 0;
      break;
    case req_preload:
      pipeline->WAR[war].preload = 1;
      pipeline->WAR[war].lock = req->u.preload.lock;
      break;
    }
  pipeline->WAR[war].valid = 1;
}
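
/* The waiting hierarchy, as implemented above: each pipeline has NUM_WARS
   WAR registers; when all are busy a request falls back to the cache-wide
   BARS register, then to NARS, and if both are occupied it is simply
   requeued.  decrease_latencies drains a WAR when its memory latency
   expires, and move_ARS_to_WAR then refills the freed WAR from
   BARS/NARS.  */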

static void
handle_req_load (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      int found = get_tag (cache, address, &tag);

      /* If the data was found, return it to the caller.  */
      if (found)
        {
          set_most_recently_used (cache, tag);
          copy_line_to_return_buffer (cache, pipe, tag, address);
          set_return_buffer_reqno (cache, pipe, req->reqno);
          return;
        }
    }

  /* The data is not in the cache or this is a non-cache access.  We need to
     wait for the memory unit to fetch it.  Store this request in the WAR in
     the meantime.  */
  wait_in_WAR (cache, pipe, req);
}

static void
handle_req_preload (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  int found;
  FRV_CACHE_TAG *tag;
  int length;
  int lock;
  int offset;
  int lines;
  int line;
  SI address = req->address;
  SI cur_address;

  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    return;

  /* Preload at least 1 line.  */
  length = req->u.preload.length;
  if (length == 0)
    length = 1;

  /* Make sure that this request does not interfere with a pending request.  */
  offset = address & (cache->line_size - 1);
  lines = 1 + (offset + length - 1) / cache->line_size;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If this address interferes with an existing request,
         then requeue it.  */
      if (address_interference (cache, cur_address, req, pipe))
        {
          pipeline_requeue_request (& cache->pipeline[pipe]);
          return;
        }
      cur_address += cache->line_size;
    }

  /* Now process each cache line.  */
  lock = req->u.preload.lock;
  cur_address = address & ~(cache->line_size - 1);
  for (line = 0; line < lines; ++line)
    {
      /* If the data was found, then lock it if requested.  */
      found = get_tag (cache, cur_address, &tag);
      if (found)
        {
          if (lock)
            tag->locked = 1;
        }
      else
        {
          /* The data is not in the cache.  We need to wait for the memory
             unit to fetch it.  Store this request in the WAR in the
             meantime.  */
          wait_in_WAR (cache, pipe, req);
        }
      cur_address += cache->line_size;
    }
}

static void
handle_req_store (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  SIM_CPU *current_cpu;
  FRV_CACHE_TAG *tag;
  int found;
  int copy_back;
  SI address = req->address;
  char *data = req->u.store.data;
  int length = req->u.store.length;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (& cache->pipeline[pipe]);
      return;
    }

  /* Non-cache access.  Write the data directly to memory.  */
  if (! frv_cache_enabled (cache) || non_cache_access (cache, address))
    {
      write_data_to_memory (cache, address, data, length);
      return;
    }

  /* See if the data is in the cache.  */
  found = get_tag (cache, address, &tag);

  /* Write the data to the cache line if one was available and if it is
     either a hit or a miss in copy-back mode.
     The tag may be NULL if all ways were in use and locked on a miss.  */
  current_cpu = cache->cpu;
  copy_back = GET_HSR0_CBM (GET_HSR0 ());
  if (tag != NULL && (found || copy_back))
    {
      int line_offset;
      /* Load the line from memory first, if it was a miss.  */
      if (! found)
        {
          /* We need to wait for the memory unit to fetch the data.
             Store this request in the WAR and requeue the store request.  */
          wait_in_WAR (cache, pipe, req);
          pipeline_requeue_request (& cache->pipeline[pipe]);
          /* Decrement the counts of accesses and hits because when the
             requeued request is processed again, it will appear to be a new
             access and a hit.  */
          --cache->statistics.accesses;
          --cache->statistics.hits;
          return;
        }
      line_offset = address & (cache->line_size - 1);
      memcpy (tag->line + line_offset, data, length);
      invalidate_return_buffer (cache, address);
      tag->dirty = 1;

      /* Update the LRU information for the tags in this set.  */
      set_most_recently_used (cache, tag);
    }

  /* Write the data to memory if there was no line available or we are in
     write-through (not copy-back) mode.  */
  if (tag == NULL || ! copy_back)
    {
      write_data_to_memory (cache, address, data, length);
      if (tag != NULL)
        tag->dirty = 0;
    }
}

static void
handle_req_invalidate (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;
  SI interfere_address = req->u.invalidate.all ? -1 : address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, interfere_address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Invalidate the cache line now.  This function already checks for
     non-cache access.  */
  if (req->u.invalidate.all)
    frv_cache_invalidate_all (cache, req->u.invalidate.flush);
  else
    frv_cache_invalidate (cache, address, req->u.invalidate.flush);
  if (req->u.invalidate.flush)
    {
      pipeline->status.flush.reqno = req->reqno;
      pipeline->status.flush.address = address;
      pipeline->status.flush.valid = 1;
    }
}

static void
handle_req_unlock (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
  SI address = req->address;

  /* If this address interferes with an existing request, then requeue it.  */
  if (address_interference (cache, address, req, pipe))
    {
      pipeline_requeue_request (pipeline);
      return;
    }

  /* Unlock the cache line.  This function checks for non-cache access.  */
  frv_cache_unlock (cache, address);
}

static void
handle_req_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_REQUEST *req)
{
  FRV_CACHE_TAG *tag;
  SI address = req->address;

  if (frv_cache_enabled (cache) && ! non_cache_access (cache, address))
    {
      /* Look for the data in the cache.  The statistics of cache hit or
         miss have already been recorded, so save and restore the stats
         before and after obtaining the cache line.  */
      FRV_CACHE_STATISTICS save_stats = cache->statistics;
      tag = find_or_retrieve_cache_line (cache, address);
      cache->statistics = save_stats;
      if (tag != NULL)
        {
          if (! req->u.WAR.preload)
            {
              copy_line_to_return_buffer (cache, pipe, tag, address);
              set_return_buffer_reqno (cache, pipe, req->reqno);
            }
          else
            {
              invalidate_return_buffer (cache, address);
              if (req->u.WAR.lock)
                tag->locked = 1;
            }
          return;
        }
    }

  /* All cache lines in the set were locked, so just copy the data to the
     return buffer directly.  */
  if (! req->u.WAR.preload)
    {
      copy_memory_to_return_buffer (cache, pipe, address);
      set_return_buffer_reqno (cache, pipe, req->reqno);
    }
}

/* Resolve any conflicts and/or execute the given requests.  */
static void
arbitrate_requests (FRV_CACHE *cache)
{
  int pipe;
  /* Simply execute the requests in the final pipeline stages.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_REQUEST *req
        = pipeline_stage_request (& cache->pipeline[pipe], LAST_STAGE);
      /* Make sure that there is a request to handle.  */
      if (req == NULL)
        continue;

      /* Handle the request.  */
      switch (req->kind)
        {
        case req_load:
          handle_req_load (cache, pipe, req);
          break;
        case req_store:
          handle_req_store (cache, pipe, req);
          break;
        case req_invalidate:
          handle_req_invalidate (cache, pipe, req);
          break;
        case req_preload:
          handle_req_preload (cache, pipe, req);
          break;
        case req_unlock:
          handle_req_unlock (cache, pipe, req);
          break;
        case req_WAR:
          handle_req_WAR (cache, pipe, req);
          break;
        default:
          abort ();
        }
    }
}

/* Move a waiting ARS register to a free WAR register.  */
static void
move_ARS_to_WAR (FRV_CACHE *cache, int pipe, FRV_CACHE_WAR *war)
{
  /* If BARS is valid for this pipe, then move it to the given WAR.  Move
     NARS to BARS if it is valid.  */
  if (cache->BARS.valid && cache->BARS.pipe == pipe)
    {
      war->address = cache->BARS.address;
      war->reqno = cache->BARS.reqno;
      war->priority = cache->BARS.priority;
      war->preload = cache->BARS.preload;
      war->lock = cache->BARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      if (cache->NARS.valid)
        {
          cache->BARS = cache->NARS;
          cache->NARS.valid = 0;
        }
      else
        cache->BARS.valid = 0;
      return;
    }
  /* If NARS is valid for this pipe, then move it to the given WAR.  */
  if (cache->NARS.valid && cache->NARS.pipe == pipe)
    {
      war->address = cache->NARS.address;
      war->reqno = cache->NARS.reqno;
      war->priority = cache->NARS.priority;
      war->preload = cache->NARS.preload;
      war->lock = cache->NARS.lock;
      war->latency = cache->memory_latency + 1;
      war->valid = 1;
      cache->NARS.valid = 0;
    }
}

/* Decrease the latencies of the various states in the cache.  */
static void
decrease_latencies (FRV_CACHE *cache)
{
  int pipe, j;
  /* Check the WAR registers.  */
  for (pipe = LS; pipe < FRV_CACHE_PIPELINES; ++pipe)
    {
      FRV_CACHE_PIPELINE *pipeline = & cache->pipeline[pipe];
      for (j = 0; j < NUM_WARS; ++j)
        {
          FRV_CACHE_WAR *war = & pipeline->WAR[j];
          if (war->valid)
            {
              --war->latency;
              /* If the latency has expired, then submit a WAR request to
                 the pipeline.  */
              if (war->latency <= 0)
                {
                  add_WAR_request (pipeline, war);
                  war->valid = 0;
                  move_ARS_to_WAR (cache, pipe, war);
                }
            }
        }
    }
}

/* Run the cache for the given number of cycles.  */
void
frv_cache_run (FRV_CACHE *cache, int cycles)
{
  int i;
  for (i = 0; i < cycles; ++i)
    {
      advance_pipelines (cache);
      arbitrate_requests (cache);
      decrease_latencies (cache);
    }
}
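
/* A minimal sketch of how a caller might drive the cycle-accurate
   interface (illustrative only -- `cache', `addr' and `reqno' stand in
   for values supplied by the simulator proper):

     frv_cache_request_load (cache, reqno, addr, UNIT_I0);
     while (! frv_cache_data_in_buffer (cache, LS, addr, reqno))
       frv_cache_run (cache, 1);

   Each frv_cache_run cycle advances the pipelines, executes whatever
   reached the last stage, and counts down the WAR latencies.  */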

int
frv_cache_read_passive_SI (FRV_CACHE *cache, SI address, SI *value)
{
  SI offset;
  FRV_CACHE_TAG *tag;

  if (non_cache_access (cache, address))
    return 0;

  {
    FRV_CACHE_STATISTICS save_stats = cache->statistics;
    int found = get_tag (cache, address, &tag);
    cache->statistics = save_stats;

    if (! found)
      return 0; /* Indicate non-cache-access.  */
  }

  /* A cache line was available for the data.
     Extract the target data from the line.  */
  offset = address & (cache->line_size - 1);
  *value = T2H_4 (*(SI *)(tag->line + offset));
  return 1;
}

/* Check the return buffers of the data cache to see if the requested data is
   available.  */
int
frv_cache_data_in_buffer (FRV_CACHE* cache, int pipe, SI address,
                          unsigned reqno)
{
  return cache->pipeline[pipe].status.return_buffer.valid
    && cache->pipeline[pipe].status.return_buffer.reqno == reqno
    && cache->pipeline[pipe].status.return_buffer.address <= address
    && cache->pipeline[pipe].status.return_buffer.address + cache->line_size
       > address;
}

/* Check to see if the requested data has been flushed.  */
int
frv_cache_data_flushed (FRV_CACHE* cache, int pipe, SI address, unsigned reqno)
{
  return cache->pipeline[pipe].status.flush.valid
    && cache->pipeline[pipe].status.flush.reqno == reqno
    && cache->pipeline[pipe].status.flush.address <= address
    && cache->pipeline[pipe].status.flush.address + cache->line_size
       > address;
}