sim/mips/sim-main.c
/* Simulator for the MIPS architecture.

   This file is part of the MIPS sim

   THIS SOFTWARE IS NOT COPYRIGHTED

   Cygnus offers the following for use in the public domain.  Cygnus
   makes no warranty with regard to the software or its performance
   and the user accepts the software "AS IS" with all faults.

   CYGNUS DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD TO
   THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
   MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.

   $Revision$
   $Date$

   */

#ifndef SIM_MAIN_C
#define SIM_MAIN_C

#include "sim-main.h"

#if !(WITH_IGEN)
#define SIM_MANIFESTS
#include "oengine.c"
#undef SIM_MANIFESTS
#endif


/*---------------------------------------------------------------------------*/
/*-- simulator engine -------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

/* Description from page A-22 of the "MIPS IV Instruction Set" manual
   (revision 3.1) */
/* Translate a virtual address to a physical address and cache
   coherence algorithm describing the mechanism used to resolve the
   memory reference.  Given the virtual address vAddr, and whether the
   reference is to Instructions or Data (IorD), find the corresponding
   physical address (pAddr) and the cache coherence algorithm (CCA)
   used to resolve the reference.  If the virtual address is in one of
   the unmapped address spaces the physical address and the CCA are
   determined directly by the virtual address.  If the virtual address
   is in one of the mapped address spaces then the TLB is used to
   determine the physical address and access type; if the required
   translation is not present in the TLB or the desired access is not
   permitted the function fails and an exception is taken.

   NOTE: Normally (RAW == 0), when address translation fails, this
   function raises an exception and does not return. */

INLINE_SIM_MAIN (int)
address_translation (SIM_DESC sd,
                     sim_cpu *cpu,
                     address_word cia,
                     address_word vAddr,
                     int IorD,
                     int LorS,
                     address_word *pAddr,
                     int *CCA,
                     int raw)
{
  int res = -1; /* TRUE : Assume good return */

#ifdef DEBUG
  sim_io_printf(sd,"AddressTranslation(0x%s,%s,%s,...);\n",pr_addr(vAddr),(IorD ? "isDATA" : "isINSTRUCTION"),(LorS ? "isSTORE" : "isLOAD"));
#endif

  /* Check that the address is valid for this memory model */

  /* For a simple (flat) memory model, we simply pass virtual
     addresses through (mostly) unchanged. */
  vAddr &= 0xFFFFFFFF;

  *pAddr = vAddr; /* default for isTARGET */
  *CCA = Uncached; /* not used for isHOST */

  return(res);
}
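
/* The following is an illustrative sketch only (deliberately compiled
   out): it shows how a caller might use address_translation with the
   flat memory model above, where the physical address is simply the
   truncated virtual address and the CCA comes back as Uncached.  The
   helper name and the example address are made up; the isDATA/isLOAD
   constants are the ones already used elsewhere in this file.  */
#if 0
static void
example_flat_translation (SIM_DESC sd, sim_cpu *cpu, address_word cia)
{
  address_word vAddr = 0x80001234;  /* arbitrary example address */
  address_word pAddr;
  int cca;

  if (address_translation (sd, cpu, cia, vAddr, isDATA, isLOAD,
                           &pAddr, &cca, 0 /* raw */))
    {
      /* Flat model: pAddr == (vAddr & 0xFFFFFFFF) and cca == Uncached.  */
    }
}
#endif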

/* Description from page A-23 of the "MIPS IV Instruction Set" manual
   (revision 3.1) */
/* Prefetch data from memory.  Prefetch is an advisory instruction for
   which an implementation specific action is taken.  The action taken
   may increase performance, but must not change the meaning of the
   program, or alter architecturally-visible state. */

INLINE_SIM_MAIN (void)
prefetch (SIM_DESC sd,
          sim_cpu *cpu,
          address_word cia,
          int CCA,
          address_word pAddr,
          address_word vAddr,
          int DATA,
          int hint)
{
#ifdef DEBUG
  sim_io_printf(sd,"Prefetch(%d,0x%s,0x%s,%d,%d);\n",CCA,pr_addr(pAddr),pr_addr(vAddr),DATA,hint);
#endif /* DEBUG */

  /* For our simple memory model we do nothing */
  return;
}

/* Description from page A-22 of the "MIPS IV Instruction Set" manual
   (revision 3.1) */
/* Load a value from memory.  Use the cache and main memory as
   specified in the Cache Coherence Algorithm (CCA) and the sort of
   access (IorD) to find the contents of AccessLength memory bytes
   starting at physical location pAddr.  The data is returned in the
   fixed width naturally-aligned memory element (MemElem).  The
   low-order two (or three) bits of the address and the AccessLength
   indicate which of the bytes within MemElem needs to be given to the
   processor.  If the memory access type of the reference is uncached
   then only the referenced bytes are read from memory and valid
   within the memory element.  If the access type is cached, and the
   data is not present in cache, an implementation specific size and
   alignment block of memory is read and loaded into the cache to
   satisfy a load reference.  At a minimum, the block is the entire
   memory element. */
INLINE_SIM_MAIN (void)
load_memory (SIM_DESC SD,
             sim_cpu *CPU,
             address_word cia,
             uword64* memvalp,
             uword64* memval1p,
             int CCA,
             unsigned int AccessLength,
             address_word pAddr,
             address_word vAddr,
             int IorD)
{
  uword64 value = 0;
  uword64 value1 = 0;

#ifdef DEBUG
  sim_io_printf(SD,"DBG: LoadMemory(%p,%p,%d,%d,0x%s,0x%s,%s)\n",memvalp,memval1p,CCA,AccessLength,pr_addr(pAddr),pr_addr(vAddr),(IorD ? "isDATA" : "isINSTRUCTION"));
#endif /* DEBUG */

#if defined(WARN_MEM)
  if (CCA != uncached)
    sim_io_eprintf(SD,"LoadMemory CCA (%d) is not uncached (currently all accesses treated as cached)\n",CCA);
#endif /* WARN_MEM */

  /* If this is an instruction fetch, we need to check that the two
     low-order bits are zero; otherwise raise an InstructionFetch
     exception: */
  if ((IorD == isINSTRUCTION)
      && ((pAddr & 0x3) != 0)
      && (((pAddr & 0x1) != 0) || ((vAddr & 0x1) == 0)))
    SignalExceptionInstructionFetch ();

  if (((pAddr & LOADDRMASK) + AccessLength) > LOADDRMASK)
    {
      /* In reality this should be a Bus Error */
      sim_io_error (SD, "LOAD AccessLength of %d would extend over %d bit aligned boundary for physical address 0x%s\n",
                    AccessLength,
                    (LOADDRMASK + 1) << 3,
                    pr_addr (pAddr));
    }
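
  /* Worked example of the check above (a sketch assuming a 64-bit
     memory element, i.e. LOADDRMASK == 7): AccessLength is encoded as
     "number of bytes - 1", so a word access (AccessLength_WORD == 3)
     starting at (pAddr & 7) == 4 gives 4 + 3 == 7, which does not
     exceed LOADDRMASK and is accepted, while the same access at offset
     5 gives 5 + 3 == 8 > 7 and is rejected because it would straddle
     the aligned memory element.  */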

#if defined(TRACE)
  dotrace (SD, CPU, tracefh,((IorD == isDATA) ? 0 : 2),(unsigned int)(pAddr&0xFFFFFFFF),(AccessLength + 1),"load%s",((IorD == isDATA) ? "" : " instruction"));
#endif /* TRACE */

  /* Read the specified number of bytes from memory.  Adjust for
     host/target byte ordering.  Align the least significant byte
     read. */

  switch (AccessLength)
    {
    case AccessLength_QUADWORD:
      {
        unsigned_16 val = sim_core_read_aligned_16 (CPU, NULL_CIA, read_map, pAddr);
        value1 = VH8_16 (val);
        value = VL8_16 (val);
        break;
      }
    case AccessLength_DOUBLEWORD:
      value = sim_core_read_aligned_8 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_SEPTIBYTE:
      value = sim_core_read_misaligned_7 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_SEXTIBYTE:
      value = sim_core_read_misaligned_6 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_QUINTIBYTE:
      value = sim_core_read_misaligned_5 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_WORD:
      value = sim_core_read_aligned_4 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_TRIPLEBYTE:
      value = sim_core_read_misaligned_3 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_HALFWORD:
      value = sim_core_read_aligned_2 (CPU, NULL_CIA, read_map, pAddr);
      break;
    case AccessLength_BYTE:
      value = sim_core_read_aligned_1 (CPU, NULL_CIA, read_map, pAddr);
      break;
    default:
      abort ();
    }

#ifdef DEBUG
  printf("DBG: LoadMemory() : (offset %d) : value = 0x%s%s\n",
         (int)(pAddr & LOADDRMASK),pr_uword64(value1),pr_uword64(value));
#endif /* DEBUG */

  /* See also store_memory.  Position data in correct byte lanes. */
  if (AccessLength <= LOADDRMASK)
    {
      if (BigEndianMem)
        /* For a big-endian target, byte (pAddr&LOADDRMASK == 0) is
           shifted to the most significant byte position. */
        value <<= (((LOADDRMASK - (pAddr & LOADDRMASK)) - AccessLength) * 8);
      else
        /* For a little-endian target, byte (pAddr&LOADDRMASK == 0)
           is already in the correct position. */
        value <<= ((pAddr & LOADDRMASK) * 8);
    }
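
  /* Worked example of the byte-lane positioning above (a sketch
     assuming LOADDRMASK == 7): a halfword load (AccessLength_HALFWORD
     == 1) from (pAddr & 7) == 2 is shifted left by ((7 - 2) - 1) * 8
     == 32 bits when BigEndianMem, and by 2 * 8 == 16 bits otherwise,
     so the two bytes end up in the byte lanes the processor expects
     for that address.  */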

#ifdef DEBUG
  printf("DBG: LoadMemory() : shifted value = 0x%s%s\n",
         pr_uword64(value1),pr_uword64(value));
#endif /* DEBUG */

  *memvalp = value;
  if (memval1p) *memval1p = value1;
}


/* Description from page A-23 of the "MIPS IV Instruction Set" manual
   (revision 3.1) */
/* Store a value to memory.  The specified data is stored into the
   physical location pAddr using the memory hierarchy (data caches and
   main memory) as specified by the Cache Coherence Algorithm
   (CCA).  The MemElem contains the data for an aligned, fixed-width
   memory element (word for 32-bit processors, doubleword for 64-bit
   processors), though only the bytes that will actually be stored to
   memory need to be valid.  The low-order two (or three) bits of pAddr
   and the AccessLength field indicate which of the bytes within the
   MemElem data should actually be stored; only these bytes in memory
   will be changed. */

INLINE_SIM_MAIN (void)
store_memory (SIM_DESC SD,
              sim_cpu *CPU,
              address_word cia,
              int CCA,
              unsigned int AccessLength,
              uword64 MemElem,
              uword64 MemElem1, /* High order 64 bits */
              address_word pAddr,
              address_word vAddr)
{
#ifdef DEBUG
  sim_io_printf(SD,"DBG: StoreMemory(%d,%d,0x%s,0x%s,0x%s,0x%s)\n",CCA,AccessLength,pr_uword64(MemElem),pr_uword64(MemElem1),pr_addr(pAddr),pr_addr(vAddr));
#endif /* DEBUG */

#if defined(WARN_MEM)
  if (CCA != uncached)
    sim_io_eprintf(SD,"StoreMemory CCA (%d) is not uncached (currently all accesses treated as cached)\n",CCA);
#endif /* WARN_MEM */

  if (((pAddr & LOADDRMASK) + AccessLength) > LOADDRMASK)
    sim_io_error (SD, "STORE AccessLength of %d would extend over %d bit aligned boundary for physical address 0x%s\n",
                  AccessLength,
                  (LOADDRMASK + 1) << 3,
                  pr_addr(pAddr));

#if defined(TRACE)
  dotrace (SD, CPU, tracefh,1,(unsigned int)(pAddr&0xFFFFFFFF),(AccessLength + 1),"store");
#endif /* TRACE */

#ifdef DEBUG
  printf("DBG: StoreMemory: offset = %d MemElem = 0x%s%s\n",(unsigned int)(pAddr & LOADDRMASK),pr_uword64(MemElem1),pr_uword64(MemElem));
#endif /* DEBUG */

  /* See also load_memory.  Position data in correct byte lanes. */
  if (AccessLength <= LOADDRMASK)
    {
      if (BigEndianMem)
        /* For a big-endian target, byte (pAddr&LOADDRMASK == 0) is
           shifted to the most significant byte position. */
        MemElem >>= (((LOADDRMASK - (pAddr & LOADDRMASK)) - AccessLength) * 8);
      else
        /* For a little-endian target, byte (pAddr&LOADDRMASK == 0)
           is already in the correct position. */
        MemElem >>= ((pAddr & LOADDRMASK) * 8);
    }
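
  /* Worked example, mirroring the load case (again assuming LOADDRMASK
     == 7): a big-endian halfword store to (pAddr & 7) == 2 arrives with
     its data positioned high in MemElem, so it is shifted right by
     ((7 - 2) - 1) * 8 == 32 bits here before the low-order bytes are
     handed to sim_core_write_aligned_2 below.  */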

#ifdef DEBUG
  printf("DBG: StoreMemory: shifted MemElem = 0x%s%s\n",pr_uword64(MemElem1),pr_uword64(MemElem));
#endif /* DEBUG */

  switch (AccessLength)
    {
    case AccessLength_QUADWORD:
      {
        unsigned_16 val = U16_8 (MemElem1, MemElem);
        sim_core_write_aligned_16 (CPU, NULL_CIA, write_map, pAddr, val);
        break;
      }
    case AccessLength_DOUBLEWORD:
      sim_core_write_aligned_8 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_SEPTIBYTE:
      sim_core_write_misaligned_7 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_SEXTIBYTE:
      sim_core_write_misaligned_6 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_QUINTIBYTE:
      sim_core_write_misaligned_5 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_WORD:
      sim_core_write_aligned_4 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_TRIPLEBYTE:
      sim_core_write_misaligned_3 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_HALFWORD:
      sim_core_write_aligned_2 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    case AccessLength_BYTE:
      sim_core_write_aligned_1 (CPU, NULL_CIA, write_map, pAddr, MemElem);
      break;
    default:
      abort ();
    }

  return;
}


INLINE_SIM_MAIN (unsigned32)
ifetch32 (SIM_DESC SD,
          sim_cpu *CPU,
          address_word cia,
          address_word vaddr)
{
  /* Copy the action of the LW instruction */
  address_word reverse = (ReverseEndian ? (LOADDRMASK >> 2) : 0);
  address_word bigend = (BigEndianCPU ? (LOADDRMASK >> 2) : 0);
  unsigned64 value;
  address_word paddr;
  unsigned32 instruction;
  unsigned byte;
  int cca;
  AddressTranslation (vaddr, isINSTRUCTION, isLOAD, &paddr, &cca, isTARGET, isREAL);
  paddr = ((paddr & ~LOADDRMASK) | ((paddr & LOADDRMASK) ^ (reverse << 2)));
  LoadMemory (&value, NULL, cca, AccessLength_WORD, paddr, vaddr, isINSTRUCTION, isREAL);
  byte = ((vaddr & LOADDRMASK) ^ (bigend << 2));
  instruction = ((value >> (8 * byte)) & 0xFFFFFFFF);
  return instruction;
}
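
/* Worked example of the fetch above (a sketch assuming LOADDRMASK == 7
   and ReverseEndian clear): for (vaddr & 7) == 4 on a little-endian
   CPU, bigend == 0 and byte == 4, so the instruction word is taken from
   bits 32..63 of the memory element returned by LoadMemory; on a
   big-endian CPU, bigend == 1 and byte == 4 ^ 4 == 0, matching the lane
   into which LoadMemory's big-endian positioning placed the word.  */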


/* Description from page A-26 of the "MIPS IV Instruction Set" manual
   (revision 3.1) */
/* Order loads and stores to synchronise shared memory.  Perform the
   action necessary to make the effects of groups of synchronizable
   loads and stores indicated by stype occur in the same order for all
   processors. */
INLINE_SIM_MAIN (void)
sync_operation (SIM_DESC sd,
                sim_cpu *cpu,
                address_word cia,
                int stype)
{
#ifdef DEBUG
  sim_io_printf(sd,"SyncOperation(%d) : TODO\n",stype);
#endif /* DEBUG */
  return;
}

INLINE_SIM_MAIN (void)
cache_op (SIM_DESC SD,
          sim_cpu *CPU,
          address_word cia,
          int op,
          address_word pAddr,
          address_word vAddr,
          unsigned int instruction)
{
#if 1 /* stop warning message being displayed (we should really just remove the code) */
  static int icache_warning = 1;
  static int dcache_warning = 1;
#else
  static int icache_warning = 0;
  static int dcache_warning = 0;
#endif

  /* If CP0 is not usable (User or Supervisor mode) and the CP0 enable
     bit in the Status Register is clear - a coprocessor unusable
     exception is taken. */
#if 0
  sim_io_printf(SD,"TODO: Cache availability checking (PC = 0x%s)\n",pr_addr(cia));
#endif

  switch (op & 0x3) {
    case 0: /* instruction cache */
      switch (op >> 2) {
        case 0: /* Index Invalidate */
        case 1: /* Index Load Tag */
        case 2: /* Index Store Tag */
        case 4: /* Hit Invalidate */
        case 5: /* Fill */
        case 6: /* Hit Writeback */
          if (!icache_warning)
            {
              sim_io_eprintf(SD,"Instruction CACHE operation %d to be coded\n",(op >> 2));
              icache_warning = 1;
            }
          break;

        default:
          SignalException(ReservedInstruction,instruction);
          break;
      }
      break;

    case 1: /* data cache */
      switch (op >> 2) {
        case 0: /* Index Writeback Invalidate */
        case 1: /* Index Load Tag */
        case 2: /* Index Store Tag */
        case 3: /* Create Dirty */
        case 4: /* Hit Invalidate */
        case 5: /* Hit Writeback Invalidate */
        case 6: /* Hit Writeback */
          if (!dcache_warning)
            {
              sim_io_eprintf(SD,"Data CACHE operation %d to be coded\n",(op >> 2));
              dcache_warning = 1;
            }
          break;

        default:
          SignalException(ReservedInstruction,instruction);
          break;
      }
      break;

    default: /* unrecognised cache ID */
      SignalException(ReservedInstruction,instruction);
      break;
  }

  return;
}
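
/* Example decode of the "op" field as dispatched above: the low two
   bits select the cache and the remaining bits select the operation,
   so op == 0x15 (binary 10101) targets the data cache (op & 0x3 == 1)
   with operation 5, Hit Writeback Invalidate.  */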


INLINE_SIM_MAIN (void)
pending_tick (SIM_DESC SD,
              sim_cpu *CPU,
              address_word cia)
{
  if (PENDING_TRACE)
    sim_io_printf (SD, "PENDING_DRAIN - pending_in = %d, pending_out = %d, pending_total = %d\n", PENDING_IN, PENDING_OUT, PENDING_TOTAL);
  if (PENDING_OUT != PENDING_IN)
    {
      int loop;
      int index = PENDING_OUT;
      int total = PENDING_TOTAL;
      if (PENDING_TOTAL == 0)
        sim_engine_abort (SD, CPU, cia, "PENDING_DRAIN - Mis-match on pending update pointers\n");
      for (loop = 0; (loop < total); loop++)
        {
          if (PENDING_SLOT_DEST[index] != NULL)
            {
              PENDING_SLOT_DELAY[index] -= 1;
              if (PENDING_SLOT_DELAY[index] == 0)
                {
                  if (PENDING_SLOT_BIT[index] >= 0)
                    switch (PENDING_SLOT_SIZE[index])
                      {
                      case 32:
                        if (PENDING_SLOT_VALUE[index])
                          *(unsigned32*)PENDING_SLOT_DEST[index] |=
                            BIT32 (PENDING_SLOT_BIT[index]);
                        else
                          /* Clear the bit rather than mask with it.  */
                          *(unsigned32*)PENDING_SLOT_DEST[index] &=
                            ~BIT32 (PENDING_SLOT_BIT[index]);
                        break;
                      case 64:
                        if (PENDING_SLOT_VALUE[index])
                          *(unsigned64*)PENDING_SLOT_DEST[index] |=
                            BIT64 (PENDING_SLOT_BIT[index]);
                        else
                          /* Clear the bit rather than mask with it.  */
                          *(unsigned64*)PENDING_SLOT_DEST[index] &=
                            ~BIT64 (PENDING_SLOT_BIT[index]);
                        break;
                      }
                  else
                    switch (PENDING_SLOT_SIZE[index])
                      {
                      case 32:
                        *(unsigned32*)PENDING_SLOT_DEST[index] =
                          PENDING_SLOT_VALUE[index];
                        break;
                      case 64:
                        *(unsigned64*)PENDING_SLOT_DEST[index] =
                          PENDING_SLOT_VALUE[index];
                        break;
                      }
                  if (PENDING_OUT == index)
                    {
                      PENDING_SLOT_DEST[index] = NULL;
                      PENDING_OUT = (PENDING_OUT + 1) % PSLOTS;
                      PENDING_TOTAL--;
                    }
                }
            }
          index = (index + 1) % PSLOTS;
        }
    }
}
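
/* Worked example of the pending-update ring drained above (a sketch):
   PENDING_OUT indexes the oldest scheduled update and PENDING_IN the
   next free slot, with PENDING_TOTAL entries live.  With PSLOTS == 8,
   PENDING_OUT == 6 and PENDING_TOTAL == 2, the loop examines slots 6
   and 7; when the head slot's delay reaches zero its destination is
   written (the whole value, or a single bit when PENDING_SLOT_BIT is
   set) and the slot is retired: PENDING_OUT advances to
   (6 + 1) % 8 == 7 and PENDING_TOTAL drops to 1.  */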


#endif