1 /* frv exception and interrupt support
2 Copyright (C) 1999-2021 Free Software Foundation, Inc.
3 Contributed by Red Hat.
4
5 This file is part of the GNU simulators.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 /* This must come before any other includes. */
21 #include "defs.h"
22
23 #define WANT_CPU frvbf
24 #define WANT_CPU_FRVBF
25
26 #include "sim-main.h"
27 #include "sim-signal.h"
28 #include "bfd.h"
29 #include <stdlib.h>
30 #include "cgen-mem.h"
31
32 /* FR-V Interrupt table.
33 Describes the interrupts supported by the FR-V.
34 This table *must* be maintained in order of interrupt priority as defined by
35 frv_interrupt_kind. */
36 #define DEFERRED 1
37 #define PRECISE 1
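/* Entries below use !DEFERRED and !PRECISE so each row reads as a list of
   attribute flags. */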
38 #define ITABLE_ENTRY(name, class, deferral, precision, offset) \
39 {FRV_##name, FRV_EC_##name, class, deferral, precision, offset}
40
41 struct frv_interrupt frv_interrupt_table[NUM_FRV_INTERRUPT_KINDS] =
42 {
43 /* External interrupts */
44 ITABLE_ENTRY(INTERRUPT_LEVEL_1, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x21),
45 ITABLE_ENTRY(INTERRUPT_LEVEL_2, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x22),
46 ITABLE_ENTRY(INTERRUPT_LEVEL_3, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x23),
47 ITABLE_ENTRY(INTERRUPT_LEVEL_4, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x24),
48 ITABLE_ENTRY(INTERRUPT_LEVEL_5, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x25),
49 ITABLE_ENTRY(INTERRUPT_LEVEL_6, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x26),
50 ITABLE_ENTRY(INTERRUPT_LEVEL_7, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x27),
51 ITABLE_ENTRY(INTERRUPT_LEVEL_8, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x28),
52 ITABLE_ENTRY(INTERRUPT_LEVEL_9, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x29),
53 ITABLE_ENTRY(INTERRUPT_LEVEL_10, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2a),
54 ITABLE_ENTRY(INTERRUPT_LEVEL_11, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2b),
55 ITABLE_ENTRY(INTERRUPT_LEVEL_12, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2c),
56 ITABLE_ENTRY(INTERRUPT_LEVEL_13, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2d),
57 ITABLE_ENTRY(INTERRUPT_LEVEL_14, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2e),
58 ITABLE_ENTRY(INTERRUPT_LEVEL_15, FRV_EXTERNAL_INTERRUPT, !DEFERRED, !PRECISE, 0x2f),
59 /* Software interrupt */
60 ITABLE_ENTRY(TRAP_INSTRUCTION, FRV_SOFTWARE_INTERRUPT, !DEFERRED, !PRECISE, 0x80),
61 /* Program interrupts */
62 ITABLE_ENTRY(COMMIT_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x19),
63 ITABLE_ENTRY(DIVISION_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x17),
64 ITABLE_ENTRY(DATA_STORE_ERROR, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x14),
65 ITABLE_ENTRY(DATA_ACCESS_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x13),
66 ITABLE_ENTRY(DATA_ACCESS_MMU_MISS, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x12),
67 ITABLE_ENTRY(DATA_ACCESS_ERROR, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x11),
68 ITABLE_ENTRY(MP_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x0e),
69 ITABLE_ENTRY(FP_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x0d),
70 ITABLE_ENTRY(MEM_ADDRESS_NOT_ALIGNED, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x10),
71 ITABLE_ENTRY(REGISTER_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x08),
72 ITABLE_ENTRY(MP_DISABLED, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x0b),
73 ITABLE_ENTRY(FP_DISABLED, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x0a),
74 ITABLE_ENTRY(PRIVILEGED_INSTRUCTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x06),
75 ITABLE_ENTRY(ILLEGAL_INSTRUCTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x07),
76 ITABLE_ENTRY(INSTRUCTION_ACCESS_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x03),
77 ITABLE_ENTRY(INSTRUCTION_ACCESS_ERROR, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x02),
78 ITABLE_ENTRY(INSTRUCTION_ACCESS_MMU_MISS, FRV_PROGRAM_INTERRUPT, !DEFERRED, PRECISE, 0x01),
79 ITABLE_ENTRY(COMPOUND_EXCEPTION, FRV_PROGRAM_INTERRUPT, !DEFERRED, !PRECISE, 0x20),
80 /* Break interrupt */
81 ITABLE_ENTRY(BREAK_EXCEPTION, FRV_BREAK_INTERRUPT, !DEFERRED, !PRECISE, 0xff),
82 /* Reset interrupt */
83 ITABLE_ENTRY(RESET, FRV_RESET_INTERRUPT, !DEFERRED, !PRECISE, 0x00)
84 };
85
86 /* The current interrupt state. */
87 struct frv_interrupt_state frv_interrupt_state;
88
89 /* The address of the start of the previous VLIW insn sequence. */
90 IADDR previous_vliw_pc;
91
92 /* Add a break interrupt to the interrupt queue. */
93 struct frv_interrupt_queue_element *
94 frv_queue_break_interrupt (SIM_CPU *current_cpu)
95 {
96 return frv_queue_interrupt (current_cpu, FRV_BREAK_EXCEPTION);
97 }
98
99 /* Add a software interrupt to the interrupt queue. */
100 struct frv_interrupt_queue_element *
101 frv_queue_software_interrupt (SIM_CPU *current_cpu, SI offset)
102 {
103 struct frv_interrupt_queue_element *new_element
104 = frv_queue_interrupt (current_cpu, FRV_TRAP_INSTRUCTION);
105
106 struct frv_interrupt *interrupt = & frv_interrupt_table[new_element->kind];
107 interrupt->handler_offset = offset;
108
109 return new_element;
110 }
111
112 /* Add a program interrupt to the interrupt queue. */
113 struct frv_interrupt_queue_element *
114 frv_queue_program_interrupt (
115 SIM_CPU *current_cpu, enum frv_interrupt_kind kind
116 )
117 {
118 return frv_queue_interrupt (current_cpu, kind);
119 }
120
121 /* Add an external interrupt to the interrupt queue. */
122 struct frv_interrupt_queue_element *
123 frv_queue_external_interrupt (
124 SIM_CPU *current_cpu, enum frv_interrupt_kind kind
125 )
126 {
127 if (! GET_H_PSR_ET ()
128 || (kind != FRV_INTERRUPT_LEVEL_15 && kind < GET_H_PSR_PIL ()))
129 return NULL; /* Leave it for later. */
130
131 return frv_queue_interrupt (current_cpu, kind);
132 }
133
134 /* Add any interrupt to the interrupt queue. It will be added in reverse
135 priority order. This makes it easy to find the highest priority interrupt
136 at the end of the queue and to remove it after processing. */
137 struct frv_interrupt_queue_element *
138 frv_queue_interrupt (SIM_CPU *current_cpu, enum frv_interrupt_kind kind)
139 {
140 int i;
141 int j;
142 int limit = frv_interrupt_state.queue_index;
143 struct frv_interrupt_queue_element *new_element;
144 enum frv_interrupt_class iclass;
145
146 if (limit >= FRV_INTERRUPT_QUEUE_SIZE)
147 abort (); /* TODO: Make the queue dynamic */
148
149 /* Find the right place in the queue. */
150 for (i = 0; i < limit; ++i)
151 {
152 if (frv_interrupt_state.queue[i].kind >= kind)
153 break;
154 }
155
156 /* Don't queue two external interrupts of the same priority. */
157 iclass = frv_interrupt_table[kind].iclass;
158 if (i < limit && iclass == FRV_EXTERNAL_INTERRUPT)
159 {
160 if (frv_interrupt_state.queue[i].kind == kind)
161 return & frv_interrupt_state.queue[i];
162 }
163
164 /* Make room for the new interrupt in this spot. */
165 for (j = limit - 1; j >= i; --j)
166 frv_interrupt_state.queue[j + 1] = frv_interrupt_state.queue[j];
167
168 /* Add the new interrupt. */
169 frv_interrupt_state.queue_index++;
170 new_element = & frv_interrupt_state.queue[i];
171 new_element->kind = kind;
172 new_element->vpc = CPU_PC_GET (current_cpu);
173 new_element->u.data_written.length = 0;
174 frv_set_interrupt_queue_slot (current_cpu, new_element);
175
176 return new_element;
177 }
178
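/* Add a register_exception interrupt to the interrupt queue, recording the
   given REC value in the queued element. */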
179 struct frv_interrupt_queue_element *
180 frv_queue_register_exception_interrupt (SIM_CPU *current_cpu, enum frv_rec rec)
181 {
182 struct frv_interrupt_queue_element *new_element =
183 frv_queue_program_interrupt (current_cpu, FRV_REGISTER_EXCEPTION);
184
185 new_element->u.rec = rec;
186
187 return new_element;
188 }
189
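/* Add a mem_address_not_aligned interrupt to the interrupt queue, unless the
   exception is masked by ISR.EMAM. The effective address and any data which
   was to be written are recorded in the queued element. */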
190 struct frv_interrupt_queue_element *
191 frv_queue_mem_address_not_aligned_interrupt (SIM_CPU *current_cpu, USI addr)
192 {
193 struct frv_interrupt_queue_element *new_element;
194 USI isr = GET_ISR ();
195
196 /* Make sure that this exception is not masked. */
197 if (GET_ISR_EMAM (isr))
198 return NULL;
199
200 /* Queue the interrupt. */
201 new_element = frv_queue_program_interrupt (current_cpu,
202 FRV_MEM_ADDRESS_NOT_ALIGNED);
203 new_element->eaddress = addr;
204 new_element->u.data_written = frv_interrupt_state.data_written;
205 frv_interrupt_state.data_written.length = 0;
206
207 return new_element;
208 }
209
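/* Add a data_access_error interrupt to the interrupt queue, recording the
   effective address of the failed access. */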
210 struct frv_interrupt_queue_element *
211 frv_queue_data_access_error_interrupt (SIM_CPU *current_cpu, USI addr)
212 {
213 struct frv_interrupt_queue_element *new_element;
214 new_element = frv_queue_program_interrupt (current_cpu,
215 FRV_DATA_ACCESS_ERROR);
216 new_element->eaddress = addr;
217 return new_element;
218 }
219
220 struct frv_interrupt_queue_element *
221 frv_queue_data_access_exception_interrupt (SIM_CPU *current_cpu)
222 {
223 return frv_queue_program_interrupt (current_cpu, FRV_DATA_ACCESS_EXCEPTION);
224 }
225
226 struct frv_interrupt_queue_element *
227 frv_queue_instruction_access_error_interrupt (SIM_CPU *current_cpu)
228 {
229 return frv_queue_program_interrupt (current_cpu, FRV_INSTRUCTION_ACCESS_ERROR);
230 }
231
232 struct frv_interrupt_queue_element *
233 frv_queue_instruction_access_exception_interrupt (SIM_CPU *current_cpu)
234 {
235 return frv_queue_program_interrupt (current_cpu, FRV_INSTRUCTION_ACCESS_EXCEPTION);
236 }
237
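/* Add an illegal_instruction interrupt to the interrupt queue. On some
   machines a float or media insn queues fp_exception (sequence error)
   instead. */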
238 struct frv_interrupt_queue_element *
239 frv_queue_illegal_instruction_interrupt (
240 SIM_CPU *current_cpu, const CGEN_INSN *insn
241 )
242 {
243 SIM_DESC sd = CPU_STATE (current_cpu);
244 switch (STATE_ARCHITECTURE (sd)->mach)
245 {
246 case bfd_mach_fr400:
247 case bfd_mach_fr450:
248 case bfd_mach_fr550:
249 break;
250 default:
251 /* Some machines generate fp_exception for this case. */
252 if (frv_is_float_insn (insn) || frv_is_media_insn (insn))
253 {
254 struct frv_fp_exception_info fp_info = {
255 FSR_NO_EXCEPTION, FTT_SEQUENCE_ERROR
256 };
257 return frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
258 }
259 break;
260 }
261
262 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
263 }
264
265 struct frv_interrupt_queue_element *
266 frv_queue_privileged_instruction_interrupt (SIM_CPU *current_cpu, const CGEN_INSN *insn)
267 {
268 /* The fr550 has no privileged instruction interrupt. It uses
269 illegal_instruction. */
270 SIM_DESC sd = CPU_STATE (current_cpu);
271 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
272 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
273
274 return frv_queue_program_interrupt (current_cpu, FRV_PRIVILEGED_INSTRUCTION);
275 }
276
277 struct frv_interrupt_queue_element *
278 frv_queue_float_disabled_interrupt (SIM_CPU *current_cpu)
279 {
280 /* The fr550 has no fp_disabled interrupt. It uses illegal_instruction. */
281 SIM_DESC sd = CPU_STATE (current_cpu);
282 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
283 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
284
285 return frv_queue_program_interrupt (current_cpu, FRV_FP_DISABLED);
286 }
287
288 struct frv_interrupt_queue_element *
289 frv_queue_media_disabled_interrupt (SIM_CPU *current_cpu)
290 {
291 /* The fr550 has no mp_disabled interrupt. It uses illegal_instruction. */
292 SIM_DESC sd = CPU_STATE (current_cpu);
293 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
294 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
295
296 return frv_queue_program_interrupt (current_cpu, FRV_MP_DISABLED);
297 }
298
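/* Queue the interrupt for a non-implemented insn: fp_exception or
   mp_exception on some machines, otherwise illegal_instruction. */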
299 struct frv_interrupt_queue_element *
300 frv_queue_non_implemented_instruction_interrupt (
301 SIM_CPU *current_cpu, const CGEN_INSN *insn
302 )
303 {
304 SIM_DESC sd = CPU_STATE (current_cpu);
305 switch (STATE_ARCHITECTURE (sd)->mach)
306 {
307 case bfd_mach_fr400:
308 case bfd_mach_fr450:
309 case bfd_mach_fr550:
310 break;
311 default:
312 /* Some machines generate fp_exception or mp_exception for this case. */
313 if (frv_is_float_insn (insn))
314 {
315 struct frv_fp_exception_info fp_info = {
316 FSR_NO_EXCEPTION, FTT_UNIMPLEMENTED_FPOP
317 };
318 return frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
319 }
320 if (frv_is_media_insn (insn))
321 {
322 frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP,
323 0);
324 return NULL; /* no interrupt queued at this time. */
325 }
326 break;
327 }
328
329 return frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
330 }
331
332 /* Queue the given fp_exception interrupt. Also update fp_info by removing
333 masked interrupts and updating the 'slot' field. */
334 struct frv_interrupt_queue_element *
335 frv_queue_fp_exception_interrupt (
336 SIM_CPU *current_cpu, struct frv_fp_exception_info *fp_info
337 )
338 {
339 SI fsr0 = GET_FSR (0);
340 int tem = GET_FSR_TEM (fsr0);
341 int aexc = GET_FSR_AEXC (fsr0);
342 struct frv_interrupt_queue_element *new_element = NULL;
343
344 /* Update AEXC with the interrupts that are masked. */
345 aexc |= fp_info->fsr_mask & ~tem;
346 SET_FSR_AEXC (fsr0, aexc);
347 SET_FSR (0, fsr0);
348
349 /* Update fsr_mask with the exceptions that are enabled. */
350 fp_info->fsr_mask &= tem;
351
352 /* If there is an unmasked interrupt then queue it, unless
353 this was a non-excepting insn, in which case simply set the NE
354 status registers. */
355 if (frv_interrupt_state.ne_index != NE_NOFLAG
356 && fp_info->fsr_mask != FSR_NO_EXCEPTION)
357 {
358 SET_NE_FLAG (frv_interrupt_state.f_ne_flags,
359 frv_interrupt_state.ne_index);
360 /* TODO -- Set NESR for chips which support it. */
361 new_element = NULL;
362 }
363 else if (fp_info->fsr_mask != FSR_NO_EXCEPTION
364 || fp_info->ftt == FTT_UNIMPLEMENTED_FPOP
365 || fp_info->ftt == FTT_SEQUENCE_ERROR
366 || fp_info->ftt == FTT_INVALID_FR)
367 {
368 new_element = frv_queue_program_interrupt (current_cpu, FRV_FP_EXCEPTION);
369 new_element->u.fp_info = *fp_info;
370 }
371
372 return new_element;
373 }
374
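/* Add a division_exception interrupt to the interrupt queue, recording the
   given DTT value in the queued element. */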
375 struct frv_interrupt_queue_element *
376 frv_queue_division_exception_interrupt (SIM_CPU *current_cpu, enum frv_dtt dtt)
377 {
378 struct frv_interrupt_queue_element *new_element =
379 frv_queue_program_interrupt (current_cpu, FRV_DIVISION_EXCEPTION);
380
381 new_element->u.dtt = dtt;
382
383 return new_element;
384 }
385
386 /* Check for interrupts caused by illegal insn access. These conditions are
387 checked in the order specified by the fr400 and fr500 LSI specs. */
388 void
389 frv_detect_insn_access_interrupts (SIM_CPU *current_cpu, SCACHE *sc)
390 {
391
392 const CGEN_INSN *insn = sc->argbuf.idesc->idata;
393 SIM_DESC sd = CPU_STATE (current_cpu);
394 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
395
396 /* Check for vliw constraints. */
397 if (vliw->constraint_violation)
398 frv_queue_illegal_instruction_interrupt (current_cpu, insn);
399 /* Check for non-excepting insns. */
400 else if (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_NON_EXCEPTING)
401 && ! GET_H_PSR_NEM ())
402 frv_queue_non_implemented_instruction_interrupt (current_cpu, insn);
403 /* Check for conditional insns. */
404 else if (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_CONDITIONAL)
405 && ! GET_H_PSR_CM ())
406 frv_queue_non_implemented_instruction_interrupt (current_cpu, insn);
407 /* Make sure floating point support is enabled. */
408 else if (! GET_H_PSR_EF ())
409 {
410 /* Generate fp_disabled if it is a floating point insn or if PSR.EM is
411 off and the insn accesses an FP register. */
412 if (frv_is_float_insn (insn)
413 || (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_FR_ACCESS)
414 && ! GET_H_PSR_EM ()))
415 frv_queue_float_disabled_interrupt (current_cpu);
416 }
417 /* Make sure media support is enabled. */
418 else if (! GET_H_PSR_EM ())
419 {
420 /* Generate mp_disabled if it is a media insn. */
421 if (frv_is_media_insn (insn) || CGEN_INSN_NUM (insn) == FRV_INSN_MTRAP)
422 frv_queue_media_disabled_interrupt (current_cpu);
423 }
424 /* Check for privileged insns. */
425 else if (CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_PRIVILEGED)
426 && ! GET_H_PSR_S ())
427 frv_queue_privileged_instruction_interrupt (current_cpu, insn);
428 #if 0 /* disable for now until we find out how FSR0.QNE gets reset. */
429 else
430 {
431 /* Enter the halt state if FSR0.QNE is set and we are executing a
432 floating point insn, a media insn or an insn which accesses an FR
433 register. */
434 SI fsr0 = GET_FSR (0);
435 if (GET_FSR_QNE (fsr0)
436 && (frv_is_float_insn (insn) || frv_is_media_insn (insn)
437 || CGEN_INSN_ATTR_VALUE (insn, CGEN_INSN_FR_ACCESS)))
438 {
439 sim_engine_halt (sd, current_cpu, NULL, GET_H_PC (), sim_stopped,
440 SIM_SIGINT);
441 }
442 }
443 #endif
444 }
445
446 /* Record the current VLIW slot in the given interrupt queue element. */
447 void
448 frv_set_interrupt_queue_slot (
449 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
450 )
451 {
452 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
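  /* next_slot indexes the next VLIW slot to be filled, so the insn which
     caused the interrupt is in the previous slot. */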
453 int slot = vliw->next_slot - 1;
454 item->slot = (*vliw->current_vliw)[slot];
455 }
456
457 /* Handle an individual interrupt. */
458 static void
459 handle_interrupt (SIM_CPU *current_cpu, IADDR pc)
460 {
461 struct frv_interrupt *interrupt;
462 int writeback_done = 0;
463 while (1)
464 {
465 /* Interrupts are queued in priority order with the highest priority
466 last. */
467 int index = frv_interrupt_state.queue_index - 1;
468 struct frv_interrupt_queue_element *item
469 = & frv_interrupt_state.queue[index];
470 interrupt = & frv_interrupt_table[item->kind];
471
472 switch (interrupt->iclass)
473 {
474 case FRV_EXTERNAL_INTERRUPT:
475 /* Perform writeback first. This may cause a higher priority
476 interrupt. */
477 if (! writeback_done)
478 {
479 frvbf_perform_writeback (current_cpu);
480 writeback_done = 1;
481 continue;
482 }
483 frv_external_interrupt (current_cpu, item, pc);
484 return;
485 case FRV_SOFTWARE_INTERRUPT:
486 frv_interrupt_state.queue_index = index;
487 frv_software_interrupt (current_cpu, item, pc);
488 return;
489 case FRV_PROGRAM_INTERRUPT:
490 /* If the program interrupt is not strict (imprecise), then perform
491 writeback first. This may, in turn, cause a higher priority
492 interrupt. */
493 if (! interrupt->precise && ! writeback_done)
494 {
495 frv_interrupt_state.imprecise_interrupt = item;
496 frvbf_perform_writeback (current_cpu);
497 writeback_done = 1;
498 continue;
499 }
500 frv_interrupt_state.queue_index = index;
501 frv_program_interrupt (current_cpu, item, pc);
502 return;
503 case FRV_BREAK_INTERRUPT:
504 frv_interrupt_state.queue_index = index;
505 frv_break_interrupt (current_cpu, interrupt, pc);
506 return;
507 case FRV_RESET_INTERRUPT:
508 break;
509 default:
510 break;
511 }
512 frv_interrupt_state.queue_index = index;
513 break; /* out of loop. */
514 }
515
516 /* We should never get here. */
517 {
518 SIM_DESC sd = CPU_STATE (current_cpu);
519 sim_engine_abort (sd, current_cpu, pc,
520 "interrupt class not supported %d\n",
521 interrupt->iclass);
522 }
523 }
524
525 /* Check to see if the RSTR.HR or RSTR.SR bits have been set. If so, handle
526 the appropriate reset interrupt. */
527 static int
528 check_reset (SIM_CPU *current_cpu, IADDR pc)
529 {
530 int hsr0;
531 int hr;
532 int sr;
533 SI rstr;
534 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
535 IADDR address = RSTR_ADDRESS;
536
537 /* We don't want this to show up in the cache statistics, so read the
538 cache passively. */
539 if (! frv_cache_read_passive_SI (cache, address, & rstr))
540 rstr = sim_core_read_unaligned_4 (current_cpu, pc, read_map, address);
541
542 hr = GET_RSTR_HR (rstr);
543 sr = GET_RSTR_SR (rstr);
544
545 if (! hr && ! sr)
546 return 0; /* no reset. */
547
548 /* Reinitialize the machine state. */
549 if (hr)
550 frv_hardware_reset (current_cpu);
551 else
552 frv_software_reset (current_cpu);
553
554 /* Branch to the reset address. */
555 hsr0 = GET_HSR0 ();
556 if (GET_HSR0_SA (hsr0))
557 SET_H_PC (0xff000000);
558 else
559 SET_H_PC (0);
560
561 return 1; /* reset */
562 }
563
564 /* Process any pending interrupt(s) after a group of parallel insns. */
565 void
566 frv_process_interrupts (SIM_CPU *current_cpu)
567 {
568 SI NE_flags[2];
569 /* Need to save the pc here because writeback may change it (due to a
570 branch). */
571 IADDR pc = CPU_PC_GET (current_cpu);
572
573 /* Check for a reset before anything else. */
574 if (check_reset (current_cpu, pc))
575 return;
576
577 /* First queue the writes for any accumulated NE flags. */
578 if (frv_interrupt_state.f_ne_flags[0] != 0
579 || frv_interrupt_state.f_ne_flags[1] != 0)
580 {
581 GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
582 NE_flags[0] |= frv_interrupt_state.f_ne_flags[0];
583 NE_flags[1] |= frv_interrupt_state.f_ne_flags[1];
584 SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
585 }
586
587 /* If there is no interrupt pending, then perform parallel writeback. This
588 may cause an interrupt. */
589 if (frv_interrupt_state.queue_index <= 0)
590 frvbf_perform_writeback (current_cpu);
591
592 /* If there is an interrupt pending, then process it. */
593 if (frv_interrupt_state.queue_index > 0)
594 handle_interrupt (current_cpu, pc);
595 }
596
597 /* Determine which ESR (and related EPCR/EAR/EDR) to use for a data access exception and return its index. */
598 static int
599 esr_for_data_access_exception (
600 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
601 )
602 {
603 SIM_DESC sd = CPU_STATE (current_cpu);
604 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
605 return 8; /* Use ESR8, EPCR8. */
606
607 if (item->slot == UNIT_I0)
608 return 8; /* Use ESR8, EPCR8, EAR8, EDR8. */
609
610 return 9; /* Use ESR9, EPCR9, EAR9. */
611 }
612
613 /* Set the appropriate EDR registers with the data which was to be stored
614 and return the index of the first register used. */
615 static int
616 set_edr_register (
617 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, int edr_index
618 )
619 {
620 /* EDR0, EDR4 and EDR8 are available as blocks of 4.
621 SI data uses EDR3, EDR7 and EDR11
622 DI data uses EDR2, EDR6 and EDR10
623 XI data uses EDR0, EDR4 and EDR8. */
624 int i;
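  /* Right-justify the data within the block of four EDRs, as described
     above. */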
625 edr_index += 4 - item->u.data_written.length;
626 for (i = 0; i < item->u.data_written.length; ++i)
627 SET_EDR (edr_index + i, item->u.data_written.words[i]);
628
629 return edr_index;
630 }
631
632 /* Clear ESFR0, EPCRx, ESRx, EARx and EDRx. */
633 static void
634 clear_exception_status_registers (SIM_CPU *current_cpu)
635 {
636 int i;
637 /* It is only necessary to clear the flag bits indicating which registers
638 are valid. */
639 SET_ESFR (0, 0);
640 SET_ESFR (1, 0);
641
642 for (i = 0; i <= 2; ++i)
643 {
644 SI esr = GET_ESR (i);
645 CLEAR_ESR_VALID (esr);
646 SET_ESR (i, esr);
647 }
648 for (i = 8; i <= 15; ++i)
649 {
650 SI esr = GET_ESR (i);
651 CLEAR_ESR_VALID (esr);
652 SET_ESR (i, esr);
653 }
654 }
655
656 /* Record state for media exception. */
657 void
658 frv_set_mp_exception_registers (
659 SIM_CPU *current_cpu, enum frv_msr_mtt mtt, int sie
660 )
661 {
662 /* Record the interrupt factor in MSR0. */
663 SI msr0 = GET_MSR (0);
664 if (GET_MSR_MTT (msr0) == MTT_NONE)
665 SET_MSR_MTT (msr0, mtt);
666
667 /* Also set the OVF bit in the appropriate MSR as well as MSR0.AOVF. */
668 if (mtt == MTT_OVERFLOW)
669 {
670 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
671 int slot = vliw->next_slot - 1;
672 SIM_DESC sd = CPU_STATE (current_cpu);
673
674 /* If this insn is in the M2 slot, then set MSR1.OVF and MSR1.SIE,
675 otherwise set MSR0.OVF and MSR0.SIE. */
676 if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550 && (*vliw->current_vliw)[slot] == UNIT_FM1)
677 {
678 SI msr = GET_MSR (1);
679 OR_MSR_SIE (msr, sie);
680 SET_MSR_OVF (msr);
681 SET_MSR (1, msr);
682 }
683 else
684 {
685 OR_MSR_SIE (msr0, sie);
686 SET_MSR_OVF (msr0);
687 }
688
689 /* Generate the interrupt now if MSR0.MPEM is set on fr550 */
690 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550 && GET_MSR_MPEM (msr0))
691 frv_queue_program_interrupt (current_cpu, FRV_MP_EXCEPTION);
692 else
693 {
694 /* Regardless of the slot, set MSR0.AOVF. */
695 SET_MSR_AOVF (msr0);
696 }
697 }
698
699 SET_MSR (0, msr0);
700 }
701
702 /* Determine the correct FQ register to use for the given exception.
703 Return -1 if a register is not available. */
704 static int
705 fq_for_exception (
706 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
707 )
708 {
709 SI fq;
710 struct frv_fp_exception_info *fp_info = & item->u.fp_info;
711
712 /* For fp_exception overflow, underflow or inexact, use FQ0 or FQ1. */
713 if (fp_info->ftt == FTT_IEEE_754_EXCEPTION
714 && (fp_info->fsr_mask & (FSR_OVERFLOW | FSR_UNDERFLOW | FSR_INEXACT)))
715 {
716 fq = GET_FQ (0);
717 if (! GET_FQ_VALID (fq))
718 return 0; /* FQ0 is available. */
719 fq = GET_FQ (1);
720 if (! GET_FQ_VALID (fq))
721 return 1; /* FQ1 is available. */
722
723 /* No FQ register is available */
724 {
725 SIM_DESC sd = CPU_STATE (current_cpu);
726 IADDR pc = CPU_PC_GET (current_cpu);
727 sim_engine_abort (sd, current_cpu, pc, "No FQ register available\n");
728 }
729 return -1;
730 }
731 /* For other exceptions, use FQ2 if the insn was in slot F0/I0 and FQ3
732 otherwise. */
733 if (item->slot == UNIT_FM0 || item->slot == UNIT_I0)
734 return 2;
735
736 return 3;
737 }
738
739 /* Set FSR0, FQ0-FQ9, depending on the interrupt. */
740 static void
741 set_fp_exception_registers (
742 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
743 )
744 {
745 int fq_index;
746 SI fq;
747 SI insn;
748 SI fsr0;
749 IADDR pc;
750 struct frv_fp_exception_info *fp_info;
751 SIM_DESC sd = CPU_STATE (current_cpu);
752
753 /* No FQ registers on fr550 */
754 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
755 {
756 /* Update the fsr. */
757 fp_info = & item->u.fp_info;
758 fsr0 = GET_FSR (0);
759 SET_FSR_FTT (fsr0, fp_info->ftt);
760 SET_FSR (0, fsr0);
761 return;
762 }
763
764 /* Select an FQ and update it with the exception information. */
765 fq_index = fq_for_exception (current_cpu, item);
766 if (fq_index == -1)
767 return;
768
769 fp_info = & item->u.fp_info;
770 fq = GET_FQ (fq_index);
771 SET_FQ_MIV (fq, MIV_FLOAT);
772 SET_FQ_SIE (fq, SIE_NIL);
773 SET_FQ_FTT (fq, fp_info->ftt);
774 SET_FQ_CEXC (fq, fp_info->fsr_mask);
775 SET_FQ_VALID (fq);
776 SET_FQ (fq_index, fq);
777
778 /* Write the failing insn into FQx.OPC. */
779 pc = item->vpc;
780 insn = GETMEMSI (current_cpu, pc, pc);
781 SET_FQ_OPC (fq_index, insn);
782
783 /* Update the fsr. */
784 fsr0 = GET_FSR (0);
785 SET_FSR_QNE (fsr0); /* FQ not empty */
786 SET_FSR_FTT (fsr0, fp_info->ftt);
787 SET_FSR (0, fsr0);
788 }
789
790 /* Record the state of a division exception in the ISR. */
791 static void
792 set_isr_exception_fields (
793 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
794 )
795 {
796 USI isr = GET_ISR ();
797 int dtt = GET_ISR_DTT (isr);
798 dtt |= item->u.dtt;
799 SET_ISR_DTT (isr, dtt);
800 SET_ISR (isr);
801 }
802
803 /* Set ESFR0, EPCRx, ESRx, EARx and EDRx, according to the given program
804 interrupt. */
805 static void
806 set_exception_status_registers (
807 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
808 )
809 {
810 struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
811 int slot = (item->vpc - previous_vliw_pc) / 4;
812 int reg_index = -1;
813 int set_ear = 0;
814 int set_edr = 0;
815 int set_daec = 0;
816 int set_epcr = 0;
817 SI esr = 0;
818 SIM_DESC sd = CPU_STATE (current_cpu);
819
820 /* If the interrupt is strict (precise) or the interrupt is on the insn
821 in the I0 pipe, then set the 0 registers. */
822 if (interrupt->precise)
823 {
824 reg_index = 0;
825 if (interrupt->kind == FRV_REGISTER_EXCEPTION)
826 SET_ESR_REC (esr, item->u.rec);
827 else if (interrupt->kind == FRV_INSTRUCTION_ACCESS_EXCEPTION)
828 SET_ESR_IAEC (esr, item->u.iaec);
829 /* For fr550, don't set epcr for precise interrupts. */
830 if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550)
831 set_epcr = 1;
832 }
833 else
834 {
835 switch (interrupt->kind)
836 {
837 case FRV_DIVISION_EXCEPTION:
838 set_isr_exception_fields (current_cpu, item);
839 /* fall thru to set reg_index. */
840 case FRV_COMMIT_EXCEPTION:
841 /* For fr550, always use ESR0. */
842 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
843 reg_index = 0;
844 else if (item->slot == UNIT_I0)
845 reg_index = 0;
846 else if (item->slot == UNIT_I1)
847 reg_index = 1;
848 set_epcr = 1;
849 break;
850 case FRV_DATA_STORE_ERROR:
851 reg_index = 14; /* Use ESR14. */
852 break;
853 case FRV_DATA_ACCESS_ERROR:
854 reg_index = 15; /* Use ESR15, EPCR15. */
855 set_ear = 1;
856 break;
857 case FRV_DATA_ACCESS_EXCEPTION:
858 set_daec = 1;
859 /* fall through */
860 case FRV_DATA_ACCESS_MMU_MISS:
861 case FRV_MEM_ADDRESS_NOT_ALIGNED:
862 /* Get the appropriate ESR, EPCR, EAR and EDR.
863 EAR will be set. EDR will only be set if this is a store insn. */
864 set_ear = 1;
865 /* For fr550, never use EDRx. */
866 if (STATE_ARCHITECTURE (sd)->mach != bfd_mach_fr550)
867 if (item->u.data_written.length != 0)
868 set_edr = 1;
869 reg_index = esr_for_data_access_exception (current_cpu, item);
870 set_epcr = 1;
871 break;
872 case FRV_MP_EXCEPTION:
873 /* For fr550, use EPCR2 and ESR2. */
874 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
875 {
876 reg_index = 2;
877 set_epcr = 1;
878 }
879 break; /* MSR0-1, FQ0-9 are already set. */
880 case FRV_FP_EXCEPTION:
881 set_fp_exception_registers (current_cpu, item);
882 /* For fr550, use EPCR2 and ESR2. */
883 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
884 {
885 reg_index = 2;
886 set_epcr = 1;
887 }
888 break;
889 default:
890 {
891 SIM_DESC sd = CPU_STATE (current_cpu);
892 IADDR pc = CPU_PC_GET (current_cpu);
893 sim_engine_abort (sd, current_cpu, pc,
894 "invalid non-strict program interrupt kind: %d\n",
895 interrupt->kind);
896 break;
897 }
898 }
899 } /* non-strict (imprecise) interrupt */
900
901 /* Now fill in the selected exception status registers. */
902 if (reg_index != -1)
903 {
904 /* Now set the exception status registers. */
905 SET_ESFR_FLAG (reg_index);
906 SET_ESR_EC (esr, interrupt->ec);
907
908 if (set_epcr)
909 {
910 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400)
911 SET_EPCR (reg_index, previous_vliw_pc);
912 else
913 SET_EPCR (reg_index, item->vpc);
914 }
915
916 if (set_ear)
917 {
918 SET_EAR (reg_index, item->eaddress);
919 SET_ESR_EAV (esr);
920 }
921 else
922 CLEAR_ESR_EAV (esr);
923
924 if (set_edr)
925 {
926 int edn = set_edr_register (current_cpu, item, 0/* EDR0-3 */);
927 SET_ESR_EDN (esr, edn);
928 SET_ESR_EDV (esr);
929 }
930 else
931 CLEAR_ESR_EDV (esr);
932
933 if (set_daec)
934 SET_ESR_DAEC (esr, item->u.daec);
935
936 SET_ESR_VALID (esr);
937 SET_ESR (reg_index, esr);
938 }
939 }
940
941 /* Check for compound interrupts.
942 Returns NULL if no interrupt is to be processed. */
943 static struct frv_interrupt *
944 check_for_compound_interrupt (
945 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item
946 )
947 {
948 struct frv_interrupt *interrupt;
949
950 /* Set the exception status registers for the original interrupt. */
951 set_exception_status_registers (current_cpu, item);
952 interrupt = & frv_interrupt_table[item->kind];
953
954 if (! interrupt->precise)
955 {
956 IADDR vpc = 0;
957 int mask = 0;
958
959 vpc = item->vpc;
960 mask = (1 << item->kind);
961
962 /* Look for more queued program interrupts which are non-deferred
963 (pending inhibit), imprecise (non-strict), different from an interrupt
964 already found, and caused by a different insn. A bit mask is used
965 to keep track of interrupts which have already been detected. */
966 while (item != frv_interrupt_state.queue)
967 {
968 enum frv_interrupt_kind kind;
969 struct frv_interrupt *next_interrupt;
970 --item;
971 kind = item->kind;
972 next_interrupt = & frv_interrupt_table[kind];
973
974 if (next_interrupt->iclass != FRV_PROGRAM_INTERRUPT)
975 break; /* no program interrupts left. */
976
977 if (item->vpc == vpc)
978 continue; /* caused by the same insn. */
979
980 vpc = item->vpc;
981 if (! next_interrupt->precise && ! next_interrupt->deferred)
982 {
983 if (! (mask & (1 << kind)))
984 {
985 /* Set the exception status registers for the additional
986 interrupt. */
987 set_exception_status_registers (current_cpu, item);
988 mask |= (1 << kind);
989 interrupt = & frv_interrupt_table[FRV_COMPOUND_EXCEPTION];
990 }
991 }
992 }
993 }
994
995 /* Return with either the original interrupt, a compound_exception,
996 or no exception. */
997 return interrupt;
998 }
999
1000 /* Handle a program interrupt. */
1001 void
1002 frv_program_interrupt (
1003 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, IADDR pc
1004 )
1005 {
1006 struct frv_interrupt *interrupt;
1007
1008 clear_exception_status_registers (current_cpu);
1009 /* If two or more non-deferred imprecise (non-strict) interrupts occur
1010 on two or more insns, then generate a compound_exception. */
1011 interrupt = check_for_compound_interrupt (current_cpu, item);
1012 if (interrupt != NULL)
1013 {
1014 frv_program_or_software_interrupt (current_cpu, interrupt, pc);
1015 frv_clear_interrupt_classes (FRV_SOFTWARE_INTERRUPT,
1016 FRV_PROGRAM_INTERRUPT);
1017 }
1018 }
1019
1020 /* Handle a software interrupt. */
1021 void
1022 frv_software_interrupt (
1023 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, IADDR pc
1024 )
1025 {
1026 struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
1027 frv_program_or_software_interrupt (current_cpu, interrupt, pc);
1028 }
1029
1030 /* Handle a program interrupt or a software interrupt in non-operating mode. */
1031 void
1032 frv_non_operating_interrupt (
1033 SIM_CPU *current_cpu, enum frv_interrupt_kind kind, IADDR pc
1034 )
1035 {
1036 SIM_DESC sd = CPU_STATE (current_cpu);
1037 switch (kind)
1038 {
1039 case FRV_INTERRUPT_LEVEL_1:
1040 case FRV_INTERRUPT_LEVEL_2:
1041 case FRV_INTERRUPT_LEVEL_3:
1042 case FRV_INTERRUPT_LEVEL_4:
1043 case FRV_INTERRUPT_LEVEL_5:
1044 case FRV_INTERRUPT_LEVEL_6:
1045 case FRV_INTERRUPT_LEVEL_7:
1046 case FRV_INTERRUPT_LEVEL_8:
1047 case FRV_INTERRUPT_LEVEL_9:
1048 case FRV_INTERRUPT_LEVEL_10:
1049 case FRV_INTERRUPT_LEVEL_11:
1050 case FRV_INTERRUPT_LEVEL_12:
1051 case FRV_INTERRUPT_LEVEL_13:
1052 case FRV_INTERRUPT_LEVEL_14:
1053 case FRV_INTERRUPT_LEVEL_15:
1054 sim_engine_abort (sd, current_cpu, pc,
1055 "interrupt: external %d\n", kind + 1);
1056 break;
1057 case FRV_TRAP_INSTRUCTION:
1058 break; /* handle as in operating mode. */
1059 case FRV_COMMIT_EXCEPTION:
1060 sim_engine_abort (sd, current_cpu, pc,
1061 "interrupt: commit_exception\n");
1062 break;
1063 case FRV_DIVISION_EXCEPTION:
1064 sim_engine_abort (sd, current_cpu, pc,
1065 "interrupt: division_exception\n");
1066 break;
1067 case FRV_DATA_STORE_ERROR:
1068 sim_engine_abort (sd, current_cpu, pc,
1069 "interrupt: data_store_error\n");
1070 break;
1071 case FRV_DATA_ACCESS_EXCEPTION:
1072 sim_engine_abort (sd, current_cpu, pc,
1073 "interrupt: data_access_exception\n");
1074 break;
1075 case FRV_DATA_ACCESS_MMU_MISS:
1076 sim_engine_abort (sd, current_cpu, pc,
1077 "interrupt: data_access_mmu_miss\n");
1078 break;
1079 case FRV_DATA_ACCESS_ERROR:
1080 sim_engine_abort (sd, current_cpu, pc,
1081 "interrupt: data_access_error\n");
1082 break;
1083 case FRV_MP_EXCEPTION:
1084 sim_engine_abort (sd, current_cpu, pc,
1085 "interrupt: mp_exception\n");
1086 break;
1087 case FRV_FP_EXCEPTION:
1088 sim_engine_abort (sd, current_cpu, pc,
1089 "interrupt: fp_exception\n");
1090 break;
1091 case FRV_MEM_ADDRESS_NOT_ALIGNED:
1092 sim_engine_abort (sd, current_cpu, pc,
1093 "interrupt: mem_address_not_aligned\n");
1094 break;
1095 case FRV_REGISTER_EXCEPTION:
1096 sim_engine_abort (sd, current_cpu, pc,
1097 "interrupt: register_exception\n");
1098 break;
1099 case FRV_MP_DISABLED:
1100 sim_engine_abort (sd, current_cpu, pc,
1101 "interrupt: mp_disabled\n");
1102 break;
1103 case FRV_FP_DISABLED:
1104 sim_engine_abort (sd, current_cpu, pc,
1105 "interrupt: fp_disabled\n");
1106 break;
1107 case FRV_PRIVILEGED_INSTRUCTION:
1108 sim_engine_abort (sd, current_cpu, pc,
1109 "interrupt: privileged_instruction\n");
1110 break;
1111 case FRV_ILLEGAL_INSTRUCTION:
1112 sim_engine_abort (sd, current_cpu, pc,
1113 "interrupt: illegal_instruction\n");
1114 break;
1115 case FRV_INSTRUCTION_ACCESS_EXCEPTION:
1116 sim_engine_abort (sd, current_cpu, pc,
1117 "interrupt: instruction_access_exception\n");
1118 break;
1119 case FRV_INSTRUCTION_ACCESS_MMU_MISS:
1120 sim_engine_abort (sd, current_cpu, pc,
1121 "interrupt: instruction_access_mmu_miss\n");
1122 break;
1123 case FRV_INSTRUCTION_ACCESS_ERROR:
1124 sim_engine_abort (sd, current_cpu, pc,
1125 "interrupt: insn_access_error\n");
1126 break;
1127 case FRV_COMPOUND_EXCEPTION:
1128 sim_engine_abort (sd, current_cpu, pc,
1129 "interrupt: compound_exception\n");
1130 break;
1131 case FRV_BREAK_EXCEPTION:
1132 sim_engine_abort (sd, current_cpu, pc,
1133 "interrupt: break_exception\n");
1134 break;
1135 case FRV_RESET:
1136 sim_engine_abort (sd, current_cpu, pc,
1137 "interrupt: reset\n");
1138 break;
1139 default:
1140 sim_engine_abort (sd, current_cpu, pc,
1141 "unhandled interrupt kind: %d\n", kind);
1142 break;
1143 }
1144 }
1145
1146 /* Handle a break interrupt. */
1147 void
1148 frv_break_interrupt (
1149 SIM_CPU *current_cpu, struct frv_interrupt *interrupt, IADDR current_pc
1150 )
1151 {
1152 IADDR new_pc;
1153
1154 /* BPCSR=PC
1155 BPSR.BS=PSR.S
1156 BPSR.BET=PSR.ET
1157 PSR.S=1
1158 PSR.ET=0
1159 TBR.TT=0xff
1160 PC=TBR
1161 */
1162 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1163 SET_H_BPSR_BS (GET_H_PSR_S ());
1164 SET_H_BPSR_BET (GET_H_PSR_ET ());
1165 SET_H_PSR_S (1);
1166 SET_H_PSR_ET (0);
1167 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1168 SET_H_SPR (H_SPR_BPCSR, current_pc);
1169
1170 /* Set the new PC in the TBR. */
1171 SET_H_TBR_TT (interrupt->handler_offset);
1172 new_pc = GET_H_SPR (H_SPR_TBR);
1173 SET_H_PC (new_pc);
1174
1175 CPU_DEBUG_STATE (current_cpu) = 1;
1176 }
1177
1178 /* Handle a program interrupt or a software interrupt. */
1179 void
1180 frv_program_or_software_interrupt (
1181 SIM_CPU *current_cpu, struct frv_interrupt *interrupt, IADDR current_pc
1182 )
1183 {
1184 USI new_pc;
1185 int original_psr_et;
1186
1187 /* PCSR=PC
1188 PSR.PS=PSR.S
1189 PSR.ET=0
1190 PSR.S=1
1191 if PSR.ESR==1
1192 SR0 through SR3=GR4 through GR7
1193 TBR.TT=interrupt handler offset
1194 PC=TBR
1195 */
1196 original_psr_et = GET_H_PSR_ET ();
1197
1198 SET_H_PSR_PS (GET_H_PSR_S ());
1199 SET_H_PSR_ET (0);
1200 SET_H_PSR_S (1);
1201
1202 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1203 /* The PCSR depends on the precision of the interrupt. */
1204 if (interrupt->precise)
1205 SET_H_SPR (H_SPR_PCSR, previous_vliw_pc);
1206 else
1207 SET_H_SPR (H_SPR_PCSR, current_pc);
1208
1209 /* Set the new PC in the TBR. */
1210 SET_H_TBR_TT (interrupt->handler_offset);
1211 new_pc = GET_H_SPR (H_SPR_TBR);
1212 SET_H_PC (new_pc);
1213
1214 /* If PSR.ET was not originally set, then enter the stopped state. */
1215 if (! original_psr_et)
1216 {
1217 SIM_DESC sd = CPU_STATE (current_cpu);
1218 frv_non_operating_interrupt (current_cpu, interrupt->kind, current_pc);
1219 sim_engine_halt (sd, current_cpu, NULL, new_pc, sim_stopped, SIM_SIGINT);
1220 }
1221 }
1222
1223 /* Handle an external interrupt. */
1224 void
1225 frv_external_interrupt (
1226 SIM_CPU *current_cpu, struct frv_interrupt_queue_element *item, IADDR pc
1227 )
1228 {
1229 USI new_pc;
1230 struct frv_interrupt *interrupt = & frv_interrupt_table[item->kind];
1231
1232 /* Don't process the interrupt if PSR.ET is not set or if it is masked.
1233 Interrupt 15 is processed even if it appears to be masked. */
1234 if (! GET_H_PSR_ET ()
1235 || (interrupt->kind != FRV_INTERRUPT_LEVEL_15
1236 && interrupt->kind < GET_H_PSR_PIL ()))
1237 return; /* Leave it for later. */
1238
1239 /* Remove the interrupt from the queue. */
1240 --frv_interrupt_state.queue_index;
1241
1242 /* PCSR=PC
1243 PSR.PS=PSR.S
1244 PSR.ET=0
1245 PSR.S=1
1246 if PSR.ESR==1
1247 SR0 through SR3=GR4 through GR7
1248 TBR.TT=interrupt handler offset
1249 PC=TBR
1250 */
1251 SET_H_PSR_PS (GET_H_PSR_S ());
1252 SET_H_PSR_ET (0);
1253 SET_H_PSR_S (1);
1254 /* Must set PSR.S first to allow access to supervisor-only spr registers. */
1255 SET_H_SPR (H_SPR_PCSR, GET_H_PC ());
1256
1257 /* Set the new PC in the TBR. */
1258 SET_H_TBR_TT (interrupt->handler_offset);
1259 new_pc = GET_H_SPR (H_SPR_TBR);
1260 SET_H_PC (new_pc);
1261 }
1262
1263 /* Clear interrupts which fall within the range of classes given. */
1264 void
1265 frv_clear_interrupt_classes (
1266 enum frv_interrupt_class low_class, enum frv_interrupt_class high_class
1267 )
1268 {
1269 int i;
1270 int j;
1271 int limit = frv_interrupt_state.queue_index;
1272
1273 /* Find the lowest priority interrupt to be removed. */
1274 for (i = 0; i < limit; ++i)
1275 {
1276 enum frv_interrupt_kind kind = frv_interrupt_state.queue[i].kind;
1277 struct frv_interrupt* interrupt = & frv_interrupt_table[kind];
1278 if (interrupt->iclass >= low_class)
1279 break;
1280 }
1281
1282 /* Find the highest priority interrupt to be removed. */
1283 for (j = limit - 1; j >= i; --j)
1284 {
1285 enum frv_interrupt_kind kind = frv_interrupt_state.queue[j].kind;
1286 struct frv_interrupt* interrupt = & frv_interrupt_table[kind];
1287 if (interrupt->iclass <= high_class)
1288 break;
1289 }
1290
1291 /* Shuffle the remaining high priority interrupts down into the empty space
1292 left by the deleted interrupts. */
1293 if (j >= i)
1294 {
1295 for (++j; j < limit; ++j)
1296 frv_interrupt_state.queue[i++] = frv_interrupt_state.queue[j];
1297 frv_interrupt_state.queue_index -= (j - i);
1298 }
1299 }
1300
1301 /* Save data written to memory into the interrupt state so that it can be
1302 copied to the appropriate EDR register, if necessary, in the event of an
1303 interrupt. */
1304 void
1305 frv_save_data_written_for_interrupts (
1306 SIM_CPU *current_cpu, CGEN_WRITE_QUEUE_ELEMENT *item
1307 )
1308 {
1309 /* Record the slot containing the insn doing the write in the
1310 interrupt state. */
1311 frv_interrupt_state.slot = CGEN_WRITE_QUEUE_ELEMENT_PIPE (item);
1312
1313 /* Now record any data written to memory in the interrupt state. */
1314 switch (CGEN_WRITE_QUEUE_ELEMENT_KIND (item))
1315 {
1316 case CGEN_BI_WRITE:
1317 case CGEN_QI_WRITE:
1318 case CGEN_SI_WRITE:
1319 case CGEN_SF_WRITE:
1320 case CGEN_PC_WRITE:
1321 case CGEN_FN_HI_WRITE:
1322 case CGEN_FN_SI_WRITE:
1323 case CGEN_FN_SF_WRITE:
1324 case CGEN_FN_DI_WRITE:
1325 case CGEN_FN_DF_WRITE:
1326 case CGEN_FN_XI_WRITE:
1327 case CGEN_FN_PC_WRITE:
1328 break; /* Ignore writes to registers. */
1329 case CGEN_MEM_QI_WRITE:
1330 frv_interrupt_state.data_written.length = 1;
1331 frv_interrupt_state.data_written.words[0]
1332 = item->kinds.mem_qi_write.value;
1333 break;
1334 case CGEN_MEM_HI_WRITE:
1335 frv_interrupt_state.data_written.length = 1;
1336 frv_interrupt_state.data_written.words[0]
1337 = item->kinds.mem_hi_write.value;
1338 break;
1339 case CGEN_MEM_SI_WRITE:
1340 frv_interrupt_state.data_written.length = 1;
1341 frv_interrupt_state.data_written.words[0]
1342 = item->kinds.mem_si_write.value;
1343 break;
1344 case CGEN_MEM_DI_WRITE:
1345 frv_interrupt_state.data_written.length = 2;
1346 frv_interrupt_state.data_written.words[0]
1347 = item->kinds.mem_di_write.value >> 32;
1348 frv_interrupt_state.data_written.words[1]
1349 = item->kinds.mem_di_write.value;
1350 break;
1351 case CGEN_MEM_DF_WRITE:
1352 frv_interrupt_state.data_written.length = 2;
1353 frv_interrupt_state.data_written.words[0]
1354 = item->kinds.mem_df_write.value >> 32;
1355 frv_interrupt_state.data_written.words[1]
1356 = item->kinds.mem_df_write.value;
1357 break;
1358 case CGEN_MEM_XI_WRITE:
1359 frv_interrupt_state.data_written.length = 4;
1360 frv_interrupt_state.data_written.words[0]
1361 = item->kinds.mem_xi_write.value[0];
1362 frv_interrupt_state.data_written.words[1]
1363 = item->kinds.mem_xi_write.value[1];
1364 frv_interrupt_state.data_written.words[2]
1365 = item->kinds.mem_xi_write.value[2];
1366 frv_interrupt_state.data_written.words[3]
1367 = item->kinds.mem_xi_write.value[3];
1368 break;
1369 case CGEN_FN_MEM_QI_WRITE:
1370 frv_interrupt_state.data_written.length = 1;
1371 frv_interrupt_state.data_written.words[0]
1372 = item->kinds.fn_mem_qi_write.value;
1373 break;
1374 case CGEN_FN_MEM_HI_WRITE:
1375 frv_interrupt_state.data_written.length = 1;
1376 frv_interrupt_state.data_written.words[0]
1377 = item->kinds.fn_mem_hi_write.value;
1378 break;
1379 case CGEN_FN_MEM_SI_WRITE:
1380 frv_interrupt_state.data_written.length = 1;
1381 frv_interrupt_state.data_written.words[0]
1382 = item->kinds.fn_mem_si_write.value;
1383 break;
1384 case CGEN_FN_MEM_DI_WRITE:
1385 frv_interrupt_state.data_written.length = 2;
1386 frv_interrupt_state.data_written.words[0]
1387 = item->kinds.fn_mem_di_write.value >> 32;
1388 frv_interrupt_state.data_written.words[1]
1389 = item->kinds.fn_mem_di_write.value;
1390 break;
1391 case CGEN_FN_MEM_DF_WRITE:
1392 frv_interrupt_state.data_written.length = 2;
1393 frv_interrupt_state.data_written.words[0]
1394 = item->kinds.fn_mem_df_write.value >> 32;
1395 frv_interrupt_state.data_written.words[1]
1396 = item->kinds.fn_mem_df_write.value;
1397 break;
1398 case CGEN_FN_MEM_XI_WRITE:
1399 frv_interrupt_state.data_written.length = 4;
1400 frv_interrupt_state.data_written.words[0]
1401 = item->kinds.fn_mem_xi_write.value[0];
1402 frv_interrupt_state.data_written.words[1]
1403 = item->kinds.fn_mem_xi_write.value[1];
1404 frv_interrupt_state.data_written.words[2]
1405 = item->kinds.fn_mem_xi_write.value[2];
1406 frv_interrupt_state.data_written.words[3]
1407 = item->kinds.fn_mem_xi_write.value[3];
1408 break;
1409 default:
1410 {
1411 SIM_DESC sd = CPU_STATE (current_cpu);
1412 IADDR pc = CPU_PC_GET (current_cpu);
1413 sim_engine_abort (sd, current_cpu, pc,
1414 "unknown write kind during save for interrupt\n");
1415 }
1416 break;
1417 }
1418 }