/* sim/bfin/dv-bfin_cec.c */
/* Blackfin Core Event Controller (CEC) model.

   Copyright (C) 2010-2020 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "dv-bfin_cec.h"
#include "dv-bfin_evt.h"
#include "dv-bfin_mmu.h"

/* Per-core CEC device state.  */
struct bfin_cec
{
  bu32 base;                 /* Base address of the attached MMR region.  */
  SIM_CPU *cpu;              /* The CPU this controller services.  */
  struct hw *me;             /* Back-pointer to the device instance.  */
  struct hw_event *pending;  /* Non-NULL while a delivery check is queued.  */

  /* Order after here is important -- matches hardware MMR layout.  */
  bu32 evt_override, imask, ipend, ilat, iprio;
};
/* Map an MMR address offset onto the trailing register fields above.  */
#define mmr_base() offsetof(struct bfin_cec, evt_override)
#define mmr_offset(mmr) (offsetof(struct bfin_cec, mmr) - mmr_base())

/* Register names in MMR-layout order, for trace output; indexed by word
   offset via mmr_name().  */
static const char * const mmr_names[] =
{
  "EVT_OVERRIDE", "IMASK", "IPEND", "ILAT", "IPRIO",
};
#define mmr_name(off) mmr_names[(off) / 4]

/* Forward declaration: the event callback below needs the raise logic.  */
static void _cec_raise (SIM_CPU *, struct bfin_cec *, int);
49
50static void
51bfin_cec_hw_event_callback (struct hw *me, void *data)
52{
53 struct bfin_cec *cec = data;
54 hw_event_queue_deschedule (me, cec->pending);
55 _cec_raise (cec->cpu, cec, -1);
56 cec->pending = NULL;
57}
58static void
59bfin_cec_check_pending (struct hw *me, struct bfin_cec *cec)
60{
61 if (cec->pending)
62 return;
63 cec->pending = hw_event_queue_schedule (me, 0, bfin_cec_hw_event_callback, cec);
64}
/* CPU-flavored convenience wrapper around bfin_cec_check_pending.  */
static void
_cec_check_pending (SIM_CPU *cpu, struct bfin_cec *cec)
{
  bfin_cec_check_pending (cec->me, cec);
}
70
/* Update IMASK: only bits in IVG_MASKABLE_B are writable by software;
   bits in IVG_UNMASKABLE_B always keep their current value.  */
static void
_cec_imask_write (struct bfin_cec *cec, bu32 value)
{
  cec->imask = (value & IVG_MASKABLE_B) | (cec->imask & IVG_UNMASKABLE_B);
}
76
/* Handle a write into the CEC MMR region.  Returns the number of bytes
   consumed, or 0 when the access itself was invalid (which raises its
   own fault via dv_bfin_mmr_require_32).
   NOTE: the HW_TRACE_WRITE macro picks up the surrounding locals by
   name (addr, mmr_off, value, nr_bytes) -- do not rename them.  */
static unsigned
bfin_cec_io_write_buffer (struct hw *me, const void *source,
                          int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_cec *cec = hw_data (me);
  bu32 mmr_off;
  bu32 value;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, true))
    return 0;

  value = dv_load_4 (source);
  mmr_off = addr - cec->base;

  HW_TRACE_WRITE ();

  switch (mmr_off)
    {
    case mmr_offset(evt_override):
      cec->evt_override = value;
      break;
    case mmr_offset(imask):
      /* A new mask may unblock already-latched interrupts, so re-run
         the pending-delivery check afterwards.  */
      _cec_imask_write (cec, value);
      bfin_cec_check_pending (me, cec);
      break;
    case mmr_offset(ipend):
      /* Read-only register.  */
      break;
    case mmr_offset(ilat):
      /* ILAT uses write-1-to-clear semantics; the 0xffee mask selects
         which bits software may clear this way (see dv_w1c_4).  */
      dv_w1c_4 (&cec->ilat, value, 0xffee);
      break;
    case mmr_offset(iprio):
      /* Only the unmaskable-event bits are kept.  */
      cec->iprio = (value & IVG_UNMASKABLE_B);
      break;
    }

  return nr_bytes;
}
116
117static unsigned
118bfin_cec_io_read_buffer (struct hw *me, void *dest,
119 int space, address_word addr, unsigned nr_bytes)
120{
121 struct bfin_cec *cec = hw_data (me);
122 bu32 mmr_off;
123 bu32 *valuep;
124
466b619e
MF
125 /* Invalid access mode is higher priority than missing register. */
126 if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, false))
127 return 0;
128
ef016f83
MF
129 mmr_off = addr - cec->base;
130 valuep = (void *)((unsigned long)cec + mmr_base() + mmr_off);
131
132 HW_TRACE_READ ();
133
134 dv_store_4 (dest, *valuep);
135
136 return nr_bytes;
137}
138
/* Interrupt input lines exposed to other devices.  Each port ID is the
   IVG event number the line is latched at (lower == higher priority).  */
static const struct hw_port_descriptor bfin_cec_ports[] =
{
  { "emu", IVG_EMU, 0, input_port, },
  { "rst", IVG_RST, 0, input_port, },
  { "nmi", IVG_NMI, 0, input_port, },
  { "evx", IVG_EVX, 0, input_port, },
  { "ivhw", IVG_IVHW, 0, input_port, },
  { "ivtmr", IVG_IVTMR, 0, input_port, },
  { "ivg7", IVG7, 0, input_port, },
  { "ivg8", IVG8, 0, input_port, },
  { "ivg9", IVG9, 0, input_port, },
  { "ivg10", IVG10, 0, input_port, },
  { "ivg11", IVG11, 0, input_port, },
  { "ivg12", IVG12, 0, input_port, },
  { "ivg13", IVG13, 0, input_port, },
  { "ivg14", IVG14, 0, input_port, },
  { "ivg15", IVG15, 0, input_port, },
  { NULL, 0, 0, 0, },
};
158
/* A device drove one of our interrupt lines: raise the corresponding
   event level (the port number is the IVG number, per bfin_cec_ports).  */
static void
bfin_cec_port_event (struct hw *me, int my_port, struct hw *source,
                     int source_port, int level)
{
  struct bfin_cec *cec = hw_data (me);
  _cec_raise (cec->cpu, cec, my_port);
}
166
167static void
168attach_bfin_cec_regs (struct hw *me, struct bfin_cec *cec)
169{
170 address_word attach_address;
171 int attach_space;
172 unsigned attach_size;
173 reg_property_spec reg;
174
175 if (hw_find_property (me, "reg") == NULL)
176 hw_abort (me, "Missing \"reg\" property");
177
178 if (!hw_find_reg_array_property (me, "reg", 0, &reg))
179 hw_abort (me, "\"reg\" property must contain three addr/size entries");
180
181 hw_unit_address_to_attach_address (hw_parent (me),
182 &reg.address,
183 &attach_space, &attach_address, me);
184 hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);
185
186 if (attach_size != BFIN_COREMMR_CEC_SIZE)
187 hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_CEC_SIZE);
188
189 hw_attach_address (hw_parent (me),
190 0, attach_space, attach_address, attach_size, me);
191
192 cec->base = attach_address;
193 /* XXX: should take from the device tree. */
194 cec->cpu = STATE_CPU (hw_system (me), 0);
195 cec->me = me;
196}
197
/* Device instantiation callback: allocate state, wire up the I/O and
   port handlers, attach the MMR region, and apply reset values.  */
static void
bfin_cec_finish (struct hw *me)
{
  struct bfin_cec *cec;

  cec = HW_ZALLOC (me, struct bfin_cec);

  set_hw_data (me, cec);
  set_hw_io_read_buffer (me, bfin_cec_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_cec_io_write_buffer);
  set_hw_ports (me, bfin_cec_ports);
  set_hw_port_event (me, bfin_cec_port_event);

  attach_bfin_cec_regs (me, cec);

  /* Initialize the CEC.  Out of reset only the unmaskable events are
     enabled, and the core sits at the reset event level with the global
     interrupt disable bit (IRPTEN) set.  */
  cec->imask = IVG_UNMASKABLE_B;
  cec->ipend = IVG_RST_B | IVG_IRPTEN_B;
}
217
/* Device-tree hook: maps the "bfin_cec" node name to its finish routine.  */
const struct hw_descriptor dv_bfin_cec_descriptor[] =
{
  {"bfin_cec", bfin_cec_finish,},
  {NULL, NULL},
};
223
/* Human-readable descriptions of the hardware exception vectors, indexed
   by EXCAUSE number; used by the "Unhandled exception" diagnostic in
   cec_exception below.  */
static const char * const excp_decoded[] =
{
  [VEC_SYS        ] = "Custom exception 0 (system call)",
  [VEC_EXCPT01    ] = "Custom exception 1 (software breakpoint)",
  [VEC_EXCPT02    ] = "Custom exception 2 (KGDB hook)",
  [VEC_EXCPT03    ] = "Custom exception 3 (userspace stack overflow)",
  [VEC_EXCPT04    ] = "Custom exception 4 (dump trace buffer)",
  [VEC_EXCPT05    ] = "Custom exception 5",
  [VEC_EXCPT06    ] = "Custom exception 6",
  [VEC_EXCPT07    ] = "Custom exception 7",
  [VEC_EXCPT08    ] = "Custom exception 8",
  [VEC_EXCPT09    ] = "Custom exception 9",
  [VEC_EXCPT10    ] = "Custom exception 10",
  [VEC_EXCPT11    ] = "Custom exception 11",
  [VEC_EXCPT12    ] = "Custom exception 12",
  [VEC_EXCPT13    ] = "Custom exception 13",
  [VEC_EXCPT14    ] = "Custom exception 14",
  [VEC_EXCPT15    ] = "Custom exception 15",
  [VEC_STEP       ] = "Hardware single step",
  [VEC_OVFLOW     ] = "Trace buffer overflow",
  [VEC_UNDEF_I    ] = "Undefined instruction",
  [VEC_ILGAL_I    ] = "Illegal instruction combo (multi-issue)",
  [VEC_CPLB_VL    ] = "DCPLB protection violation",
  [VEC_MISALI_D   ] = "Unaligned data access",
  [VEC_UNCOV      ] = "Unrecoverable event (double fault)",
  [VEC_CPLB_M     ] = "DCPLB miss",
  [VEC_CPLB_MHIT  ] = "Multiple DCPLB hit",
  [VEC_WATCH      ] = "Watchpoint match",
  [VEC_ISTRU_VL   ] = "ADSP-BF535 only",
  [VEC_MISALI_I   ] = "Unaligned instruction access",
  [VEC_CPLB_I_VL  ] = "ICPLB protection violation",
  [VEC_CPLB_I_M   ] = "ICPLB miss",
  [VEC_CPLB_I_MHIT] = "Multiple ICPLB hit",
  [VEC_ILL_RES    ] = "Illegal supervisor resource",
};
259
/* Fetch this CPU's cached CEC device state (only valid in OS mode).  */
#define CEC_STATE(cpu) DV_STATE_CACHED (cpu, cec)

/* Lowest set bit == highest-priority active event, ignoring the global
   IRPTEN disable bit; evaluates to -1 when no bit is set.  */
#define __cec_get_ivg(val) (ffs ((val) & ~IVG_IRPTEN_B) - 1)
/* Current event level from IPEND, additionally ignoring the EMU bit.  */
#define _cec_get_ivg(cec) __cec_get_ivg ((cec)->ipend & ~IVG_EMU_B)

/* Public query: current IVG level, or IVG_USER outside the OS
   environment where no CEC is modeled.  */
int
cec_get_ivg (SIM_CPU *cpu)
{
  switch (STATE_ENVIRONMENT (CPU_STATE (cpu)))
    {
    case OPERATING_ENVIRONMENT:
      return _cec_get_ivg (CEC_STATE (cpu));
    default:
      return IVG_USER;
    }
}
276
/* Supervisor mode iff any event bit other than EMU is active in IPEND
   (IRPTEN is the global-disable flag, not an event).  */
static bool
_cec_is_supervisor_mode (struct bfin_cec *cec)
{
  return (cec->ipend & ~(IVG_EMU_B | IVG_IRPTEN_B));
}
282bool
283cec_is_supervisor_mode (SIM_CPU *cpu)
284{
285 switch (STATE_ENVIRONMENT (CPU_STATE (cpu)))
286 {
287 case OPERATING_ENVIRONMENT:
288 return _cec_is_supervisor_mode (CEC_STATE (cpu));
289 case USER_ENVIRONMENT:
290 return false;
291 default:
292 return true;
293 }
294}
/* User mode is simply the complement of supervisor mode.  */
static bool
_cec_is_user_mode (struct bfin_cec *cec)
{
  return !_cec_is_supervisor_mode (cec);
}
/* Public user-mode query; complement of cec_is_supervisor_mode.  */
bool
cec_is_user_mode (SIM_CPU *cpu)
{
  return !cec_is_supervisor_mode (cpu);
}
/* Raise an illegal-resource exception when executed from user mode.  */
static void
_cec_require_supervisor (SIM_CPU *cpu, struct bfin_cec *cec)
{
  if (_cec_is_user_mode (cec))
    cec_exception (cpu, VEC_ILL_RES);
}
/* Public variant of the supervisor check, safe in any environment.  */
void
cec_require_supervisor (SIM_CPU *cpu)
{
  /* Do not call _cec_require_supervisor() to avoid CEC_STATE()
     as that macro requires OS operating mode.  */
  if (cec_is_user_mode (cpu))
    cec_exception (cpu, VEC_ILL_RES);
}
319
/* Halt the simulation engine, reporting the current PC.  */
#define excp_to_sim_halt(reason, sigrc) \
  sim_engine_halt (CPU_STATE (cpu), cpu, NULL, PCREG, reason, sigrc)

/* Deliver exception EXCP on CPU.  Simulator-internal exceptions
   (VEC_SIM_*) are handled first and never reach the program.  Real
   hardware exceptions (excp <= 0x3f) are forwarded to the modeled CEC
   in the OS environment; otherwise they are mapped onto host-style
   signals and halt the engine.  */
void
cec_exception (SIM_CPU *cpu, int excp)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int sigrc = -1;

  TRACE_EVENTS (cpu, "processing exception %#x in EVT%i", excp,
		cec_get_ivg (cpu));

  /* Ideally what would happen here for real hardware exceptions (not
     fake sim ones) is that:
      - For service exceptions (excp <= 0x11):
         RETX is the _next_ PC which can be tricky with jumps/hardware loops/...
      - For error exceptions (excp > 0x11):
         RETX is the _current_ PC (i.e. the one causing the exception)
      - PC is loaded with EVT3 MMR
      - ILAT/IPEND in CEC is updated depending on current IVG level
      - the fault address MMRs get updated with data/instruction info
      - Execution continues on in the EVT3 handler.  */

  /* Handle simulator exceptions first.  */
  switch (excp)
    {
    case VEC_SIM_HLT:
      /* Normal termination.  */
      excp_to_sim_halt (sim_exited, 0);
      return;
    case VEC_SIM_ABORT:
      excp_to_sim_halt (sim_exited, 1);
      return;
    case VEC_SIM_TRAP:
      /* GDB expects us to step over EMUEXCPT.  */
      /* XXX: What about hwloops and EMUEXCPT at the end?
	 Pretty sure gdb doesn't handle this already...  */
      SET_PCREG (PCREG + 2);
      /* Only trap when we are running in gdb.  */
      if (STATE_OPEN_KIND (sd) == SIM_OPEN_DEBUG)
	excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
      return;
    case VEC_SIM_DBGA:
      /* If running in gdb, simply trap.  */
      if (STATE_OPEN_KIND (sd) == SIM_OPEN_DEBUG)
	excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
      else
	excp_to_sim_halt (sim_exited, 2);
      /* Not reached: excp_to_sim_halt does not return.  */
    }

  if (excp <= 0x3f)
    {
      SET_EXCAUSE (excp);
      if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT)
	{
	  /* ICPLB regs always get updated.  */
	  /* XXX: Should optimize this call path ...  */
	  if (excp != VEC_MISALI_I && excp != VEC_MISALI_D
	      && excp != VEC_CPLB_I_M && excp != VEC_CPLB_M
	      && excp != VEC_CPLB_I_VL && excp != VEC_CPLB_VL
	      && excp != VEC_CPLB_I_MHIT && excp != VEC_CPLB_MHIT)
	    mmu_log_ifault (cpu);
	  _cec_raise (cpu, CEC_STATE (cpu), IVG_EVX);
	  /* We need to restart the engine so that we don't return
	     and continue processing this bad insn.  */
	  if (EXCAUSE >= 0x20)
	    sim_engine_restart (sd, cpu, NULL, PCREG);
	  return;
	}
    }

  TRACE_EVENTS (cpu, "running virtual exception handler");

  /* No CEC modeled: translate the exception into the signal the host
     interface / gdb expects.  */
  switch (excp)
    {
    case VEC_SYS:
      bfin_syscall (cpu);
      break;

    case VEC_EXCPT01:	/* Userspace gdb breakpoint.  */
      sigrc = SIM_SIGTRAP;
      break;

    case VEC_UNDEF_I:	/* Undefined instruction.  */
      sigrc = SIM_SIGILL;
      break;

    case VEC_ILL_RES:	/* Illegal supervisor resource.  */
    case VEC_MISALI_I:	/* Misaligned instruction.  */
      sigrc = SIM_SIGBUS;
      break;

    case VEC_CPLB_M:
    case VEC_CPLB_I_M:
      sigrc = SIM_SIGSEGV;
      break;

    default:
      sim_io_eprintf (sd, "Unhandled exception %#x at 0x%08x (%s)\n",
		      excp, PCREG, excp_decoded[excp]);
      sigrc = SIM_SIGILL;
      break;
    }

  if (sigrc != -1)
    excp_to_sim_halt (sim_stopped, sigrc);
}
425
426bu32 cec_cli (SIM_CPU *cpu)
427{
428 struct bfin_cec *cec;
429 bu32 old_mask;
430
431 if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
432 return 0;
433
434 cec = CEC_STATE (cpu);
435 _cec_require_supervisor (cpu, cec);
436
437 /* XXX: what about IPEND[4] ? */
438 old_mask = cec->imask;
439 _cec_imask_write (cec, 0);
440
441 TRACE_EVENTS (cpu, "CLI changed IMASK from %#x to %#x", old_mask, cec->imask);
442
443 return old_mask;
444}
445
446void cec_sti (SIM_CPU *cpu, bu32 ints)
447{
448 struct bfin_cec *cec;
449 bu32 old_mask;
450
451 if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
452 return;
453
454 cec = CEC_STATE (cpu);
455 _cec_require_supervisor (cpu, cec);
456
457 /* XXX: what about IPEND[4] ? */
458 old_mask = cec->imask;
459 _cec_imask_write (cec, ints);
460
461 TRACE_EVENTS (cpu, "STI changed IMASK from %#x to %#x", old_mask, cec->imask);
462
463 /* Check for pending interrupts that are now enabled. */
464 _cec_check_pending (cpu, cec);
465}
466
/* Set IPEND[4] (IRPTEN): globally mask interrupt delivery.  */
static void
cec_irpten_enable (SIM_CPU *cpu, struct bfin_cec *cec)
{
  /* Globally mask interrupts.  */
  TRACE_EVENTS (cpu, "setting IPEND[4] to globally mask interrupts");
  cec->ipend |= IVG_IRPTEN_B;
}
474
/* Clear IPEND[4] (IRPTEN): re-enable global interrupt delivery.  */
static void
cec_irpten_disable (SIM_CPU *cpu, struct bfin_cec *cec)
{
  /* Clear global interrupt mask.  */
  TRACE_EVENTS (cpu, "clearing IPEND[4] to not globally mask interrupts");
  cec->ipend &= ~IVG_IRPTEN_B;
}
482
/* Core delivery logic: attempt to move the CPU to event level IVG, or,
   when IVG is -1, to the highest-priority serviceable latched interrupt.
   Handles double faults, masking (IMASK and global IRPTEN), self-nesting
   (SYSCFG.SNEN), loading the proper return-address register, and the
   user->supervisor loop-register/stack fixups.  */
static void
_cec_raise (SIM_CPU *cpu, struct bfin_cec *cec, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int curr_ivg = _cec_get_ivg (cec);
  bool snen;    /* Self-nesting enabled (SYSCFG.SNEN) ?  */
  bool irpten;  /* Interrupts globally disabled (IPEND[4]) ?  */

  TRACE_EVENTS (cpu, "processing request for EVT%i while at EVT%i",
		ivg, curr_ivg);

  irpten = (cec->ipend & IVG_IRPTEN_B);
  snen = (SYSCFGREG & SYSCFG_SNEN);

  if (curr_ivg == -1)
    curr_ivg = IVG_USER;

  /* Just check for higher latched interrupts.  */
  if (ivg == -1)
    {
      if (irpten)
	goto done; /* All interrupts are masked anyways.  */

      ivg = __cec_get_ivg (cec->ilat & cec->imask);
      if (ivg < 0)
	goto done; /* Nothing latched.  */

      if (ivg > curr_ivg)
	goto done; /* Nothing higher latched.  */

      if (!snen && ivg == curr_ivg)
	goto done; /* Self nesting disabled.  */

      /* Still here, so fall through to raise to higher pending.  */
    }

  cec->ilat |= (1 << ivg);

  if (ivg <= IVG_EVX)
    {
      /* These two are always processed.  */
      if (ivg == IVG_EMU || ivg == IVG_RST)
	goto process_int;

      /* Anything lower might trigger a double fault.  */
      if (curr_ivg <= ivg)
	{
	  /* Double fault ! :(  */
	  SET_EXCAUSE (VEC_UNCOV);
	  /* XXX: SET_RETXREG (...);  */
	  sim_io_error (sd, "%s: double fault at 0x%08x ! :(", __func__, PCREG);
	  excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
	}

      /* No double fault -> always process.  */
      goto process_int;
    }
  else if (irpten && curr_ivg != IVG_USER)
    {
      /* Interrupts are globally masked.  */
    }
  else if (!(cec->imask & (1 << ivg)))
    {
      /* This interrupt is masked.  */
    }
  else if (ivg < curr_ivg || (snen && ivg == curr_ivg))
    {
      /* Do transition!  */
      bu32 oldpc;

 process_int:
      cec->ipend |= (1 << ivg);
      cec->ilat &= ~(1 << ivg);

      /* Interrupts are processed in between insns which means the return
	 point is the insn-to-be-executed (which is the current PC).  But
	 exceptions are handled while executing an insn, so we may have to
	 advance the PC ourselves when setting RETX.
	 XXX: Advancing the PC should only be for "service" exceptions, and
	      handling them after executing the insn should be OK, which
	      means we might be able to use the event interface for it.  */

      oldpc = PCREG;
      switch (ivg)
	{
	case IVG_EMU:
	  /* Signal the JTAG ICE.  */
	  /* XXX: what happens with 'raise 0' ?  */
	  SET_RETEREG (oldpc);
	  excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
	  /* XXX: Need an easy way for gdb to signal it isnt here.  */
	  cec->ipend &= ~IVG_EMU_B;
	  break;
	case IVG_RST:
	  /* Have the core reset simply exit (i.e. "shutdown").  */
	  excp_to_sim_halt (sim_exited, 0);
	  break;
	case IVG_NMI:
	  /* XXX: Should check this.  */
	  SET_RETNREG (oldpc);
	  break;
	case IVG_EVX:
	  /* Non-service exceptions point to the excepting instruction.  */
	  if (EXCAUSE >= 0x20)
	    SET_RETXREG (oldpc);
	  else
	    {
	      bu32 nextpc = hwloop_get_next_pc (cpu, oldpc, INSN_LEN);
	      SET_RETXREG (nextpc);
	    }

	  break;
	case IVG_IRPTEN:
	  /* XXX: what happens with 'raise 4' ?  */
	  sim_io_error (sd, "%s: what to do with 'raise 4' ?", __func__);
	  break;
	default:
	  /* Interrupts save the return address in RETI; the low bit
	     marks a self-nested entry (see cec_return).  */
	  SET_RETIREG (oldpc | (ivg == curr_ivg ? 1 : 0));
	  break;
	}

      /* If EVT_OVERRIDE is in effect (IVG7+), use the reset address.  */
      if ((cec->evt_override & 0xff80) & (1 << ivg))
	SET_PCREG (cec_get_reset_evt (cpu));
      else
	SET_PCREG (cec_get_evt (cpu, ivg));

      BFIN_TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (to EVT%i):", ivg);
      BFIN_CPU_STATE.did_jump = true;

      /* Enable the global interrupt mask upon interrupt entry.  */
      if (ivg >= IVG_IVHW)
	cec_irpten_enable (cpu, cec);
    }

  /* When moving between states, don't let internal states bleed through.  */
  DIS_ALGN_EXPT &= ~1;

  /* When going from user to super, we set LSB in LB regs to avoid
     misbehavior and/or malicious code.
     Also need to load SP alias with KSP.  */
  if (curr_ivg == IVG_USER)
    {
      int i;
      for (i = 0; i < 2; ++i)
	if (!(LBREG (i) & 1))
	  SET_LBREG (i, LBREG (i) | 1);
      SET_USPREG (SPREG);
      SET_SPREG (KSPREG);
    }

 done:
  TRACE_EVENTS (cpu, "now at EVT%i", _cec_get_ivg (cec));
}
637
638static bu32
639cec_read_ret_reg (SIM_CPU *cpu, int ivg)
640{
641 switch (ivg)
642 {
643 case IVG_EMU: return RETEREG;
644 case IVG_NMI: return RETNREG;
645 case IVG_EVX: return RETXREG;
646 default: return RETIREG;
647 }
648}
649
/* Latch event level IVG.  In the OS environment this sets the ILAT bit
   and schedules a delivery check; in other environments it jumps
   straight to the saved return address for that level.  */
void
cec_latch (SIM_CPU *cpu, int ivg)
{
  struct bfin_cec *cec;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    {
      bu32 oldpc = PCREG;
      SET_PCREG (cec_read_ret_reg (cpu, ivg));
      BFIN_TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC");
      return;
    }

  cec = CEC_STATE (cpu);
  cec->ilat |= (1 << ivg);
  _cec_check_pending (cpu, cec);
}
667
/* Record a hardware error cause and latch the hardware-error event.  */
void
cec_hwerr (SIM_CPU *cpu, int hwerr)
{
  SET_HWERRCAUSE (hwerr);
  cec_latch (cpu, IVG_IVHW);
}
674
/* Implement the return-from-event insns (RTE/RTN/RTX/RTI selected via
   IVG; -1 means "current level").  Validates that the return insn
   matches the active event level, restores PC from the proper RETx
   register, pops the IPEND bit (unless self-nested), and performs the
   supervisor->user fixups when dropping back to user mode.  */
void
cec_return (SIM_CPU *cpu, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_cec *cec;
  bool snen;
  int curr_ivg;
  bu32 oldpc, newpc;

  oldpc = PCREG;

  BFIN_CPU_STATE.did_jump = true;
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    {
      /* No CEC modeled: just jump to the saved return address.  */
      SET_PCREG (cec_read_ret_reg (cpu, ivg));
      BFIN_TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC");
      return;
    }

  cec = CEC_STATE (cpu);

  /* XXX: This isn't entirely correct ...  */
  cec->ipend &= ~IVG_EMU_B;

  curr_ivg = _cec_get_ivg (cec);
  if (curr_ivg == -1)
    curr_ivg = IVG_USER;
  if (ivg == -1)
    ivg = curr_ivg;

  TRACE_EVENTS (cpu, "returning from EVT%i (should be EVT%i)", curr_ivg, ivg);

  /* Not allowed to return from usermode.  */
  if (curr_ivg == IVG_USER)
    cec_exception (cpu, VEC_ILL_RES);

  if (ivg > IVG15 || ivg < 0)
    sim_io_error (sd, "%s: ivg %i out of range !", __func__, ivg);

  _cec_require_supervisor (cpu, cec);

  /* Each return insn is only valid at its own event level.  */
  switch (ivg)
    {
    case IVG_EMU:
      /* RTE -- only valid in emulation mode.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_EMU)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_NMI:
      /* RTN -- only valid in NMI.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_NMI)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_EVX:
      /* RTX -- only valid in exception.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg != IVG_EVX)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    default:
      /* RTI -- not valid in emulation, nmi, exception, or user.  */
      /* XXX: What does the hardware do ?  */
      if (curr_ivg == IVG_EMU || curr_ivg == IVG_NMI
	  || curr_ivg == IVG_EVX || curr_ivg == IVG_USER)
	cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_IRPTEN:
      /* XXX: Is this even possible ?  */
      excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
      break;
    }
  newpc = cec_read_ret_reg (cpu, ivg);

  /* XXX: Does this nested trick work on EMU/NMI/EVX ?  */
  snen = (newpc & 1);	/* LSB set marks a self-nested entry.  */
  /* XXX: Delayed clear shows bad PCREG register trace above ?  */
  SET_PCREG (newpc & ~1);

  BFIN_TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (from EVT%i)", ivg);

  /* Update ipend after the BFIN_TRACE_BRANCH so dv-bfin_trace
     knows current CEC state wrt overflow.  */
  if (!snen)
    cec->ipend &= ~(1 << ivg);

  /* Disable global interrupt mask to let any interrupt take over, but
     only when we were already in a RTI level.  Only way we could have
     raised at that point is if it was cleared in the first place.  */
  if (ivg >= IVG_IVHW || ivg == IVG_RST)
    cec_irpten_disable (cpu, cec);

  /* When going from super to user, we clear LSB in LB regs in case
     it was set on the transition up.
     Also need to load SP alias with USP.  */
  if (_cec_get_ivg (cec) == -1)
    {
      int i;
      for (i = 0; i < 2; ++i)
	if (LBREG (i) & 1)
	  SET_LBREG (i, LBREG (i) & ~1);
      SET_KSPREG (SPREG);
      SET_SPREG (USPREG);
    }

  /* Check for pending interrupts before we return to usermode.  */
  _cec_check_pending (cpu, cec);
}
784
/* Model pushing RETI to the stack: clears the global interrupt disable
   (IPEND[4]) and immediately re-checks for deliverable interrupts,
   allowing nesting inside a handler.  */
void
cec_push_reti (SIM_CPU *cpu)
{
  /* XXX: Need to check hardware with popped RETI value
	  and bit 1 is set (when handling nested interrupts).
	  Also need to check behavior wrt SNEN in SYSCFG.  */
  struct bfin_cec *cec;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    return;

  TRACE_EVENTS (cpu, "pushing RETI");

  cec = CEC_STATE (cpu);
  cec_irpten_disable (cpu, cec);
  /* Check for pending interrupts.  */
  _cec_check_pending (cpu, cec);
}
803
/* Model popping RETI from the stack: re-sets the global interrupt
   disable (IPEND[4]), blocking further nesting until the handler
   returns or pushes RETI again.  */
void
cec_pop_reti (SIM_CPU *cpu)
{
  /* XXX: Need to check hardware with popped RETI value
	  and bit 1 is set (when handling nested interrupts).
	  Also need to check behavior wrt SNEN in SYSCFG.  */
  struct bfin_cec *cec;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    return;

  TRACE_EVENTS (cpu, "popping RETI");

  cec = CEC_STATE (cpu);
  cec_irpten_enable (cpu, cec);
}