/* Blackfin Core Event Controller (CEC) model.

   Copyright (C) 2010-2013 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "dv-bfin_cec.h"
#include "dv-bfin_evt.h"
#include "dv-bfin_mmu.h"

struct bfin_cec
{
  bu32 base;
  SIM_CPU *cpu;
  struct hw *me;
  struct hw_event *pending;

  /* Order after here is important -- matches hardware MMR layout. */
  bu32 evt_override, imask, ipend, ilat, iprio;
};
#define mmr_base()      offsetof(struct bfin_cec, evt_override)
#define mmr_offset(mmr) (offsetof(struct bfin_cec, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "EVT_OVERRIDE", "IMASK", "IPEND", "ILAT", "IPRIO",
};
#define mmr_name(off) mmr_names[(off) / 4]

static void _cec_raise (SIM_CPU *, struct bfin_cec *, int);

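/* Latched interrupts are not delivered immediately; instead a zero-delay
   hw event is scheduled so that _cec_raise () runs between insns and picks
   the highest priority latched & unmasked event (ivg == -1 means "rescan"). */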
static void
bfin_cec_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_cec *cec = data;
  hw_event_queue_deschedule (me, cec->pending);
  _cec_raise (cec->cpu, cec, -1);
  cec->pending = NULL;
}
static void
bfin_cec_check_pending (struct hw *me, struct bfin_cec *cec)
{
  if (cec->pending)
    return;
  cec->pending = hw_event_queue_schedule (me, 0, bfin_cec_hw_event_callback, cec);
}
static void
_cec_check_pending (SIM_CPU *cpu, struct bfin_cec *cec)
{
  bfin_cec_check_pending (cec->me, cec);
}

static void
_cec_imask_write (struct bfin_cec *cec, bu32 value)
{
  cec->imask = (value & IVG_MASKABLE_B) | (cec->imask & IVG_UNMASKABLE_B);
}

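/* Handle 4-byte writes to the CEC MMRs.  EVT_OVERRIDE and IMASK are
   writable (IMASK keeps its unmaskable bits), IPEND is read-only, ILAT
   is handled as write-1-to-clear via dv_w1c_4 (), and IPRIO only keeps
   the unmaskable bits.  A write to IMASK may unmask a latched interrupt,
   so re-check pending events afterwards. */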
static unsigned
bfin_cec_io_write_buffer (struct hw *me, const void *source,
                          int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_cec *cec = hw_data (me);
  bu32 mmr_off;
  bu32 value;

  value = dv_load_4 (source);
  mmr_off = addr - cec->base;

  HW_TRACE_WRITE ();

  switch (mmr_off)
    {
    case mmr_offset(evt_override):
      cec->evt_override = value;
      break;
    case mmr_offset(imask):
      _cec_imask_write (cec, value);
      bfin_cec_check_pending (me, cec);
      break;
    case mmr_offset(ipend):
      /* Read-only register. */
      break;
    case mmr_offset(ilat):
      dv_w1c_4 (&cec->ilat, value, 0xffee);
      break;
    case mmr_offset(iprio):
      cec->iprio = (value & IVG_UNMASKABLE_B);
      break;
    }

  return nr_bytes;
}

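/* Reads come straight out of the state struct: the MMRs are laid out in
   the same order as struct bfin_cec (starting at evt_override), so the
   register offset maps directly onto a struct member. */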
static unsigned
bfin_cec_io_read_buffer (struct hw *me, void *dest,
                         int space, address_word addr, unsigned nr_bytes)
{
  struct bfin_cec *cec = hw_data (me);
  bu32 mmr_off;
  bu32 *valuep;

  mmr_off = addr - cec->base;
  valuep = (void *)((unsigned long)cec + mmr_base() + mmr_off);

  HW_TRACE_READ ();

  dv_store_4 (dest, *valuep);

  return nr_bytes;
}

static const struct hw_port_descriptor bfin_cec_ports[] =
{
  { "emu",   IVG_EMU,   0, input_port, },
  { "rst",   IVG_RST,   0, input_port, },
  { "nmi",   IVG_NMI,   0, input_port, },
  { "evx",   IVG_EVX,   0, input_port, },
  { "ivhw",  IVG_IVHW,  0, input_port, },
  { "ivtmr", IVG_IVTMR, 0, input_port, },
  { "ivg7",  IVG7,      0, input_port, },
  { "ivg8",  IVG8,      0, input_port, },
  { "ivg9",  IVG9,      0, input_port, },
  { "ivg10", IVG10,     0, input_port, },
  { "ivg11", IVG11,     0, input_port, },
  { "ivg12", IVG12,     0, input_port, },
  { "ivg13", IVG13,     0, input_port, },
  { "ivg14", IVG14,     0, input_port, },
  { "ivg15", IVG15,     0, input_port, },
  { NULL, 0, 0, 0, },
};

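/* An incoming port event maps 1:1 onto an IVG level (see bfin_cec_ports),
   so just forward it to _cec_raise (). */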
static void
bfin_cec_port_event (struct hw *me, int my_port, struct hw *source,
                     int source_port, int level)
{
  struct bfin_cec *cec = hw_data (me);
  _cec_raise (cec->cpu, cec, my_port);
}

static void
attach_bfin_cec_regs (struct hw *me, struct bfin_cec *cec)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_COREMMR_CEC_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_CEC_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  cec->base = attach_address;
  /* XXX: should take from the device tree. */
  cec->cpu = STATE_CPU (hw_system (me), 0);
  cec->me = me;
}

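/* Instantiate the CEC device: hook up the MMR handlers and interrupt
   ports, attach the register window, and start out in the reset state
   (only unmaskable events enabled, IPEND showing reset + IRPTEN). */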
static void
bfin_cec_finish (struct hw *me)
{
  struct bfin_cec *cec;

  cec = HW_ZALLOC (me, struct bfin_cec);

  set_hw_data (me, cec);
  set_hw_io_read_buffer (me, bfin_cec_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_cec_io_write_buffer);
  set_hw_ports (me, bfin_cec_ports);
  set_hw_port_event (me, bfin_cec_port_event);

  attach_bfin_cec_regs (me, cec);

  /* Initialize the CEC. */
  cec->imask = IVG_UNMASKABLE_B;
  cec->ipend = IVG_RST_B | IVG_IRPTEN_B;
}

const struct hw_descriptor dv_bfin_cec_descriptor[] =
{
  {"bfin_cec", bfin_cec_finish,},
  {NULL, NULL},
};

static const char * const excp_decoded[] =
{
  [VEC_SYS        ] = "Custom exception 0 (system call)",
  [VEC_EXCPT01    ] = "Custom exception 1 (software breakpoint)",
  [VEC_EXCPT02    ] = "Custom exception 2 (KGDB hook)",
  [VEC_EXCPT03    ] = "Custom exception 3 (userspace stack overflow)",
  [VEC_EXCPT04    ] = "Custom exception 4 (dump trace buffer)",
  [VEC_EXCPT05    ] = "Custom exception 5",
  [VEC_EXCPT06    ] = "Custom exception 6",
  [VEC_EXCPT07    ] = "Custom exception 7",
  [VEC_EXCPT08    ] = "Custom exception 8",
  [VEC_EXCPT09    ] = "Custom exception 9",
  [VEC_EXCPT10    ] = "Custom exception 10",
  [VEC_EXCPT11    ] = "Custom exception 11",
  [VEC_EXCPT12    ] = "Custom exception 12",
  [VEC_EXCPT13    ] = "Custom exception 13",
  [VEC_EXCPT14    ] = "Custom exception 14",
  [VEC_EXCPT15    ] = "Custom exception 15",
  [VEC_STEP       ] = "Hardware single step",
  [VEC_OVFLOW     ] = "Trace buffer overflow",
  [VEC_UNDEF_I    ] = "Undefined instruction",
  [VEC_ILGAL_I    ] = "Illegal instruction combo (multi-issue)",
  [VEC_CPLB_VL    ] = "DCPLB protection violation",
  [VEC_MISALI_D   ] = "Unaligned data access",
  [VEC_UNCOV      ] = "Unrecoverable event (double fault)",
  [VEC_CPLB_M     ] = "DCPLB miss",
  [VEC_CPLB_MHIT  ] = "Multiple DCPLB hit",
  [VEC_WATCH      ] = "Watchpoint match",
  [VEC_ISTRU_VL   ] = "ADSP-BF535 only",
  [VEC_MISALI_I   ] = "Unaligned instruction access",
  [VEC_CPLB_I_VL  ] = "ICPLB protection violation",
  [VEC_CPLB_I_M   ] = "ICPLB miss",
  [VEC_CPLB_I_MHIT] = "Multiple ICPLB hit",
  [VEC_ILL_RES    ] = "Illegal supervisor resource",
};

#define CEC_STATE(cpu) DV_STATE_CACHED (cpu, cec)

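/* Lower bit numbers are higher priority events, so ffs () on the pending
   bits yields the active IVG level; the result is -1 when nothing (other
   than the masked-out bits) is set. */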
#define __cec_get_ivg(val) (ffs ((val) & ~IVG_IRPTEN_B) - 1)
#define _cec_get_ivg(cec) __cec_get_ivg ((cec)->ipend & ~IVG_EMU_B)

int
cec_get_ivg (SIM_CPU *cpu)
{
  switch (STATE_ENVIRONMENT (CPU_STATE (cpu)))
    {
    case OPERATING_ENVIRONMENT:
      return _cec_get_ivg (CEC_STATE (cpu));
    default:
      return IVG_USER;
    }
}

static bool
_cec_is_supervisor_mode (struct bfin_cec *cec)
{
  return (cec->ipend & ~(IVG_EMU_B | IVG_IRPTEN_B));
}
bool
cec_is_supervisor_mode (SIM_CPU *cpu)
{
  switch (STATE_ENVIRONMENT (CPU_STATE (cpu)))
    {
    case OPERATING_ENVIRONMENT:
      return _cec_is_supervisor_mode (CEC_STATE (cpu));
    case USER_ENVIRONMENT:
      return false;
    default:
      return true;
    }
}
static bool
_cec_is_user_mode (struct bfin_cec *cec)
{
  return !_cec_is_supervisor_mode (cec);
}
bool
cec_is_user_mode (SIM_CPU *cpu)
{
  return !cec_is_supervisor_mode (cpu);
}
static void
_cec_require_supervisor (SIM_CPU *cpu, struct bfin_cec *cec)
{
  if (_cec_is_user_mode (cec))
    cec_exception (cpu, VEC_ILL_RES);
}
void
cec_require_supervisor (SIM_CPU *cpu)
{
  /* Do not call _cec_require_supervisor() to avoid CEC_STATE()
     as that macro requires OS operating mode. */
  if (cec_is_user_mode (cpu))
    cec_exception (cpu, VEC_ILL_RES);
}

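/* Halt the engine at the current PC with the given reason/sigrc. */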
#define excp_to_sim_halt(reason, sigrc) \
  sim_engine_halt (CPU_STATE (cpu), cpu, NULL, PCREG, reason, sigrc)
void
cec_exception (SIM_CPU *cpu, int excp)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int sigrc = -1;

  TRACE_EVENTS (cpu, "processing exception %#x in EVT%i", excp,
                cec_get_ivg (cpu));

  /* Ideally what would happen here for real hardware exceptions (not
     fake sim ones) is that:
      - For service exceptions (excp <= 0x11):
         RETX is the _next_ PC which can be tricky with jumps/hardware loops/...
      - For error exceptions (excp > 0x11):
         RETX is the _current_ PC (i.e. the one causing the exception)
      - PC is loaded with EVT3 MMR
      - ILAT/IPEND in CEC is updated depending on current IVG level
      - the fault address MMRs get updated with data/instruction info
      - Execution continues on in the EVT3 handler */

  /* Handle simulator exceptions first. */
  switch (excp)
    {
    case VEC_SIM_HLT:
      excp_to_sim_halt (sim_exited, 0);
      return;
    case VEC_SIM_ABORT:
      excp_to_sim_halt (sim_exited, 1);
      return;
    case VEC_SIM_TRAP:
      /* GDB expects us to step over EMUEXCPT. */
      /* XXX: What about hwloops and EMUEXCPT at the end?
         Pretty sure gdb doesn't handle this already... */
      SET_PCREG (PCREG + 2);
      /* Only trap when we are running in gdb. */
      if (STATE_OPEN_KIND (sd) == SIM_OPEN_DEBUG)
        excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
      return;
    case VEC_SIM_DBGA:
      /* If running in gdb, simply trap. */
      if (STATE_OPEN_KIND (sd) == SIM_OPEN_DEBUG)
        excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
      else
        excp_to_sim_halt (sim_exited, 2);
    }

  if (excp <= 0x3f)
    {
      SET_EXCAUSE (excp);
      if (STATE_ENVIRONMENT (sd) == OPERATING_ENVIRONMENT)
        {
          /* ICPLB regs always get updated. */
          /* XXX: Should optimize this call path ... */
          if (excp != VEC_MISALI_I && excp != VEC_MISALI_D
              && excp != VEC_CPLB_I_M && excp != VEC_CPLB_M
              && excp != VEC_CPLB_I_VL && excp != VEC_CPLB_VL
              && excp != VEC_CPLB_I_MHIT && excp != VEC_CPLB_MHIT)
            mmu_log_ifault (cpu);
          _cec_raise (cpu, CEC_STATE (cpu), IVG_EVX);
          /* We need to restart the engine so that we don't return
             and continue processing this bad insn. */
          if (EXCAUSE >= 0x20)
            sim_engine_restart (sd, cpu, NULL, PCREG);
          return;
        }
    }

  TRACE_EVENTS (cpu, "running virtual exception handler");

  switch (excp)
    {
    case VEC_SYS:
      bfin_syscall (cpu);
      break;

    case VEC_EXCPT01:	/* Userspace gdb breakpoint. */
      sigrc = SIM_SIGTRAP;
      break;

    case VEC_UNDEF_I:	/* Undefined instruction. */
      sigrc = SIM_SIGILL;
      break;

    case VEC_ILL_RES:	/* Illegal supervisor resource. */
    case VEC_MISALI_I:	/* Misaligned instruction. */
      sigrc = SIM_SIGBUS;
      break;

    case VEC_CPLB_M:
    case VEC_CPLB_I_M:
      sigrc = SIM_SIGSEGV;
      break;

    default:
      sim_io_eprintf (sd, "Unhandled exception %#x at 0x%08x (%s)\n",
                      excp, PCREG, excp_decoded[excp]);
      sigrc = SIM_SIGILL;
      break;
    }

  if (sigrc != -1)
    excp_to_sim_halt (sim_stopped, sigrc);
}

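/* CLI returns the previous IMASK and masks all maskable interrupts;
   STI restores the mask and then re-checks for latched interrupts that
   the new mask just enabled.  Both are supervisor-only and do nothing
   outside the operating environment. */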
bu32 cec_cli (SIM_CPU *cpu)
{
  struct bfin_cec *cec;
  bu32 old_mask;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    return 0;

  cec = CEC_STATE (cpu);
  _cec_require_supervisor (cpu, cec);

  /* XXX: what about IPEND[4] ? */
  old_mask = cec->imask;
  _cec_imask_write (cec, 0);

  TRACE_EVENTS (cpu, "CLI changed IMASK from %#x to %#x", old_mask, cec->imask);

  return old_mask;
}

void cec_sti (SIM_CPU *cpu, bu32 ints)
{
  struct bfin_cec *cec;
  bu32 old_mask;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    return;

  cec = CEC_STATE (cpu);
  _cec_require_supervisor (cpu, cec);

  /* XXX: what about IPEND[4] ? */
  old_mask = cec->imask;
  _cec_imask_write (cec, ints);

  TRACE_EVENTS (cpu, "STI changed IMASK from %#x to %#x", old_mask, cec->imask);

  /* Check for pending interrupts that are now enabled. */
  _cec_check_pending (cpu, cec);
}

static void
cec_irpten_enable (SIM_CPU *cpu, struct bfin_cec *cec)
{
  /* Globally mask interrupts. */
  TRACE_EVENTS (cpu, "setting IPEND[4] to globally mask interrupts");
  cec->ipend |= IVG_IRPTEN_B;
}

static void
cec_irpten_disable (SIM_CPU *cpu, struct bfin_cec *cec)
{
  /* Clear global interrupt mask. */
  TRACE_EVENTS (cpu, "clearing IPEND[4] to not globally mask interrupts");
  cec->ipend &= ~IVG_IRPTEN_B;
}

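/* Core dispatch logic.  A specific IVG may be requested, or ivg == -1 to
   rescan ILAT for the highest priority latched & unmasked event.  EMU and
   RST are always taken; other events at EVX priority or above raise a
   double fault if we are already at that level or higher.  Everything
   below EVX priority (hardware error, core timer, IVG7-15) honors
   IPEND[4] (the global disable) and IMASK, and may only nest into the
   same level when SYSCFG.SNEN is set. */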
static void
_cec_raise (SIM_CPU *cpu, struct bfin_cec *cec, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  int curr_ivg = _cec_get_ivg (cec);
  bool snen;
  bool irpten;

  TRACE_EVENTS (cpu, "processing request for EVT%i while at EVT%i",
                ivg, curr_ivg);

  irpten = (cec->ipend & IVG_IRPTEN_B);
  snen = (SYSCFGREG & SYSCFG_SNEN);

  if (curr_ivg == -1)
    curr_ivg = IVG_USER;

  /* Just check for higher latched interrupts. */
  if (ivg == -1)
    {
      if (irpten)
        goto done; /* All interrupts are masked anyways. */

      ivg = __cec_get_ivg (cec->ilat & cec->imask);
      if (ivg < 0)
        goto done; /* Nothing latched. */

      if (ivg > curr_ivg)
        goto done; /* Nothing higher latched. */

      if (!snen && ivg == curr_ivg)
        goto done; /* Self nesting disabled. */

      /* Still here, so fall through to raise to higher pending. */
    }

  cec->ilat |= (1 << ivg);

  if (ivg <= IVG_EVX)
    {
      /* These two are always processed. */
      if (ivg == IVG_EMU || ivg == IVG_RST)
        goto process_int;

      /* Anything lower might trigger a double fault. */
      if (curr_ivg <= ivg)
        {
          /* Double fault ! :( */
          SET_EXCAUSE (VEC_UNCOV);
          /* XXX: SET_RETXREG (...); */
          sim_io_error (sd, "%s: double fault at 0x%08x ! :(", __func__, PCREG);
          excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
        }

      /* No double fault -> always process. */
      goto process_int;
    }
  else if (irpten && curr_ivg != IVG_USER)
    {
      /* Interrupts are globally masked. */
    }
  else if (!(cec->imask & (1 << ivg)))
    {
      /* This interrupt is masked. */
    }
  else if (ivg < curr_ivg || (snen && ivg == curr_ivg))
    {
      /* Do transition! */
      bu32 oldpc;

 process_int:
      cec->ipend |= (1 << ivg);
      cec->ilat &= ~(1 << ivg);

      /* Interrupts are processed in between insns which means the return
         point is the insn-to-be-executed (which is the current PC).  But
         exceptions are handled while executing an insn, so we may have to
         advance the PC ourselves when setting RETX.
         XXX: Advancing the PC should only be for "service" exceptions, and
              handling them after executing the insn should be OK, which
              means we might be able to use the event interface for it. */

      oldpc = PCREG;
      switch (ivg)
        {
        case IVG_EMU:
          /* Signal the JTAG ICE. */
          /* XXX: what happens with 'raise 0' ? */
          SET_RETEREG (oldpc);
          excp_to_sim_halt (sim_stopped, SIM_SIGTRAP);
          /* XXX: Need an easy way for gdb to signal it isn't here. */
          cec->ipend &= ~IVG_EMU_B;
          break;
        case IVG_RST:
          /* Have the core reset simply exit (i.e. "shutdown"). */
          excp_to_sim_halt (sim_exited, 0);
          break;
        case IVG_NMI:
          /* XXX: Should check this. */
          SET_RETNREG (oldpc);
          break;
        case IVG_EVX:
          /* Non-service exceptions point to the excepting instruction. */
          if (EXCAUSE >= 0x20)
            SET_RETXREG (oldpc);
          else
            {
              bu32 nextpc = hwloop_get_next_pc (cpu, oldpc, INSN_LEN);
              SET_RETXREG (nextpc);
            }

          break;
        case IVG_IRPTEN:
          /* XXX: what happens with 'raise 4' ? */
          sim_io_error (sd, "%s: what to do with 'raise 4' ?", __func__);
          break;
        default:
          SET_RETIREG (oldpc | (ivg == curr_ivg ? 1 : 0));
          break;
        }

      /* If EVT_OVERRIDE is in effect (IVG7+), use the reset address. */
      if ((cec->evt_override & 0xff80) & (1 << ivg))
        SET_PCREG (cec_get_reset_evt (cpu));
      else
        SET_PCREG (cec_get_evt (cpu, ivg));

      TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (to EVT%i):", ivg);
      BFIN_CPU_STATE.did_jump = true;

      /* Enable the global interrupt mask upon interrupt entry. */
      if (ivg >= IVG_IVHW)
        cec_irpten_enable (cpu, cec);
    }

  /* When moving between states, don't let internal states bleed through. */
  DIS_ALGN_EXPT &= ~1;

  /* When going from user to super, we set LSB in LB regs to avoid
     misbehavior and/or malicious code.
     Also need to load SP alias with KSP. */
  if (curr_ivg == IVG_USER)
    {
      int i;
      for (i = 0; i < 2; ++i)
        if (!(LBREG (i) & 1))
          SET_LBREG (i, LBREG (i) | 1);
      SET_USPREG (SPREG);
      SET_SPREG (KSPREG);
    }

 done:
  TRACE_EVENTS (cpu, "now at EVT%i", _cec_get_ivg (cec));
}

static bu32
cec_read_ret_reg (SIM_CPU *cpu, int ivg)
{
  switch (ivg)
    {
    case IVG_EMU: return RETEREG;
    case IVG_NMI: return RETNREG;
    case IVG_EVX: return RETXREG;
    default:      return RETIREG;
    }
}

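/* Latch an event: outside the operating environment just jump to the
   matching return register, otherwise set the ILAT bit and let the
   scheduled pending-event callback service it between insns. */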
void
cec_latch (SIM_CPU *cpu, int ivg)
{
  struct bfin_cec *cec;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    {
      bu32 oldpc = PCREG;
      SET_PCREG (cec_read_ret_reg (cpu, ivg));
      TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC");
      return;
    }

  cec = CEC_STATE (cpu);
  cec->ilat |= (1 << ivg);
  _cec_check_pending (cpu, cec);
}

void
cec_hwerr (SIM_CPU *cpu, int hwerr)
{
  SET_HWERRCAUSE (hwerr);
  cec_latch (cpu, IVG_IVHW);
}

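/* Implement the RTE/RTN/RTX/RTI family: check that the return insn
   matches the event level we are actually in, reload the PC from the
   matching return register, drop the IPEND bit (unless the saved LSB
   flags a self-nested interrupt), and re-check for pending events. */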
void
cec_return (SIM_CPU *cpu, int ivg)
{
  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_cec *cec;
  bool snen;
  int curr_ivg;
  bu32 oldpc, newpc;

  oldpc = PCREG;

  BFIN_CPU_STATE.did_jump = true;
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)
    {
      SET_PCREG (cec_read_ret_reg (cpu, ivg));
      TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC");
      return;
    }

  cec = CEC_STATE (cpu);

  /* XXX: This isn't entirely correct ... */
  cec->ipend &= ~IVG_EMU_B;

  curr_ivg = _cec_get_ivg (cec);
  if (curr_ivg == -1)
    curr_ivg = IVG_USER;
  if (ivg == -1)
    ivg = curr_ivg;

  TRACE_EVENTS (cpu, "returning from EVT%i (should be EVT%i)", curr_ivg, ivg);

  /* Not allowed to return from usermode. */
  if (curr_ivg == IVG_USER)
    cec_exception (cpu, VEC_ILL_RES);

  if (ivg > IVG15 || ivg < 0)
    sim_io_error (sd, "%s: ivg %i out of range !", __func__, ivg);

  _cec_require_supervisor (cpu, cec);

  switch (ivg)
    {
    case IVG_EMU:
      /* RTE -- only valid in emulation mode. */
      /* XXX: What does the hardware do ? */
      if (curr_ivg != IVG_EMU)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_NMI:
      /* RTN -- only valid in NMI. */
      /* XXX: What does the hardware do ? */
      if (curr_ivg != IVG_NMI)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_EVX:
      /* RTX -- only valid in exception. */
      /* XXX: What does the hardware do ? */
      if (curr_ivg != IVG_EVX)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    default:
      /* RTI -- not valid in emulation, nmi, exception, or user. */
      /* XXX: What does the hardware do ? */
      if (curr_ivg == IVG_EMU || curr_ivg == IVG_NMI
          || curr_ivg == IVG_EVX || curr_ivg == IVG_USER)
        cec_exception (cpu, VEC_ILL_RES);
      break;
    case IVG_IRPTEN:
      /* XXX: Is this even possible ? */
      excp_to_sim_halt (sim_stopped, SIM_SIGABRT);
      break;
    }
  newpc = cec_read_ret_reg (cpu, ivg);

  /* XXX: Does this nested trick work on EMU/NMI/EVX ? */
  snen = (newpc & 1);
  /* XXX: Delayed clear shows bad PCREG register trace above ? */
  SET_PCREG (newpc & ~1);

  TRACE_BRANCH (cpu, oldpc, PCREG, -1, "CEC changed PC (from EVT%i)", ivg);

  /* Update ipend after the TRACE_BRANCH so dv-bfin_trace
     knows current CEC state wrt overflow. */
  if (!snen)
    cec->ipend &= ~(1 << ivg);

  /* Disable global interrupt mask to let any interrupt take over, but
     only when we were already in a RTI level.  Only way we could have
     raised at that point is if it was cleared in the first place. */
  if (ivg >= IVG_IVHW || ivg == IVG_RST)
    cec_irpten_disable (cpu, cec);

  /* When going from super to user, we clear LSB in LB regs in case
     it was set on the transition up.
     Also need to load SP alias with USP. */
  if (_cec_get_ivg (cec) == -1)
    {
      int i;
      for (i = 0; i < 2; ++i)
        if (LBREG (i) & 1)
          SET_LBREG (i, LBREG (i) & ~1);
      SET_KSPREG (SPREG);
      SET_SPREG (USPREG);
    }

  /* Check for pending interrupts before we return to usermode. */
  _cec_check_pending (cpu, cec);
}

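/* Pushing RETI ([--SP] = RETI) re-enables nested interrupts by clearing
   IPEND[4]; popping RETI (RETI = [SP++]) masks them again. */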
void
cec_push_reti (SIM_CPU *cpu)
{
  /* XXX: Need to check hardware with popped RETI value
     and bit 1 is set (when handling nested interrupts).
     Also need to check behavior wrt SNEN in SYSCFG. */
  struct bfin_cec *cec;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    return;

  TRACE_EVENTS (cpu, "pushing RETI");

  cec = CEC_STATE (cpu);
  cec_irpten_disable (cpu, cec);
  /* Check for pending interrupts. */
  _cec_check_pending (cpu, cec);
}

void
cec_pop_reti (SIM_CPU *cpu)
{
  /* XXX: Need to check hardware with popped RETI value
     and bit 1 is set (when handling nested interrupts).
     Also need to check behavior wrt SNEN in SYSCFG. */
  struct bfin_cec *cec;

  if (STATE_ENVIRONMENT (CPU_STATE (cpu)) != OPERATING_ENVIRONMENT)
    return;

  TRACE_EVENTS (cpu, "popping RETI");

  cec = CEC_STATE (cpu);
  cec_irpten_enable (cpu, cec);
}