/* frv simulator support code
   Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.
   Contributed by Red Hat.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */

#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "cgen-engine.h"
#include "cgen-par.h"
#include "bfd.h"
#include <math.h>

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by the JMPL, JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;

/* The contents of BUF are in target byte order.  */
int
frvbf_fetch_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (rn <= GR_REGNUM_MAX)
    SETTSI (buf, GET_H_GR (rn));
  else if (rn <= FR_REGNUM_MAX)
    SETTSI (buf, GET_H_FR (rn - GR_REGNUM_MAX - 1));
  else if (rn == PC_REGNUM)
    SETTSI (buf, GET_H_PC ());
  else if (rn == LR_REGNUM)
    SETTSI (buf, GET_H_SPR (H_SPR_LR));
  else
    SETTSI (buf, 0xdeadbeef);

  return -1;
}

/* The contents of BUF are in target byte order.  */

int
frvbf_store_register (SIM_CPU *current_cpu, int rn, unsigned char *buf, int len)
{
  if (rn <= GR_REGNUM_MAX)
    SET_H_GR (rn, GETTSI (buf));
  else if (rn <= FR_REGNUM_MAX)
    SET_H_FR (rn - GR_REGNUM_MAX - 1, GETTSI (buf));
  else if (rn == PC_REGNUM)
    SET_H_PC (GETTSI (buf));
  else if (rn == LR_REGNUM)
    SET_H_SPR (H_SPR_LR, GETTSI (buf));

  return -1;
}
\f
/* Cover fns to access the general registers.  */
USI
frvbf_h_gr_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  frv_check_gr_access (current_cpu, gr);
  return CPU (h_gr[gr]);
}

void
frvbf_h_gr_set_handler (SIM_CPU *current_cpu, UINT gr, USI newval)
{
  frv_check_gr_access (current_cpu, gr);

  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  CPU (h_gr[gr]) = newval;
}
\f
/* Cover fns to access the floating point registers.  */
SF
frvbf_h_fr_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  frv_check_fr_access (current_cpu, fr);
  return CPU (h_fr[fr]);
}

void
frvbf_h_fr_set_handler (SIM_CPU *current_cpu, UINT fr, SF newval)
{
  frv_check_fr_access (current_cpu, fr);
  CPU (h_fr[fr]) = newval;
}
\f
/* Cover fns to access the general registers as double words.  */
static UINT
check_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_register_exception_interrupt (current_cpu,
                                                  FRV_REC_UNALIGNED);
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}
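
/* For illustration: double-word accesses pass an align_mask of 1 (the
   register number must be even) and quad-word accesses pass 3 (a multiple
   of 4), so check_register_alignment (current_cpu, 5, 1) queues the
   interrupt appropriate for the machine and returns 4.  */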

static UINT
check_fr_register_alignment (SIM_CPU *current_cpu, UINT reg, int align_mask)
{
  if (reg & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
        case bfd_mach_fr550:
          frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          {
            struct frv_fp_exception_info fp_info = {
              FSR_NO_EXCEPTION, FTT_INVALID_FR
            };
            frv_queue_fp_exception_interrupt (current_cpu, & fp_info);
          }
          break;
        default:
          break;
        }

      reg &= ~align_mask;
    }

  return reg;
}

static UINT
check_memory_alignment (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      SIM_DESC sd = CPU_STATE (current_cpu);
      switch (STATE_ARCHITECTURE (sd)->mach)
        {
        case bfd_mach_fr400:
          frv_queue_data_access_error_interrupt (current_cpu, address);
          break;
        case bfd_mach_frvtomcat:
        case bfd_mach_fr500:
        case bfd_mach_frv:
          frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
          break;
        default:
          break;
        }

      address &= ~align_mask;
    }

  return address;
}
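
/* Example: the quad-word loads and stores below pass an align_mask of 0xf,
   so an address of 0x1004 queues the machine-appropriate interrupt and is
   truncated to 0x1000 before being used.  */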

DI
frvbf_h_gr_double_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  DI value;

  if (gr == 0)
    return 0; /* gr0 is always 0.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  value = GET_H_GR (gr);
  value <<= 32;
  value |= (USI) GET_H_GR (gr + 1);
  return value;
}

void
frvbf_h_gr_double_set_handler (SIM_CPU *current_cpu, UINT gr, DI newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 1);

  SET_H_GR (gr    , (newval >> 32) & 0xffffffff);
  SET_H_GR (gr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to access the floating point register as double words.  */
DF
frvbf_h_fr_double_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      value.as_sf[1] = GET_H_FR (fr);
      value.as_sf[0] = GET_H_FR (fr + 1);
    }
  else
    {
      value.as_sf[0] = GET_H_FR (fr);
      value.as_sf[1] = GET_H_FR (fr + 1);
    }

  return value.as_df;
}

void
frvbf_h_fr_double_set_handler (SIM_CPU *current_cpu, UINT fr, DF newval)
{
  union {
    SF as_sf[2];
    DF as_df;
  } value;

  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 1);

  value.as_df = newval;
  if (CURRENT_HOST_BYTE_ORDER == LITTLE_ENDIAN)
    {
      SET_H_FR (fr    , value.as_sf[1]);
      SET_H_FR (fr + 1, value.as_sf[0]);
    }
  else
    {
      SET_H_FR (fr    , value.as_sf[0]);
      SET_H_FR (fr + 1, value.as_sf[1]);
    }
}
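
/* The unions above assemble the double so that its more significant word
   always comes from the lower-numbered FR register, whatever the host
   byte order.  */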
\f
/* Cover fns to access the floating point register as integer words.  */
USI
frvbf_h_fr_int_get_handler (SIM_CPU *current_cpu, UINT fr)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_sf = GET_H_FR (fr);
  return value.as_usi;
}

void
frvbf_h_fr_int_set_handler (SIM_CPU *current_cpu, UINT fr, USI newval)
{
  union {
    SF as_sf;
    USI as_usi;
  } value;

  value.as_usi = newval;
  SET_H_FR (fr, value.as_sf);
}
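
/* The unions in the two handlers above reinterpret the register's bit
   pattern (type punning); no integer/float value conversion is performed.  */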
\f
/* Cover fns to access the coprocessor registers as double words.  */
DI
frvbf_h_cpr_double_get_handler (SIM_CPU *current_cpu, UINT cpr)
{
  DI value;

  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  value = GET_H_CPR (cpr);
  value <<= 32;
  value |= (USI) GET_H_CPR (cpr + 1);
  return value;
}

void
frvbf_h_cpr_double_set_handler (SIM_CPU *current_cpu, UINT cpr, DI newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 1);

  SET_H_CPR (cpr    , (newval >> 32) & 0xffffffff);
  SET_H_CPR (cpr + 1, (newval      ) & 0xffffffff);
}
\f
/* Cover fns to write registers as quad words.  */
void
frvbf_h_gr_quad_set_handler (SIM_CPU *current_cpu, UINT gr, SI *newval)
{
  if (gr == 0)
    return; /* Storing into gr0 has no effect.  */

  /* Check the register alignment.  */
  gr = check_register_alignment (current_cpu, gr, 3);

  SET_H_GR (gr    , newval[0]);
  SET_H_GR (gr + 1, newval[1]);
  SET_H_GR (gr + 2, newval[2]);
  SET_H_GR (gr + 3, newval[3]);
}

void
frvbf_h_fr_quad_set_handler (SIM_CPU *current_cpu, UINT fr, SI *newval)
{
  /* Check the register alignment.  */
  fr = check_fr_register_alignment (current_cpu, fr, 3);

  SET_H_FR (fr    , newval[0]);
  SET_H_FR (fr + 1, newval[1]);
  SET_H_FR (fr + 2, newval[2]);
  SET_H_FR (fr + 3, newval[3]);
}

void
frvbf_h_cpr_quad_set_handler (SIM_CPU *current_cpu, UINT cpr, SI *newval)
{
  /* Check the register alignment.  */
  cpr = check_register_alignment (current_cpu, cpr, 3);

  SET_H_CPR (cpr    , newval[0]);
  SET_H_CPR (cpr + 1, newval[1]);
  SET_H_CPR (cpr + 2, newval[2]);
  SET_H_CPR (cpr + 3, newval[3]);
}
\f
/* Cover fns to access the special purpose registers.  */
USI
frvbf_h_spr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* Check access restrictions.  */
  frv_check_spr_read_access (current_cpu, spr);

  switch (spr)
    {
    case H_SPR_PSR:
      return spr_psr_get_handler (current_cpu);
    case H_SPR_TBR:
      return spr_tbr_get_handler (current_cpu);
    case H_SPR_BPSR:
      return spr_bpsr_get_handler (current_cpu);
    case H_SPR_CCR:
      return spr_ccr_get_handler (current_cpu);
    case H_SPR_CCCR:
      return spr_cccr_get_handler (current_cpu);
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      return spr_sr_get_handler (current_cpu, spr);
    default:
      return CPU (h_spr[spr]);
    }
  return 0;
}

void
frvbf_h_spr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  FRV_REGISTER_CONTROL *control;
  USI mask;
  USI oldval;

  /* Check access restrictions.  */
  frv_check_spr_write_access (current_cpu, spr);

  /* Only set those fields which are writeable.  */
  control = CPU_REGISTER_CONTROL (current_cpu);
  mask = control->spr[spr].read_only_mask;
  oldval = GET_H_SPR (spr);

  newval = (newval & ~mask) | (oldval & mask);

  /* Some registers are represented by individual components which are
     referenced more often than the register itself.  */
  switch (spr)
    {
    case H_SPR_PSR:
      spr_psr_set_handler (current_cpu, newval);
      break;
    case H_SPR_TBR:
      spr_tbr_set_handler (current_cpu, newval);
      break;
    case H_SPR_BPSR:
      spr_bpsr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCR:
      spr_ccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_CCCR:
      spr_cccr_set_handler (current_cpu, newval);
      break;
    case H_SPR_SR0:
    case H_SPR_SR1:
    case H_SPR_SR2:
    case H_SPR_SR3:
      spr_sr_set_handler (current_cpu, spr, newval);
      break;
    case H_SPR_IHSR8:
      frv_cache_reconfigure (current_cpu, CPU_INSN_CACHE (current_cpu));
      break;
    default:
      CPU (h_spr[spr]) = newval;
      break;
    }
}
\f
/* Cover fns to access the gr_hi and gr_lo registers.  */
UHI
frvbf_h_gr_hi_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return (GET_H_GR (gr) >> 16) & 0xffff;
}

void
frvbf_h_gr_hi_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff) | (newval << 16);
  SET_H_GR (gr, value);
}

UHI
frvbf_h_gr_lo_get_handler (SIM_CPU *current_cpu, UINT gr)
{
  return GET_H_GR (gr) & 0xffff;
}

void
frvbf_h_gr_lo_set_handler (SIM_CPU *current_cpu, UINT gr, UHI newval)
{
  USI value = (GET_H_GR (gr) & 0xffff0000) | (newval & 0xffff);
  SET_H_GR (gr, value);
}
\f
/* Cover fns to access the tbr bits.  */
USI
spr_tbr_get_handler (SIM_CPU *current_cpu)
{
  int tbr = ((GET_H_TBR_TBA () & 0xfffff) << 12) |
            ((GET_H_TBR_TT  () & 0xff   ) <<  4);

  return tbr;
}

void
spr_tbr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int tbr = newval;

  SET_H_TBR_TBA ((tbr >> 12) & 0xfffff);
  SET_H_TBR_TT  ((tbr >>  4) & 0xff);
}
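
/* TBR layout implied by the handlers above:
     bits 31-12  TBA (trap base address)
     bits 11-4   TT  (trap type)
     bits 3-0    read as zero.  */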
\f
/* Cover fns to access the bpsr bits.  */
USI
spr_bpsr_get_handler (SIM_CPU *current_cpu)
{
  int bpsr = ((GET_H_BPSR_BS  () & 0x1) << 12) |
             ((GET_H_BPSR_BET () & 0x1)      );

  return bpsr;
}

void
spr_bpsr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  int bpsr = newval;

  SET_H_BPSR_BS  ((bpsr >> 12) & 1);
  SET_H_BPSR_BET ((bpsr      ) & 1);
}
\f
/* Cover fns to access the psr bits.  */
USI
spr_psr_get_handler (SIM_CPU *current_cpu)
{
  int psr = ((GET_H_PSR_IMPLE () & 0xf) << 28) |
            ((GET_H_PSR_VER   () & 0xf) << 24) |
            ((GET_H_PSR_ICE   () & 0x1) << 16) |
            ((GET_H_PSR_NEM   () & 0x1) << 14) |
            ((GET_H_PSR_CM    () & 0x1) << 13) |
            ((GET_H_PSR_BE    () & 0x1) << 12) |
            ((GET_H_PSR_ESR   () & 0x1) << 11) |
            ((GET_H_PSR_EF    () & 0x1) <<  8) |
            ((GET_H_PSR_EM    () & 0x1) <<  7) |
            ((GET_H_PSR_PIL   () & 0xf) <<  3) |
            ((GET_H_PSR_S     () & 0x1) <<  2) |
            ((GET_H_PSR_PS    () & 0x1) <<  1) |
            ((GET_H_PSR_ET    () & 0x1)      );

  return psr;
}

void
spr_psr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
     first.  */
  SET_H_PSR_S ((newval >>  2) & 1);

  SET_H_PSR_IMPLE ((newval >> 28) & 0xf);
  SET_H_PSR_VER   ((newval >> 24) & 0xf);
  SET_H_PSR_ICE   ((newval >> 16) & 1);
  SET_H_PSR_NEM   ((newval >> 14) & 1);
  SET_H_PSR_CM    ((newval >> 13) & 1);
  SET_H_PSR_BE    ((newval >> 12) & 1);
  SET_H_PSR_ESR   ((newval >> 11) & 1);
  SET_H_PSR_EF    ((newval >>  8) & 1);
  SET_H_PSR_EM    ((newval >>  7) & 1);
  SET_H_PSR_PIL   ((newval >>  3) & 0xf);
  SET_H_PSR_PS    ((newval >>  1) & 1);
  SET_H_PSR_ET    ((newval      ) & 1);
}
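
/* PSR layout implied by the two handlers above:
     bits 31-28 IMPLE, bits 27-24 VER, bit 16 ICE, bit 14 NEM, bit 13 CM,
     bit 12 BE, bit 11 ESR, bit 8 EF, bit 7 EM, bits 6-3 PIL, bit 2 S,
     bit 1 PS, bit 0 ET.  */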

void
frvbf_h_psr_s_set_handler (SIM_CPU *current_cpu, BI newval)
{
  /* If switching from user to supervisor mode, or vice-versa, then switch
     the supervisor/user context.  */
  int psr_s = GET_H_PSR_S ();
  if (psr_s != (newval & 1))
    {
      frvbf_switch_supervisor_user_context (current_cpu);
      CPU (h_psr_s) = newval & 1;
    }
}
\f
/* Cover fns to access the ccr bits.  */
USI
spr_ccr_get_handler (SIM_CPU *current_cpu)
{
  int ccr = ((GET_H_ICCR (H_ICCR_ICC3) & 0xf) << 28) |
            ((GET_H_ICCR (H_ICCR_ICC2) & 0xf) << 24) |
            ((GET_H_ICCR (H_ICCR_ICC1) & 0xf) << 20) |
            ((GET_H_ICCR (H_ICCR_ICC0) & 0xf) << 16) |
            ((GET_H_FCCR (H_FCCR_FCC3) & 0xf) << 12) |
            ((GET_H_FCCR (H_FCCR_FCC2) & 0xf) <<  8) |
            ((GET_H_FCCR (H_FCCR_FCC1) & 0xf) <<  4) |
            ((GET_H_FCCR (H_FCCR_FCC0) & 0xf)      );

  return ccr;
}

void
spr_ccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_ICCR (H_ICCR_ICC3, (newval >> 28) & 0xf);
  SET_H_ICCR (H_ICCR_ICC2, (newval >> 24) & 0xf);
  SET_H_ICCR (H_ICCR_ICC1, (newval >> 20) & 0xf);
  SET_H_ICCR (H_ICCR_ICC0, (newval >> 16) & 0xf);
  SET_H_FCCR (H_FCCR_FCC3, (newval >> 12) & 0xf);
  SET_H_FCCR (H_FCCR_FCC2, (newval >>  8) & 0xf);
  SET_H_FCCR (H_FCCR_FCC1, (newval >>  4) & 0xf);
  SET_H_FCCR (H_FCCR_FCC0, (newval      ) & 0xf);
}
\f
QI
frvbf_set_icc_for_shift_right (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the C flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = (1 << shift) - 1;
  if ((value & mask) != 0)
    return icc | 0x1;

  return icc & 0xe;
}

QI
frvbf_set_icc_for_shift_left (
  SIM_CPU *current_cpu, SI value, SI shift, QI icc
)
{
  /* Set the V flag of the given icc to the logical OR of the bits shifted
     out.  */
  int mask = ((1 << shift) - 1) << (32 - shift);
  if ((value & mask) != 0)
    return icc | 0x2;

  return icc & 0xd;
}
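
/* In the 4-bit ICC field the carry flag is bit 0 and the overflow flag is
   bit 1, hence the 0x1/0xe and 0x2/0xd masks above.  */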
\f
/* Cover fns to access the cccr bits.  */
USI
spr_cccr_get_handler (SIM_CPU *current_cpu)
{
  int cccr = ((GET_H_CCCR (H_CCCR_CC7) & 0x3) << 14) |
             ((GET_H_CCCR (H_CCCR_CC6) & 0x3) << 12) |
             ((GET_H_CCCR (H_CCCR_CC5) & 0x3) << 10) |
             ((GET_H_CCCR (H_CCCR_CC4) & 0x3) <<  8) |
             ((GET_H_CCCR (H_CCCR_CC3) & 0x3) <<  6) |
             ((GET_H_CCCR (H_CCCR_CC2) & 0x3) <<  4) |
             ((GET_H_CCCR (H_CCCR_CC1) & 0x3) <<  2) |
             ((GET_H_CCCR (H_CCCR_CC0) & 0x3)      );

  return cccr;
}

void
spr_cccr_set_handler (SIM_CPU *current_cpu, USI newval)
{
  SET_H_CCCR (H_CCCR_CC7, (newval >> 14) & 0x3);
  SET_H_CCCR (H_CCCR_CC6, (newval >> 12) & 0x3);
  SET_H_CCCR (H_CCCR_CC5, (newval >> 10) & 0x3);
  SET_H_CCCR (H_CCCR_CC4, (newval >>  8) & 0x3);
  SET_H_CCCR (H_CCCR_CC3, (newval >>  6) & 0x3);
  SET_H_CCCR (H_CCCR_CC2, (newval >>  4) & 0x3);
  SET_H_CCCR (H_CCCR_CC1, (newval >>  2) & 0x3);
  SET_H_CCCR (H_CCCR_CC0, (newval      ) & 0x3);
}
\f
/* Cover fns to access the sr bits.  */
USI
spr_sr_get_handler (SIM_CPU *current_cpu, UINT spr)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of UGR4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    return GET_H_GR (4 + (spr - H_SPR_SR0));

  return CPU (h_spr[spr]);
}

void
spr_sr_set_handler (SIM_CPU *current_cpu, UINT spr, USI newval)
{
  /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
     otherwise the correct mapping of UGR4-7 or SGR4-7 will be in SR0-3.  */
  int psr_esr = GET_H_PSR_ESR ();
  if (! psr_esr)
    SET_H_GR (4 + (spr - H_SPR_SR0), newval);
  else
    CPU (h_spr[spr]) = newval;
}
\f
/* Swap SR0-SR3 with GR4-GR7 if PSR.ESR is set.  */
void
frvbf_switch_supervisor_user_context (SIM_CPU *current_cpu)
{
  if (GET_H_PSR_ESR ())
    {
      /* We need to be in supervisor mode to swap the registers.  Access the
         PSR.S directly in order to avoid recursive context switches.  */
      int i;
      int save_psr_s = CPU (h_psr_s);
      CPU (h_psr_s) = 1;
      for (i = 0; i < 4; ++i)
        {
          int gr = i + 4;
          int spr = i + H_SPR_SR0;
          SI tmp = GET_H_SPR (spr);
          SET_H_SPR (spr, GET_H_GR (gr));
          SET_H_GR (gr, tmp);
        }
      CPU (h_psr_s) = save_psr_s;
    }
}
\f
/* Handle load/store of quad registers.  */
void
frvbf_load_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_gr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_GR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    {
      /* GR0 is always 0.  */
      if (src_ix == 0)
        value[i] = 0;
      else
        value[i] = GET_H_GR (src_ix + i);
    }
  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
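
/* When the data cache is enabled (HSR0.DCE), the store is queued through
   the frvbf_mem_set_XI cover function so that it goes through the simulated
   cache; otherwise it is queued as a plain memory write.  The FRint and CPR
   quad stores below follow the same pattern.  */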

void
frvbf_load_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_fr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_FRint (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_fr_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_FR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}

void
frvbf_load_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI targ_ix)
{
  int i;
  SI value[4];

  /* Check memory alignment.  */
  address = check_memory_alignment (current_cpu, address, 0xf);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 16;
    }
  else
    {
      for (i = 0; i < 4; ++i)
        {
          value[i] = frvbf_read_mem_SI (current_cpu, pc, address);
          address += 4;
        }
      sim_queue_fn_xi_write (current_cpu, frvbf_h_cpr_quad_set_handler, targ_ix,
                             value);
    }
}

void
frvbf_store_quad_CPR (SIM_CPU *current_cpu, PCADDR pc, SI address, SI src_ix)
{
  int i;
  SI value[4];
  USI hsr0;

  /* Check register and memory alignment.  */
  src_ix = check_register_alignment (current_cpu, src_ix, 3);
  address = check_memory_alignment (current_cpu, address, 0xf);

  for (i = 0; i < 4; ++i)
    value[i] = GET_H_CPR (src_ix + i);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_xi_write (current_cpu, frvbf_mem_set_XI, address, value);
  else
    sim_queue_mem_xi_write (current_cpu, address, value);
}
\f
void
frvbf_signed_integer_divide (
  SIM_CPU *current_cpu, SI arg1, SI arg2, int target_index, int non_excepting
)
{
  enum frv_dtt dtt = FRV_DTT_NO_EXCEPTION;
  if (arg1 == 0x80000000 && arg2 == -1)
    {
      /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set;
         otherwise it may result in 0x7fffffff (sparc compatibility) or
         0x80000000 (C language compatibility).  */
      USI isr;
      dtt = FRV_DTT_OVERFLOW;

      isr = GET_ISR ();
      if (GET_ISR_EDE (isr))
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x7fffffff);
      else
        sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                               0x80000000);
      frvbf_force_update (current_cpu); /* Force update of target register.  */
    }
  else if (arg2 == 0)
    dtt = FRV_DTT_DIVISION_BY_ZERO;
  else
    sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                           arg1 / arg2);

  /* Check for exceptions.  */
  if (dtt != FRV_DTT_NO_EXCEPTION)
    dtt = frvbf_division_exception (current_cpu, dtt, target_index,
                                    non_excepting);
  if (non_excepting && dtt == FRV_DTT_NO_EXCEPTION)
    {
      /* Non-excepting instruction.  Clear the NE flag for the target
         register.  */
      SI NE_flags[2];
      GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
      CLEAR_NE_FLAG (NE_flags, target_index);
      SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
    }
}

void
frvbf_unsigned_integer_divide (
  SIM_CPU *current_cpu, USI arg1, USI arg2, int target_index, int non_excepting
)
{
  if (arg2 == 0)
    frvbf_division_exception (current_cpu, FRV_DTT_DIVISION_BY_ZERO,
                              target_index, non_excepting);
  else
    {
      sim_queue_fn_si_write (current_cpu, frvbf_h_gr_set, target_index,
                             arg1 / arg2);
      if (non_excepting)
        {
          /* Non-excepting instruction.  Clear the NE flag for the target
             register.  */
          SI NE_flags[2];
          GET_NE_FLAGS (NE_flags, H_SPR_GNER0);
          CLEAR_NE_FLAG (NE_flags, target_index);
          SET_NE_FLAGS (H_SPR_GNER0, NE_flags);
        }
    }
}
\f
/* Clear accumulators.  */
void
frvbf_clear_accumulators (SIM_CPU *current_cpu, SI acc_ix, int A)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  int acc_num =
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550) ? 8 :
    (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400) ? 4 :
    63;
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);

  ps->mclracc_acc = acc_ix;
  ps->mclracc_A = A;
  if (A == 0 || acc_ix != 0) /* Clear one accumulator?  */
    {
      /* This instruction is a nop if the referenced accumulator is not
         implemented.  */
      if (acc_ix < acc_num)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, acc_ix, 0);
    }
  else
    {
      /* Clear all implemented accumulators.  */
      int i;
      for (i = 0; i < acc_num; ++i)
        sim_queue_fn_di_write (current_cpu, frvbf_h_acc40S_set, i, 0);
    }
}
\f
/* Functions to aid insn semantics.  */

/* Compute the result of the SCAN and SCANI insns after the shift and xor.  */
SI
frvbf_scan_result (SIM_CPU *current_cpu, SI value)
{
  SI i;
  SI mask;

  if (value == 0)
    return 63;

  /* Find the position of the first non-zero bit.
     The loop will terminate since there is guaranteed to be at least one
     non-zero bit.  */
  mask = 1 << (sizeof (mask) * 8 - 1);
  for (i = 0; (value & mask) == 0; ++i)
    value <<= 1;

  return i;
}
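
/* For example, a value with bit 31 set yields 0, a value of 1 yields 31,
   and zero yields the special result 63.  */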

/* Compute the result of the cut insns.  */
SI
frvbf_cut (SIM_CPU *current_cpu, SI reg1, SI reg2, SI cut_point)
{
  SI result;
  if (cut_point < 32)
    {
      result = reg1 << cut_point;
      result |= (reg2 >> (32 - cut_point)) & ((1 << cut_point) - 1);
    }
  else
    result = reg2 << (cut_point - 32);

  return result;
}
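
/* The effect is to extract the 32-bit field of the 64-bit concatenation
   reg1:reg2 whose most significant bit is bit (63 - cut_point): a cut_point
   of 0 returns reg1, while a cut_point of 8 returns bits 55-24.  */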

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  /* The cut_point is relative to bit 40 of 64 bits.  */
  if (cut_point >= 0)
    return (acc << (cut_point + 24)) >> 32;

  /* Extend the sign bit (bit 40) for negative cuts.  */
  if (cut_point == -32)
    return (acc << 24) >> 63; /* Special case for full shiftout.  */

  return (acc << 24) >> (32 + -cut_point);
}
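
/* The accumulator occupies the low 40 bits of the DI, so shifting left by
   cut_point + 24 brings bit (39 - cut_point) up to bit 63; the arithmetic
   shift right by 32 then yields the 32 bits below it.  A cut_point of 0,
   for example, returns accumulator bits 39-8.  */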

/* Compute the result of the cut insns.  */
SI
frvbf_media_cut_ss (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 6 bits (signed) of what we are passed.  */
  cut_point = cut_point << 26 >> 26;

  if (cut_point >= 0)
    {
      /* The cut_point is relative to bit 40 of 64 bits.  */
      DI shifted = acc << (cut_point + 24);
      DI unshifted = shifted >> (cut_point + 24);

      /* The result will be saturated if significant bits are shifted out.  */
      if (unshifted != acc)
        {
          if (acc < 0)
            return 0x80000000;
          return 0x7fffffff;
        }
    }

  /* The result will not be saturated, so use the code for the normal cut.  */
  return frvbf_media_cut (current_cpu, acc, cut_point);
}

/* Compute the result of int accumulator cut (SCUTSS).  */
SI
frvbf_iacc_cut (SIM_CPU *current_cpu, DI acc, SI cut_point)
{
  /* The cut point is the lower 7 bits (signed) of what we are passed.  */
  cut_point = cut_point << 25 >> 25;

  if (cut_point <= -32)
    cut_point = -31; /* Special case for full shiftout.  */

  /* Negative cuts (cannot saturate).  */
  if (cut_point < 0)
    return acc >> (32 + -cut_point);

  /* Positive cuts will saturate if significant bits are shifted out.  */
  if (acc != ((acc << cut_point) >> cut_point))
    {
      if (acc >= 0)
        return 0x7fffffff;
      else
        return 0x80000000;
    }

  /* No saturate, just cut.  */
  return ((acc << cut_point) >> 32);
}

/* Compute the result of shift-left-arithmetic-with-saturation (SLASS).  */
SI
frvbf_shift_left_arith_saturate (SIM_CPU *current_cpu, SI arg1, SI arg2)
{
  int neg_arg1;

  /* FIXME: what to do with negative shift amt?  */
  if (arg2 <= 0)
    return arg1;

  if (arg1 == 0)
    return 0;

  /* Signed shift by 31 or greater saturates by definition.  */
  if (arg2 >= 31)
    {
      if (arg1 > 0)
        return (SI) 0x7fffffff;
      else
        return (SI) 0x80000000;
    }

  /* OK, arg2 is between 1 and 31.  */
  neg_arg1 = (arg1 < 0);
  do {
    arg1 <<= 1;
    /* Check for sign bit change (saturation).  */
    if (neg_arg1 && (arg1 >= 0))
      return (SI) 0x80000000;
    else if (!neg_arg1 && (arg1 < 0))
      return (SI) 0x7fffffff;
  } while (--arg2 > 0);

  return arg1;
}

/* Simulate the media custom insns.  */
void
frvbf_media_cop (SIM_CPU *current_cpu, int cop_num)
{
  /* The semantics of the insn are a nop, since it is implementation defined.
     We do need to check whether it's implemented and set up for MTRAP
     if it's not.  */
  USI msr0 = GET_MSR (0);
  if (GET_MSR_EMCI (msr0) == 0)
    {
      /* No interrupt queued at this time.  */
      frv_set_mp_exception_registers (current_cpu, MTT_UNIMPLEMENTED_MPOP, 0);
    }
}

/* Simulate the media average (MAVEH) insn.  */
static HI
do_media_average (SIM_CPU *current_cpu, HI arg1, HI arg2)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  SI sum = (arg1 + arg2);
  HI result = sum >> 1;
  int rounding_value;

  /* On fr400 and fr550, check the rounding mode.  On other machines
     rounding is always toward negative infinity and the result is
     already correctly rounded.  */
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    /* Need to check rounding mode.  */
    case bfd_mach_fr400:
    case bfd_mach_fr550:
      /* Check whether rounding will be required.  Rounding will be required
         if the sum is an odd number.  */
      rounding_value = sum & 1;
      if (rounding_value)
        {
          USI msr0 = GET_MSR (0);
          /* Check MSR0.SRDAV to determine which bits control the rounding.  */
          if (GET_MSR_SRDAV (msr0))
            {
              /* MSR0.RD controls rounding.  */
              switch (GET_MSR_RD (msr0))
                {
                case 0:
                  /* Round to nearest.  */
                  if (result >= 0)
                    ++result;
                  break;
                case 1:
                  /* Round toward 0.  */
                  if (result < 0)
                    ++result;
                  break;
                case 2:
                  /* Round toward positive infinity.  */
                  ++result;
                  break;
                case 3:
                  /* Round toward negative infinity.  The result is already
                     correctly rounded.  */
                  break;
                default:
                  abort ();
                  break;
                }
            }
          else
            {
              /* MSR0.RDAV controls rounding.  If set, round toward positive
                 infinity.  Otherwise the result is already rounded correctly
                 toward negative infinity.  */
              if (GET_MSR_RDAV (msr0))
                ++result;
            }
        }
      break;
    default:
      break;
    }

  return result;
}

SI
frvbf_media_average (SIM_CPU *current_cpu, SI reg1, SI reg2)
{
  SI result;
  result = do_media_average (current_cpu, reg1 & 0xffff, reg2 & 0xffff);
  result &= 0xffff;
  result |= do_media_average (current_cpu, (reg1 >> 16) & 0xffff,
                              (reg2 >> 16) & 0xffff) << 16;
  return result;
}

/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL, JMPIL, and CALL.  */
void
frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU *current_cpu, int value)
{
  frvbf_write_next_vliw_addr_to_LR = value;
}

void
frvbf_set_ne_index (SIM_CPU *current_cpu, int index)
{
  USI NE_flags[2];

  /* Save the target register so interrupt processing can set its NE flag
     in the event of an exception.  */
  frv_interrupt_state.ne_index = index;

  /* Clear the NE flag of the target register.  It will be reset if necessary
     in the event of an exception.  */
  GET_NE_FLAGS (NE_flags, H_SPR_FNER0);
  CLEAR_NE_FLAG (NE_flags, index);
  SET_NE_FLAGS (H_SPR_FNER0, NE_flags);
}

void
frvbf_force_update (SIM_CPU *current_cpu)
{
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q);
  if (ix > 0)
    {
      CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix - 1);
      item->flags |= FRV_WRITE_QUEUE_FORCE_WRITE;
    }
}
\f
/* Condition code logic.  */
enum cr_ops {
  andcr, orcr, xorcr, nandcr, norcr, andncr, orncr, nandncr, norncr,
  num_cr_ops
};

enum cr_result {cr_undefined, cr_undefined1, cr_false, cr_true};

static enum cr_result
cr_logic[num_cr_ops][4][4] = {
  /* andcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_false,     cr_true     }
  },
  /* orcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_true     }
  },
  /* xorcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  },
  /* nandcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* true      */ {cr_undefined, cr_undefined, cr_true,      cr_false    }
  },
  /* norcr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_false    },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_false    }
  },
  /* andncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* orncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* undefined */ {cr_undefined, cr_undefined, cr_false,     cr_true     },
    /* false     */ {cr_true,      cr_true,      cr_true,      cr_true     },
    /* true      */ {cr_false,     cr_false,     cr_false,     cr_true     }
  },
  /* nandncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* undefined */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined},
    /* false     */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* true      */ {cr_undefined, cr_undefined, cr_undefined, cr_undefined}
  },
  /* norncr */
  {
    /*              undefined     undefined     false         true         */
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* undefined */ {cr_undefined, cr_undefined, cr_true,      cr_false    },
    /* false     */ {cr_false,     cr_false,     cr_false,     cr_false    },
    /* true      */ {cr_true,      cr_true,      cr_true,      cr_false    }
  }
};

UQI
frvbf_cr_logic (SIM_CPU *current_cpu, SI operation, UQI arg1, UQI arg2)
{
  return cr_logic[operation][arg1][arg2];
}
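
/* The table is indexed as cr_logic[operation][arg1][arg2], where arg1 and
   arg2 are CR values encoded per enum cr_result above: 0 and 1 are the two
   undefined states, 2 is false and 3 is true.  */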
\f
/* Cache Manipulation.  */
void
frvbf_insn_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_data_cache_preload (SIM_CPU *current_cpu, SI address, USI length, int lock)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        {
          CPU_LOAD_ADDRESS (current_cpu) = address;
          CPU_LOAD_LENGTH (current_cpu) = length;
          CPU_LOAD_LOCK (current_cpu) = lock;
        }
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_preload (cache, address, length, lock);
        }
    }
}

void
frvbf_insn_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_data_cache_unlock (SIM_CPU *current_cpu, SI address)
{
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  int hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    {
      if (model_insn)
        CPU_LOAD_ADDRESS (current_cpu) = address;
      else
        {
          FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
          frv_cache_unlock (cache, address);
        }
    }
}

void
frvbf_insn_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for an icei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_INSN_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_invalidate (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcei with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 0/* flush? */);
      else
        frv_cache_invalidate (cache, address, 0/* flush? */);
    }
}

void
frvbf_data_cache_flush (SIM_CPU *current_cpu, SI address, int all)
{
  /* Make sure the insn was specified properly.  -1 will be passed for ALL
     for a dcef with A=0.  */
  if (all == -1)
    {
      frv_queue_program_interrupt (current_cpu, FRV_ILLEGAL_INSTRUCTION);
      return;
    }

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      /* Record the all-entries flag for use in profiling.  */
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
      ps->all_cache_entries = all;
      CPU_LOAD_ADDRESS (current_cpu) = address;
    }
  else
    {
      FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
      if (all)
        frv_cache_invalidate_all (cache, 1/* flush? */);
      else
        frv_cache_invalidate (cache, address, 1/* flush? */);
    }
}