1 /* frv simulator support code
Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004 Free Software
Foundation, Inc.
Contributed by Red Hat.
6 This file is part of the GNU simulators.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License along
19 with this program; if not, write to the Free Software Foundation, Inc.,
20 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
23 #define WANT_CPU_FRVBF
28 #include "cgen-engine.h"
31 #include "gdb/sim-frv.h"
/* Maintain a flag in order to know when to write the address of the next
   VLIW instruction into the LR register.  Used by JMPL. JMPIL, and CALL
   insns.  */
int frvbf_write_next_vliw_addr_to_LR;
39 /* The contents of BUF are in target byte order. */
41 frvbf_fetch_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
43 if (SIM_FRV_GR0_REGNUM
<= rn
&& rn
<= SIM_FRV_GR63_REGNUM
)
45 int hi_available
, lo_available
;
46 int grn
= rn
- SIM_FRV_GR0_REGNUM
;
48 frv_gr_registers_available (current_cpu
, &hi_available
, &lo_available
);
50 if ((grn
< 32 && !lo_available
) || (grn
>= 32 && !hi_available
))
53 SETTSI (buf
, GET_H_GR (grn
));
55 else if (SIM_FRV_FR0_REGNUM
<= rn
&& rn
<= SIM_FRV_FR63_REGNUM
)
57 int hi_available
, lo_available
;
58 int frn
= rn
- SIM_FRV_FR0_REGNUM
;
60 frv_fr_registers_available (current_cpu
, &hi_available
, &lo_available
);
62 if ((frn
< 32 && !lo_available
) || (frn
>= 32 && !hi_available
))
65 SETTSI (buf
, GET_H_FR (frn
));
67 else if (rn
== SIM_FRV_PC_REGNUM
)
68 SETTSI (buf
, GET_H_PC ());
69 else if (SIM_FRV_SPR0_REGNUM
<= rn
&& rn
<= SIM_FRV_SPR4095_REGNUM
)
71 /* Make sure the register is implemented. */
72 FRV_REGISTER_CONTROL
*control
= CPU_REGISTER_CONTROL (current_cpu
);
73 int spr
= rn
- SIM_FRV_SPR0_REGNUM
;
74 if (! control
->spr
[spr
].implemented
)
76 SETTSI (buf
, GET_H_SPR (spr
));
80 SETTSI (buf
, 0xdeadbeef);
87 /* The contents of BUF are in target byte order. */
90 frvbf_store_register (SIM_CPU
*current_cpu
, int rn
, unsigned char *buf
, int len
)
92 if (SIM_FRV_GR0_REGNUM
<= rn
&& rn
<= SIM_FRV_GR63_REGNUM
)
94 int hi_available
, lo_available
;
95 int grn
= rn
- SIM_FRV_GR0_REGNUM
;
97 frv_gr_registers_available (current_cpu
, &hi_available
, &lo_available
);
99 if ((grn
< 32 && !lo_available
) || (grn
>= 32 && !hi_available
))
102 SET_H_GR (grn
, GETTSI (buf
));
104 else if (SIM_FRV_FR0_REGNUM
<= rn
&& rn
<= SIM_FRV_FR63_REGNUM
)
106 int hi_available
, lo_available
;
107 int frn
= rn
- SIM_FRV_FR0_REGNUM
;
109 frv_fr_registers_available (current_cpu
, &hi_available
, &lo_available
);
111 if ((frn
< 32 && !lo_available
) || (frn
>= 32 && !hi_available
))
114 SET_H_FR (frn
, GETTSI (buf
));
116 else if (rn
== SIM_FRV_PC_REGNUM
)
117 SET_H_PC (GETTSI (buf
));
118 else if (SIM_FRV_SPR0_REGNUM
<= rn
&& rn
<= SIM_FRV_SPR4095_REGNUM
)
120 /* Make sure the register is implemented. */
121 FRV_REGISTER_CONTROL
*control
= CPU_REGISTER_CONTROL (current_cpu
);
122 int spr
= rn
- SIM_FRV_SPR0_REGNUM
;
123 if (! control
->spr
[spr
].implemented
)
125 SET_H_SPR (spr
, GETTSI (buf
));
133 /* Cover fns to access the general registers. */
135 frvbf_h_gr_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
137 frv_check_gr_access (current_cpu
, gr
);
138 return CPU (h_gr
[gr
]);
142 frvbf_h_gr_set_handler (SIM_CPU
*current_cpu
, UINT gr
, USI newval
)
144 frv_check_gr_access (current_cpu
, gr
);
147 return; /* Storing into gr0 has no effect. */
149 CPU (h_gr
[gr
]) = newval
;
152 /* Cover fns to access the floating point registers. */
154 frvbf_h_fr_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
156 frv_check_fr_access (current_cpu
, fr
);
157 return CPU (h_fr
[fr
]);
161 frvbf_h_fr_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SF newval
)
163 frv_check_fr_access (current_cpu
, fr
);
164 CPU (h_fr
[fr
]) = newval
;
167 /* Cover fns to access the general registers as double words. */
169 check_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
171 if (reg
& align_mask
)
173 SIM_DESC sd
= CPU_STATE (current_cpu
);
174 switch (STATE_ARCHITECTURE (sd
)->mach
)
178 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
180 case bfd_mach_frvtomcat
:
183 frv_queue_register_exception_interrupt (current_cpu
,
197 check_fr_register_alignment (SIM_CPU
*current_cpu
, UINT reg
, int align_mask
)
199 if (reg
& align_mask
)
201 SIM_DESC sd
= CPU_STATE (current_cpu
);
202 switch (STATE_ARCHITECTURE (sd
)->mach
)
206 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
208 case bfd_mach_frvtomcat
:
212 struct frv_fp_exception_info fp_info
= {
213 FSR_NO_EXCEPTION
, FTT_INVALID_FR
215 frv_queue_fp_exception_interrupt (current_cpu
, & fp_info
);
229 check_memory_alignment (SIM_CPU
*current_cpu
, SI address
, int align_mask
)
231 if (address
& align_mask
)
233 SIM_DESC sd
= CPU_STATE (current_cpu
);
234 switch (STATE_ARCHITECTURE (sd
)->mach
)
237 frv_queue_data_access_error_interrupt (current_cpu
, address
);
239 case bfd_mach_frvtomcat
:
242 frv_queue_mem_address_not_aligned_interrupt (current_cpu
, address
);
248 address
&= ~align_mask
;
255 frvbf_h_gr_double_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
260 return 0; /* gr0 is always 0. */
262 /* Check the register alignment. */
263 gr
= check_register_alignment (current_cpu
, gr
, 1);
265 value
= GET_H_GR (gr
);
267 value
|= (USI
) GET_H_GR (gr
+ 1);
272 frvbf_h_gr_double_set_handler (SIM_CPU
*current_cpu
, UINT gr
, DI newval
)
275 return; /* Storing into gr0 has no effect. */
277 /* Check the register alignment. */
278 gr
= check_register_alignment (current_cpu
, gr
, 1);
280 SET_H_GR (gr
, (newval
>> 32) & 0xffffffff);
281 SET_H_GR (gr
+ 1, (newval
) & 0xffffffff);
284 /* Cover fns to access the floating point register as double words. */
286 frvbf_h_fr_double_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
293 /* Check the register alignment. */
294 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
296 if (CURRENT_HOST_BYTE_ORDER
== LITTLE_ENDIAN
)
298 value
.as_sf
[1] = GET_H_FR (fr
);
299 value
.as_sf
[0] = GET_H_FR (fr
+ 1);
303 value
.as_sf
[0] = GET_H_FR (fr
);
304 value
.as_sf
[1] = GET_H_FR (fr
+ 1);
311 frvbf_h_fr_double_set_handler (SIM_CPU
*current_cpu
, UINT fr
, DF newval
)
318 /* Check the register alignment. */
319 fr
= check_fr_register_alignment (current_cpu
, fr
, 1);
321 value
.as_df
= newval
;
322 if (CURRENT_HOST_BYTE_ORDER
== LITTLE_ENDIAN
)
324 SET_H_FR (fr
, value
.as_sf
[1]);
325 SET_H_FR (fr
+ 1, value
.as_sf
[0]);
329 SET_H_FR (fr
, value
.as_sf
[0]);
330 SET_H_FR (fr
+ 1, value
.as_sf
[1]);
334 /* Cover fns to access the floating point register as integer words. */
336 frvbf_h_fr_int_get_handler (SIM_CPU
*current_cpu
, UINT fr
)
343 value
.as_sf
= GET_H_FR (fr
);
348 frvbf_h_fr_int_set_handler (SIM_CPU
*current_cpu
, UINT fr
, USI newval
)
355 value
.as_usi
= newval
;
356 SET_H_FR (fr
, value
.as_sf
);
359 /* Cover fns to access the coprocessor registers as double words. */
361 frvbf_h_cpr_double_get_handler (SIM_CPU
*current_cpu
, UINT cpr
)
365 /* Check the register alignment. */
366 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
368 value
= GET_H_CPR (cpr
);
370 value
|= (USI
) GET_H_CPR (cpr
+ 1);
375 frvbf_h_cpr_double_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, DI newval
)
377 /* Check the register alignment. */
378 cpr
= check_register_alignment (current_cpu
, cpr
, 1);
380 SET_H_CPR (cpr
, (newval
>> 32) & 0xffffffff);
381 SET_H_CPR (cpr
+ 1, (newval
) & 0xffffffff);
384 /* Cover fns to write registers as quad words. */
386 frvbf_h_gr_quad_set_handler (SIM_CPU
*current_cpu
, UINT gr
, SI
*newval
)
389 return; /* Storing into gr0 has no effect. */
391 /* Check the register alignment. */
392 gr
= check_register_alignment (current_cpu
, gr
, 3);
394 SET_H_GR (gr
, newval
[0]);
395 SET_H_GR (gr
+ 1, newval
[1]);
396 SET_H_GR (gr
+ 2, newval
[2]);
397 SET_H_GR (gr
+ 3, newval
[3]);
401 frvbf_h_fr_quad_set_handler (SIM_CPU
*current_cpu
, UINT fr
, SI
*newval
)
403 /* Check the register alignment. */
404 fr
= check_fr_register_alignment (current_cpu
, fr
, 3);
406 SET_H_FR (fr
, newval
[0]);
407 SET_H_FR (fr
+ 1, newval
[1]);
408 SET_H_FR (fr
+ 2, newval
[2]);
409 SET_H_FR (fr
+ 3, newval
[3]);
413 frvbf_h_cpr_quad_set_handler (SIM_CPU
*current_cpu
, UINT cpr
, SI
*newval
)
415 /* Check the register alignment. */
416 cpr
= check_register_alignment (current_cpu
, cpr
, 3);
418 SET_H_CPR (cpr
, newval
[0]);
419 SET_H_CPR (cpr
+ 1, newval
[1]);
420 SET_H_CPR (cpr
+ 2, newval
[2]);
421 SET_H_CPR (cpr
+ 3, newval
[3]);
424 /* Cover fns to access the special purpose registers. */
426 frvbf_h_spr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
428 /* Check access restrictions. */
429 frv_check_spr_read_access (current_cpu
, spr
);
434 return spr_psr_get_handler (current_cpu
);
436 return spr_tbr_get_handler (current_cpu
);
438 return spr_bpsr_get_handler (current_cpu
);
440 return spr_ccr_get_handler (current_cpu
);
442 return spr_cccr_get_handler (current_cpu
);
447 return spr_sr_get_handler (current_cpu
, spr
);
450 return CPU (h_spr
[spr
]);
456 frvbf_h_spr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
458 FRV_REGISTER_CONTROL
*control
;
462 /* Check access restrictions. */
463 frv_check_spr_write_access (current_cpu
, spr
);
465 /* Only set those fields which are writeable. */
466 control
= CPU_REGISTER_CONTROL (current_cpu
);
467 mask
= control
->spr
[spr
].read_only_mask
;
468 oldval
= GET_H_SPR (spr
);
470 newval
= (newval
& ~mask
) | (oldval
& mask
);
472 /* Some registers are represented by individual components which are
473 referenced more often than the register itself. */
477 spr_psr_set_handler (current_cpu
, newval
);
480 spr_tbr_set_handler (current_cpu
, newval
);
483 spr_bpsr_set_handler (current_cpu
, newval
);
486 spr_ccr_set_handler (current_cpu
, newval
);
489 spr_cccr_set_handler (current_cpu
, newval
);
495 spr_sr_set_handler (current_cpu
, spr
, newval
);
498 frv_cache_reconfigure (current_cpu
, CPU_INSN_CACHE (current_cpu
));
501 CPU (h_spr
[spr
]) = newval
;
506 /* Cover fns to access the gr_hi and gr_lo registers. */
508 frvbf_h_gr_hi_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
510 return (GET_H_GR(gr
) >> 16) & 0xffff;
514 frvbf_h_gr_hi_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
516 USI value
= (GET_H_GR (gr
) & 0xffff) | (newval
<< 16);
517 SET_H_GR (gr
, value
);
521 frvbf_h_gr_lo_get_handler (SIM_CPU
*current_cpu
, UINT gr
)
523 return GET_H_GR(gr
) & 0xffff;
527 frvbf_h_gr_lo_set_handler (SIM_CPU
*current_cpu
, UINT gr
, UHI newval
)
529 USI value
= (GET_H_GR (gr
) & 0xffff0000) | (newval
& 0xffff);
530 SET_H_GR (gr
, value
);
533 /* Cover fns to access the tbr bits. */
535 spr_tbr_get_handler (SIM_CPU
*current_cpu
)
537 int tbr
= ((GET_H_TBR_TBA () & 0xfffff) << 12) |
538 ((GET_H_TBR_TT () & 0xff) << 4);
544 spr_tbr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
548 SET_H_TBR_TBA ((tbr
>> 12) & 0xfffff) ;
549 SET_H_TBR_TT ((tbr
>> 4) & 0xff) ;
552 /* Cover fns to access the bpsr bits. */
554 spr_bpsr_get_handler (SIM_CPU
*current_cpu
)
556 int bpsr
= ((GET_H_BPSR_BS () & 0x1) << 12) |
557 ((GET_H_BPSR_BET () & 0x1) );
563 spr_bpsr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
567 SET_H_BPSR_BS ((bpsr
>> 12) & 1);
568 SET_H_BPSR_BET ((bpsr
) & 1);
571 /* Cover fns to access the psr bits. */
573 spr_psr_get_handler (SIM_CPU
*current_cpu
)
575 int psr
= ((GET_H_PSR_IMPLE () & 0xf) << 28) |
576 ((GET_H_PSR_VER () & 0xf) << 24) |
577 ((GET_H_PSR_ICE () & 0x1) << 16) |
578 ((GET_H_PSR_NEM () & 0x1) << 14) |
579 ((GET_H_PSR_CM () & 0x1) << 13) |
580 ((GET_H_PSR_BE () & 0x1) << 12) |
581 ((GET_H_PSR_ESR () & 0x1) << 11) |
582 ((GET_H_PSR_EF () & 0x1) << 8) |
583 ((GET_H_PSR_EM () & 0x1) << 7) |
584 ((GET_H_PSR_PIL () & 0xf) << 3) |
585 ((GET_H_PSR_S () & 0x1) << 2) |
586 ((GET_H_PSR_PS () & 0x1) << 1) |
587 ((GET_H_PSR_ET () & 0x1) );
593 spr_psr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
595 /* The handler for PSR.S references the value of PSR.ESR, so set PSR.S
597 SET_H_PSR_S ((newval
>> 2) & 1);
599 SET_H_PSR_IMPLE ((newval
>> 28) & 0xf);
600 SET_H_PSR_VER ((newval
>> 24) & 0xf);
601 SET_H_PSR_ICE ((newval
>> 16) & 1);
602 SET_H_PSR_NEM ((newval
>> 14) & 1);
603 SET_H_PSR_CM ((newval
>> 13) & 1);
604 SET_H_PSR_BE ((newval
>> 12) & 1);
605 SET_H_PSR_ESR ((newval
>> 11) & 1);
606 SET_H_PSR_EF ((newval
>> 8) & 1);
607 SET_H_PSR_EM ((newval
>> 7) & 1);
608 SET_H_PSR_PIL ((newval
>> 3) & 0xf);
609 SET_H_PSR_PS ((newval
>> 1) & 1);
610 SET_H_PSR_ET ((newval
) & 1);
614 frvbf_h_psr_s_set_handler (SIM_CPU
*current_cpu
, BI newval
)
616 /* If switching from user to supervisor mode, or vice-versa, then switch
617 the supervisor/user context. */
618 int psr_s
= GET_H_PSR_S ();
619 if (psr_s
!= (newval
& 1))
621 frvbf_switch_supervisor_user_context (current_cpu
);
622 CPU (h_psr_s
) = newval
& 1;
626 /* Cover fns to access the ccr bits. */
628 spr_ccr_get_handler (SIM_CPU
*current_cpu
)
630 int ccr
= ((GET_H_ICCR (H_ICCR_ICC3
) & 0xf) << 28) |
631 ((GET_H_ICCR (H_ICCR_ICC2
) & 0xf) << 24) |
632 ((GET_H_ICCR (H_ICCR_ICC1
) & 0xf) << 20) |
633 ((GET_H_ICCR (H_ICCR_ICC0
) & 0xf) << 16) |
634 ((GET_H_FCCR (H_FCCR_FCC3
) & 0xf) << 12) |
635 ((GET_H_FCCR (H_FCCR_FCC2
) & 0xf) << 8) |
636 ((GET_H_FCCR (H_FCCR_FCC1
) & 0xf) << 4) |
637 ((GET_H_FCCR (H_FCCR_FCC0
) & 0xf) );
643 spr_ccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
647 SET_H_ICCR (H_ICCR_ICC3
, (newval
>> 28) & 0xf);
648 SET_H_ICCR (H_ICCR_ICC2
, (newval
>> 24) & 0xf);
649 SET_H_ICCR (H_ICCR_ICC1
, (newval
>> 20) & 0xf);
650 SET_H_ICCR (H_ICCR_ICC0
, (newval
>> 16) & 0xf);
651 SET_H_FCCR (H_FCCR_FCC3
, (newval
>> 12) & 0xf);
652 SET_H_FCCR (H_FCCR_FCC2
, (newval
>> 8) & 0xf);
653 SET_H_FCCR (H_FCCR_FCC1
, (newval
>> 4) & 0xf);
654 SET_H_FCCR (H_FCCR_FCC0
, (newval
) & 0xf);
658 frvbf_set_icc_for_shift_right (
659 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
662 /* Set the C flag of the given icc to the logical OR of the bits shifted
664 int mask
= (1 << shift
) - 1;
665 if ((value
& mask
) != 0)
672 frvbf_set_icc_for_shift_left (
673 SIM_CPU
*current_cpu
, SI value
, SI shift
, QI icc
676 /* Set the V flag of the given icc to the logical OR of the bits shifted
678 int mask
= ((1 << shift
) - 1) << (32 - shift
);
679 if ((value
& mask
) != 0)
685 /* Cover fns to access the cccr bits. */
687 spr_cccr_get_handler (SIM_CPU
*current_cpu
)
689 int cccr
= ((GET_H_CCCR (H_CCCR_CC7
) & 0x3) << 14) |
690 ((GET_H_CCCR (H_CCCR_CC6
) & 0x3) << 12) |
691 ((GET_H_CCCR (H_CCCR_CC5
) & 0x3) << 10) |
692 ((GET_H_CCCR (H_CCCR_CC4
) & 0x3) << 8) |
693 ((GET_H_CCCR (H_CCCR_CC3
) & 0x3) << 6) |
694 ((GET_H_CCCR (H_CCCR_CC2
) & 0x3) << 4) |
695 ((GET_H_CCCR (H_CCCR_CC1
) & 0x3) << 2) |
696 ((GET_H_CCCR (H_CCCR_CC0
) & 0x3) );
702 spr_cccr_set_handler (SIM_CPU
*current_cpu
, USI newval
)
706 SET_H_CCCR (H_CCCR_CC7
, (newval
>> 14) & 0x3);
707 SET_H_CCCR (H_CCCR_CC6
, (newval
>> 12) & 0x3);
708 SET_H_CCCR (H_CCCR_CC5
, (newval
>> 10) & 0x3);
709 SET_H_CCCR (H_CCCR_CC4
, (newval
>> 8) & 0x3);
710 SET_H_CCCR (H_CCCR_CC3
, (newval
>> 6) & 0x3);
711 SET_H_CCCR (H_CCCR_CC2
, (newval
>> 4) & 0x3);
712 SET_H_CCCR (H_CCCR_CC1
, (newval
>> 2) & 0x3);
713 SET_H_CCCR (H_CCCR_CC0
, (newval
) & 0x3);
716 /* Cover fns to access the sr bits. */
718 spr_sr_get_handler (SIM_CPU
*current_cpu
, UINT spr
)
720 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
721 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
722 int psr_esr
= GET_H_PSR_ESR ();
724 return GET_H_GR (4 + (spr
- H_SPR_SR0
));
726 return CPU (h_spr
[spr
]);
730 spr_sr_set_handler (SIM_CPU
*current_cpu
, UINT spr
, USI newval
)
732 /* If PSR.ESR is not set, then SR0-3 map onto SGR4-7 which will be GR4-7,
733 otherwise the correct mapping of USG4-7 or SGR4-7 will be in SR0-3. */
734 int psr_esr
= GET_H_PSR_ESR ();
736 SET_H_GR (4 + (spr
- H_SPR_SR0
), newval
);
738 CPU (h_spr
[spr
]) = newval
;
741 /* Switch SR0-SR4 with GR4-GR7 if PSR.ESR is set. */
743 frvbf_switch_supervisor_user_context (SIM_CPU
*current_cpu
)
745 if (GET_H_PSR_ESR ())
747 /* We need to be in supervisor mode to swap the registers. Access the
748 PSR.S directly in order to avoid recursive context switches. */
750 int save_psr_s
= CPU (h_psr_s
);
752 for (i
= 0; i
< 4; ++i
)
755 int spr
= i
+ H_SPR_SR0
;
756 SI tmp
= GET_H_SPR (spr
);
757 SET_H_SPR (spr
, GET_H_GR (gr
));
760 CPU (h_psr_s
) = save_psr_s
;
764 /* Handle load/store of quad registers. */
766 frvbf_load_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
771 /* Check memory alignment */
772 address
= check_memory_alignment (current_cpu
, address
, 0xf);
774 /* If we need to count cycles, then the cache operation will be
775 initiated from the model profiling functions.
776 See frvbf_model_.... */
779 CPU_LOAD_ADDRESS (current_cpu
) = address
;
780 CPU_LOAD_LENGTH (current_cpu
) = 16;
784 for (i
= 0; i
< 4; ++i
)
786 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
789 sim_queue_fn_xi_write (current_cpu
, frvbf_h_gr_quad_set_handler
, targ_ix
,
795 frvbf_store_quad_GR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
801 /* Check register and memory alignment. */
802 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
803 address
= check_memory_alignment (current_cpu
, address
, 0xf);
805 for (i
= 0; i
< 4; ++i
)
807 /* GR0 is always 0. */
811 value
[i
] = GET_H_GR (src_ix
+ i
);
814 if (GET_HSR0_DCE (hsr0
))
815 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
817 sim_queue_mem_xi_write (current_cpu
, address
, value
);
821 frvbf_load_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
826 /* Check memory alignment */
827 address
= check_memory_alignment (current_cpu
, address
, 0xf);
829 /* If we need to count cycles, then the cache operation will be
830 initiated from the model profiling functions.
831 See frvbf_model_.... */
834 CPU_LOAD_ADDRESS (current_cpu
) = address
;
835 CPU_LOAD_LENGTH (current_cpu
) = 16;
839 for (i
= 0; i
< 4; ++i
)
841 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
844 sim_queue_fn_xi_write (current_cpu
, frvbf_h_fr_quad_set_handler
, targ_ix
,
850 frvbf_store_quad_FRint (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
856 /* Check register and memory alignment. */
857 src_ix
= check_fr_register_alignment (current_cpu
, src_ix
, 3);
858 address
= check_memory_alignment (current_cpu
, address
, 0xf);
860 for (i
= 0; i
< 4; ++i
)
861 value
[i
] = GET_H_FR (src_ix
+ i
);
864 if (GET_HSR0_DCE (hsr0
))
865 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
867 sim_queue_mem_xi_write (current_cpu
, address
, value
);
871 frvbf_load_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI targ_ix
)
876 /* Check memory alignment */
877 address
= check_memory_alignment (current_cpu
, address
, 0xf);
879 /* If we need to count cycles, then the cache operation will be
880 initiated from the model profiling functions.
881 See frvbf_model_.... */
884 CPU_LOAD_ADDRESS (current_cpu
) = address
;
885 CPU_LOAD_LENGTH (current_cpu
) = 16;
889 for (i
= 0; i
< 4; ++i
)
891 value
[i
] = frvbf_read_mem_SI (current_cpu
, pc
, address
);
894 sim_queue_fn_xi_write (current_cpu
, frvbf_h_cpr_quad_set_handler
, targ_ix
,
900 frvbf_store_quad_CPR (SIM_CPU
*current_cpu
, PCADDR pc
, SI address
, SI src_ix
)
906 /* Check register and memory alignment. */
907 src_ix
= check_register_alignment (current_cpu
, src_ix
, 3);
908 address
= check_memory_alignment (current_cpu
, address
, 0xf);
910 for (i
= 0; i
< 4; ++i
)
911 value
[i
] = GET_H_CPR (src_ix
+ i
);
914 if (GET_HSR0_DCE (hsr0
))
915 sim_queue_fn_mem_xi_write (current_cpu
, frvbf_mem_set_XI
, address
, value
);
917 sim_queue_mem_xi_write (current_cpu
, address
, value
);
921 frvbf_signed_integer_divide (
922 SIM_CPU
*current_cpu
, SI arg1
, SI arg2
, int target_index
, int non_excepting
925 enum frv_dtt dtt
= FRV_DTT_NO_EXCEPTION
;
926 if (arg1
== 0x80000000 && arg2
== -1)
928 /* 0x80000000/(-1) must result in 0x7fffffff when ISR.EDE is set
929 otherwise it may result in 0x7fffffff (sparc compatibility) or
930 0x80000000 (C language compatibility). */
932 dtt
= FRV_DTT_OVERFLOW
;
935 if (GET_ISR_EDE (isr
))
936 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
939 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
941 frvbf_force_update (current_cpu
); /* Force update of target register. */
944 dtt
= FRV_DTT_DIVISION_BY_ZERO
;
946 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
949 /* Check for exceptions. */
950 if (dtt
!= FRV_DTT_NO_EXCEPTION
)
951 dtt
= frvbf_division_exception (current_cpu
, dtt
, target_index
,
953 if (non_excepting
&& dtt
== FRV_DTT_NO_EXCEPTION
)
955 /* Non excepting instruction. Clear the NE flag for the target
958 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
959 CLEAR_NE_FLAG (NE_flags
, target_index
);
960 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
965 frvbf_unsigned_integer_divide (
966 SIM_CPU
*current_cpu
, USI arg1
, USI arg2
, int target_index
, int non_excepting
970 frvbf_division_exception (current_cpu
, FRV_DTT_DIVISION_BY_ZERO
,
971 target_index
, non_excepting
);
974 sim_queue_fn_si_write (current_cpu
, frvbf_h_gr_set
, target_index
,
978 /* Non excepting instruction. Clear the NE flag for the target
981 GET_NE_FLAGS (NE_flags
, H_SPR_GNER0
);
982 CLEAR_NE_FLAG (NE_flags
, target_index
);
983 SET_NE_FLAGS (H_SPR_GNER0
, NE_flags
);
988 /* Clear accumulators. */
990 frvbf_clear_accumulators (SIM_CPU
*current_cpu
, SI acc_ix
, int A
)
992 SIM_DESC sd
= CPU_STATE (current_cpu
);
994 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr500
) ? 8 :
995 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr550
) ? 8 :
996 (STATE_ARCHITECTURE (sd
)->mach
== bfd_mach_fr400
) ? 4 :
998 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1000 ps
->mclracc_acc
= acc_ix
;
1002 if (A
== 0 || acc_ix
!= 0) /* Clear 1 accumuator? */
1004 /* This instruction is a nop if the referenced accumulator is not
1006 if (acc_ix
< acc_num
)
1007 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, acc_ix
, 0);
1011 /* Clear all implemented accumulators. */
1013 for (i
= 0; i
< acc_num
; ++i
)
1014 sim_queue_fn_di_write (current_cpu
, frvbf_h_acc40S_set
, i
, 0);
1018 /* Functions to aid insn semantics. */
1020 /* Compute the result of the SCAN and SCANI insns after the shift and xor. */
1022 frvbf_scan_result (SIM_CPU
*current_cpu
, SI value
)
1030 /* Find the position of the first non-zero bit.
1031 The loop will terminate since there is guaranteed to be at least one
1033 mask
= 1 << (sizeof (mask
) * 8 - 1);
1034 for (i
= 0; (value
& mask
) == 0; ++i
)
1040 /* Compute the result of the cut insns. */
1042 frvbf_cut (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
, SI cut_point
)
1047 result
= reg1
<< cut_point
;
1048 result
|= (reg2
>> (32 - cut_point
)) & ((1 << cut_point
) - 1);
1051 result
= reg2
<< (cut_point
- 32);
1056 /* Compute the result of the cut insns. */
1058 frvbf_media_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1060 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1061 cut_point
= cut_point
<< 26 >> 26;
1063 /* The cut_point is relative to bit 40 of 64 bits. */
1065 return (acc
<< (cut_point
+ 24)) >> 32;
1067 /* Extend the sign bit (bit 40) for negative cuts. */
1068 if (cut_point
== -32)
1069 return (acc
<< 24) >> 63; /* Special case for full shiftout. */
1071 return (acc
<< 24) >> (32 + -cut_point
);
1074 /* Compute the result of the cut insns. */
1076 frvbf_media_cut_ss (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1078 /* The cut point is the lower 6 bits (signed) of what we are passed. */
1079 cut_point
= cut_point
<< 26 >> 26;
1083 /* The cut_point is relative to bit 40 of 64 bits. */
1084 DI shifted
= acc
<< (cut_point
+ 24);
1085 DI unshifted
= shifted
>> (cut_point
+ 24);
1087 /* The result will be saturated if significant bits are shifted out. */
1088 if (unshifted
!= acc
)
1096 /* The result will not be saturated, so use the code for the normal cut. */
1097 return frvbf_media_cut (current_cpu
, acc
, cut_point
);
1100 /* Compute the result of int accumulator cut (SCUTSS). */
1102 frvbf_iacc_cut (SIM_CPU
*current_cpu
, DI acc
, SI cut_point
)
1106 /* The cut point is the lower 7 bits (signed) of what we are passed. */
1107 cut_point
= cut_point
<< 25 >> 25;
1109 /* Conceptually, the operation is on a 128-bit sign-extension of ACC.
1110 The top bit of the return value corresponds to bit (63 - CUT_POINT)
1111 of this 128-bit value.
1113 Since we can't deal with 128-bit values very easily, convert the
1114 operation into an equivalent 64-bit one. */
1117 /* Avoid an undefined shift operation. */
1118 if (cut_point
== -64)
1125 /* Get the shifted but unsaturated result. Set LOWER to the lowest
1126 32 bits of the result and UPPER to the result >> 31. */
1129 /* The cut loses the (32 - CUT_POINT) least significant bits.
1130 Round the result up if the most significant of these lost bits
1132 lower
= acc
>> (32 - cut_point
);
1133 if (lower
< 0x7fffffff)
1134 if (acc
& LSBIT64 (32 - cut_point
- 1))
1136 upper
= lower
>> 31;
1140 lower
= acc
<< (cut_point
- 32);
1141 upper
= acc
>> (63 - cut_point
);
1144 /* Saturate the result. */
1153 /* Compute the result of shift-left-arithmetic-with-saturation (SLASS). */
1155 frvbf_shift_left_arith_saturate (SIM_CPU
*current_cpu
, SI arg1
, SI arg2
)
1159 /* FIXME: what to do with negative shift amt? */
1166 /* Signed shift by 31 or greater saturates by definition. */
1169 return (SI
) 0x7fffffff;
1171 return (SI
) 0x80000000;
1173 /* OK, arg2 is between 1 and 31. */
1174 neg_arg1
= (arg1
< 0);
1177 /* Check for sign bit change (saturation). */
1178 if (neg_arg1
&& (arg1
>= 0))
1179 return (SI
) 0x80000000;
1180 else if (!neg_arg1
&& (arg1
< 0))
1181 return (SI
) 0x7fffffff;
1182 } while (--arg2
> 0);
1187 /* Simulate the media custom insns. */
1189 frvbf_media_cop (SIM_CPU
*current_cpu
, int cop_num
)
1191 /* The semantics of the insn are a nop, since it is implementation defined.
1192 We do need to check whether it's implemented and set up for MTRAP
1194 USI msr0
= GET_MSR (0);
1195 if (GET_MSR_EMCI (msr0
) == 0)
1197 /* no interrupt queued at this time. */
1198 frv_set_mp_exception_registers (current_cpu
, MTT_UNIMPLEMENTED_MPOP
, 0);
1202 /* Simulate the media average (MAVEH) insn. */
1204 do_media_average (SIM_CPU
*current_cpu
, HI arg1
, HI arg2
)
1206 SIM_DESC sd
= CPU_STATE (current_cpu
);
1207 SI sum
= (arg1
+ arg2
);
1208 HI result
= sum
>> 1;
1211 /* On fr400 and fr550, check the rounding mode. On other machines rounding is always
1212 toward negative infinity and the result is already correctly rounded. */
1213 switch (STATE_ARCHITECTURE (sd
)->mach
)
1215 /* Need to check rounding mode. */
1216 case bfd_mach_fr400
:
1217 case bfd_mach_fr550
:
1218 /* Check whether rounding will be required. Rounding will be required
1219 if the sum is an odd number. */
1220 rounding_value
= sum
& 1;
1223 USI msr0
= GET_MSR (0);
1224 /* Check MSR0.SRDAV to determine which bits control the rounding. */
1225 if (GET_MSR_SRDAV (msr0
))
1227 /* MSR0.RD controls rounding. */
1228 switch (GET_MSR_RD (msr0
))
1231 /* Round to nearest. */
1236 /* Round toward 0. */
1241 /* Round toward positive infinity. */
1245 /* Round toward negative infinity. The result is already
1246 correctly rounded. */
1255 /* MSR0.RDAV controls rounding. If set, round toward positive
1256 infinity. Otherwise the result is already rounded correctly
1257 toward negative infinity. */
1258 if (GET_MSR_RDAV (msr0
))
1271 frvbf_media_average (SIM_CPU
*current_cpu
, SI reg1
, SI reg2
)
1274 result
= do_media_average (current_cpu
, reg1
& 0xffff, reg2
& 0xffff);
1276 result
|= do_media_average (current_cpu
, (reg1
>> 16) & 0xffff,
1277 (reg2
>> 16) & 0xffff) << 16;
1281 /* Maintain a flag in order to know when to write the address of the next
1282 VLIW instruction into the LR register. Used by JMPL. JMPIL, and CALL. */
1284 frvbf_set_write_next_vliw_addr_to_LR (SIM_CPU
*current_cpu
, int value
)
1286 frvbf_write_next_vliw_addr_to_LR
= value
;
1290 frvbf_set_ne_index (SIM_CPU
*current_cpu
, int index
)
1294 /* Save the target register so interrupt processing can set its NE flag
1295 in the event of an exception. */
1296 frv_interrupt_state
.ne_index
= index
;
1298 /* Clear the NE flag of the target register. It will be reset if necessary
1299 in the event of an exception. */
1300 GET_NE_FLAGS (NE_flags
, H_SPR_FNER0
);
1301 CLEAR_NE_FLAG (NE_flags
, index
);
1302 SET_NE_FLAGS (H_SPR_FNER0
, NE_flags
);
1306 frvbf_force_update (SIM_CPU
*current_cpu
)
1308 CGEN_WRITE_QUEUE
*q
= CPU_WRITE_QUEUE (current_cpu
);
1309 int ix
= CGEN_WRITE_QUEUE_INDEX (q
);
1312 CGEN_WRITE_QUEUE_ELEMENT
*item
= CGEN_WRITE_QUEUE_ELEMENT (q
, ix
- 1);
1313 item
->flags
|= FRV_WRITE_QUEUE_FORCE_WRITE
;
1317 /* Condition code logic. */
1319 andcr
, orcr
, xorcr
, nandcr
, norcr
, andncr
, orncr
, nandncr
, norncr
,
1323 enum cr_result
{cr_undefined
, cr_undefined1
, cr_false
, cr_true
};
1325 static enum cr_result
1326 cr_logic
[num_cr_ops
][4][4] = {
1329 /* undefined undefined false true */
1330 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1331 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1332 /* false */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1333 /* true */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
}
1337 /* undefined undefined false true */
1338 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1339 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1340 /* false */ {cr_false
, cr_false
, cr_false
, cr_true
},
1341 /* true */ {cr_true
, cr_true
, cr_true
, cr_true
}
1345 /* undefined undefined false true */
1346 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1347 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1348 /* false */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1349 /* true */ {cr_true
, cr_true
, cr_true
, cr_false
}
1353 /* undefined undefined false true */
1354 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1355 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1356 /* false */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1357 /* true */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
}
1361 /* undefined undefined false true */
1362 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1363 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1364 /* false */ {cr_true
, cr_true
, cr_true
, cr_false
},
1365 /* true */ {cr_false
, cr_false
, cr_false
, cr_false
}
1369 /* undefined undefined false true */
1370 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1371 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1372 /* false */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1373 /* true */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
}
1377 /* undefined undefined false true */
1378 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1379 /* undefined */ {cr_undefined
, cr_undefined
, cr_false
, cr_true
},
1380 /* false */ {cr_true
, cr_true
, cr_true
, cr_true
},
1381 /* true */ {cr_false
, cr_false
, cr_false
, cr_true
}
1385 /* undefined undefined false true */
1386 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1387 /* undefined */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
},
1388 /* false */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1389 /* true */ {cr_undefined
, cr_undefined
, cr_undefined
, cr_undefined
}
1393 /* undefined undefined false true */
1394 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1395 /* undefined */ {cr_undefined
, cr_undefined
, cr_true
, cr_false
},
1396 /* false */ {cr_false
, cr_false
, cr_false
, cr_false
},
1397 /* true */ {cr_true
, cr_true
, cr_true
, cr_false
}
1402 frvbf_cr_logic (SIM_CPU
*current_cpu
, SI operation
, UQI arg1
, UQI arg2
)
1404 return cr_logic
[operation
][arg1
][arg2
];
1407 /* Cache Manipulation. */
1409 frvbf_insn_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1411 /* If we need to count cycles, then the cache operation will be
1412 initiated from the model profiling functions.
1413 See frvbf_model_.... */
1414 int hsr0
= GET_HSR0 ();
1415 if (GET_HSR0_ICE (hsr0
))
1419 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1420 CPU_LOAD_LENGTH (current_cpu
) = length
;
1421 CPU_LOAD_LOCK (current_cpu
) = lock
;
1425 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1426 frv_cache_preload (cache
, address
, length
, lock
);
1432 frvbf_data_cache_preload (SIM_CPU
*current_cpu
, SI address
, USI length
, int lock
)
1434 /* If we need to count cycles, then the cache operation will be
1435 initiated from the model profiling functions.
1436 See frvbf_model_.... */
1437 int hsr0
= GET_HSR0 ();
1438 if (GET_HSR0_DCE (hsr0
))
1442 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1443 CPU_LOAD_LENGTH (current_cpu
) = length
;
1444 CPU_LOAD_LOCK (current_cpu
) = lock
;
1448 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1449 frv_cache_preload (cache
, address
, length
, lock
);
1455 frvbf_insn_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1457 /* If we need to count cycles, then the cache operation will be
1458 initiated from the model profiling functions.
1459 See frvbf_model_.... */
1460 int hsr0
= GET_HSR0 ();
1461 if (GET_HSR0_ICE (hsr0
))
1464 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1467 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1468 frv_cache_unlock (cache
, address
);
1474 frvbf_data_cache_unlock (SIM_CPU
*current_cpu
, SI address
)
1476 /* If we need to count cycles, then the cache operation will be
1477 initiated from the model profiling functions.
1478 See frvbf_model_.... */
1479 int hsr0
= GET_HSR0 ();
1480 if (GET_HSR0_DCE (hsr0
))
1483 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1486 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1487 frv_cache_unlock (cache
, address
);
1493 frvbf_insn_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1495 /* Make sure the insn was specified properly. -1 will be passed for ALL
1496 for a icei with A=0. */
1499 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1503 /* If we need to count cycles, then the cache operation will be
1504 initiated from the model profiling functions.
1505 See frvbf_model_.... */
1508 /* Record the all-entries flag for use in profiling. */
1509 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1510 ps
->all_cache_entries
= all
;
1511 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1515 FRV_CACHE
*cache
= CPU_INSN_CACHE (current_cpu
);
1517 frv_cache_invalidate_all (cache
, 0/* flush? */);
1519 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1524 frvbf_data_cache_invalidate (SIM_CPU
*current_cpu
, SI address
, int all
)
1526 /* Make sure the insn was specified properly. -1 will be passed for ALL
1527 for a dcei with A=0. */
1530 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1534 /* If we need to count cycles, then the cache operation will be
1535 initiated from the model profiling functions.
1536 See frvbf_model_.... */
1539 /* Record the all-entries flag for use in profiling. */
1540 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1541 ps
->all_cache_entries
= all
;
1542 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1546 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1548 frv_cache_invalidate_all (cache
, 0/* flush? */);
1550 frv_cache_invalidate (cache
, address
, 0/* flush? */);
1555 frvbf_data_cache_flush (SIM_CPU
*current_cpu
, SI address
, int all
)
1557 /* Make sure the insn was specified properly. -1 will be passed for ALL
1558 for a dcef with A=0. */
1561 frv_queue_program_interrupt (current_cpu
, FRV_ILLEGAL_INSTRUCTION
);
1565 /* If we need to count cycles, then the cache operation will be
1566 initiated from the model profiling functions.
1567 See frvbf_model_.... */
1570 /* Record the all-entries flag for use in profiling. */
1571 FRV_PROFILE_STATE
*ps
= CPU_PROFILE_STATE (current_cpu
);
1572 ps
->all_cache_entries
= all
;
1573 CPU_LOAD_ADDRESS (current_cpu
) = address
;
1577 FRV_CACHE
*cache
= CPU_DATA_CACHE (current_cpu
);
1579 frv_cache_invalidate_all (cache
, 1/* flush? */);
1581 frv_cache_invalidate (cache
, address
, 1/* flush? */);