/* This file is part of the program psim.

   Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   */

#ifndef STATIC_INLINE_VM
#define STATIC_INLINE_VM STATIC_INLINE
#endif

#include "registers.h"
#include "device_tree.h"
#include "interrupts.h"

/* OEA vs VEA:

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to instruction space and
   execution of data space are prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts: an instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */

/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal data
   structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for the BAT registers are rebuilt
   whenever the processor moves between problem and system states. */
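
/* For example (a sketch only; the cpu_* accessor names are
   assumptions, not necessarily psim's), the semantics of a context
   synchronizing event such as mtmsr would end with something like:

       vm_synchronize_context(cpu_virtual_memory(processor),
                              cpu_registers(processor)->spr,
                              cpu_registers(processor)->sr,
                              cpu_registers(processor)->msr);

   (vm_synchronize_context() is defined towards the end of this file). */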

/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read,
  om_data_write,
  om_instruction_read,
  om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  {
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  {
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
};
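
/* A worked example: in a context where the key bit is 1 (first index),
   a page with PP protection bits 0b11 (second index) permits loads but
   not stores:

       om_valid_access[1][0x3][om_data_read]  == 1
       om_valid_access[1][0x3][om_data_write] == 0
*/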

/* BAT translation:

   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;
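
/* With the fields pre-masked like this, a BAT hit and translation
   reduce to a mask-and-compare plus an or (this is exactly what
   om_effective_to_bat() and its caller do below):

       if ((ea & bat->block_effective_page_index_mask)
           == bat->block_effective_page_index)
         ra = (ea & bat->block_length_mask) | bat->block_real_page_number;
*/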

/* Segment TLB:

   In this model the 32 and 64 bit segment tables are treated in very
   similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment tlb. */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id;
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;
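
/* The tlb is indexed directly by effective-address bits 32..35 (the
   top four bits of a 32-bit address).  Illustrative example:

       ea == 0x30001000  -->  om_segment_tlb_index(ea) == 0x3

   which is exactly how one of the sixteen 32-bit segment registers
   would be selected. */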

/* Page TLB:

   This OEA model includes a small direct-map page TLB.  The TLB
   exists to cut down on the need for the OEA model to perform walks
   of the page hash table. */

enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

typedef struct _om_page_tlb_entry {
  int valid;
  int protection;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;
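
/* Illustrative only: an effective address selects its entry with a
   direct-map lookup on bits 46..51 of the page number,

       om_page_tlb_entry *entry
         = map->page_tlb->entry + om_page_tlb_index(ea);

   giving 1 << (51 - 46 + 1) == 64 entries; the consistency of these
   constants is checked in vm_create() below. */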

/* memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information. */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

} om_map;
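
/* Taken together the stages give the following overall flow (a sketch
   of om_translate_effective_to_real() below):

       if (!map->is_relocate)
         ra = ea;                                      direct map
       else if (a BAT matches)
         ra = via the BAT;                             block translation
       else
         va = ea via segment tlb (or segment table);
         ra = va via page tlb (or hashed page table);
*/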

/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};

/* VM:

   The underlying memory object.  For the VEA this is just the
   core_map.  For the OEA it is the instruction and data memory
   translations. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};

/* OEA Support procedures */


STATIC_INLINE_VM unsigned_word
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM unsigned_word
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM unsigned_word
om_masked_page(unsigned_word ea)
{
  unsigned_word masked_page = MASKED(ea, 36, 51);
  return masked_page;
}

STATIC_INLINE_VM unsigned_word
om_masked_byte(unsigned_word ea)
{
  unsigned_word masked_byte = MASKED(ea, 52, 63);
  return masked_byte;
}
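
/* Illustrative only: with the 0..63 (IBM) bit numbering used by the
   MASKED/EXTRACTED macros, bits 36..51 are the 16-bit page index and
   bits 52..63 the 12-bit byte offset, so for ea == 0x00345678:

       om_masked_page(ea) == 0x00345000   (page bits, left in place)
       om_masked_byte(ea) == 0x00000678   (byte offset within the page)
*/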

INLINE_VM vm *
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("vm_create() - internal error with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("vm_create() - internal error with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}
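
/* A sketch of creation-time wiring (the core_create() call is an
   assumption - in psim the core object is normally built from the
   device tree):

       core *physical = core_create(...);
       vm *virtual = vm_create(physical);
       vm_instruction_map *imap = vm_create_instruction_map(virtual);
       vm_data_map *dmap = vm_create_data_map(virtual);
*/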

STATIC_INLINE_VM om_bat *
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}

STATIC_INLINE_VM om_segment_tlb_entry *
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id
          == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        unsigned_word segment_table_entry_dword_0 =
          core_map_read_8(map->physical, segment_table_entry,
                          processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          core_map_read_8(map->physical, segment_table_entry + 8,
                          processor, cia);
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit some things */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
             : om_access_any);
          segment_tlb_entry->masked_virtual_segment_id =
            MASKED(segment_table_entry_dword_1, 0, 51);
          return segment_tlb_entry;
        }
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}

STATIC_INLINE_VM om_page_tlb_entry *
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if (page_tlb_entry->valid
      && (page_tlb_entry->masked_virtual_segment_id ==
          segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page == om_masked_page(ea))) {
    error("fixme - it is not a hit if direction/update bits do not match\n");
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash = (segment_tlb_entry->masked_virtual_segment_id
                               ^ om_masked_page(ea));
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte;
      for (real_address_of_pte = real_address_of_pte_group;
           real_address_of_pte < (real_address_of_pte_group
                                  + sizeof_pte_group);
           real_address_of_pte += sizeof_pte) {
        unsigned_word pte_word_0 =
          core_map_read_word(map->physical,
                             real_address_of_pte,
                             processor, cia);
        unsigned_word pte_word_1 =
          core_map_read_word(map->physical,
                             real_address_of_pte + sizeof_pte / 2,
                             processor, cia);
        error("fixme - check pte hit\n");
        error("fixme - update the page_tlb\n");
        page_tlb_entry->valid = 1;
        page_tlb_entry->protection = 0;
        page_tlb_entry->masked_virtual_segment_id = 0;
        page_tlb_entry->masked_page = 0;
        page_tlb_entry->masked_real_page_number = 0;
        return page_tlb_entry;
      }
      page_hash = ~page_hash; /*???*/
    }
  }
  return NULL;
}
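
/* The hash probing above follows the architected scheme: the primary
   hash xors the virtual segment id with the page index, and the
   secondary probe simply complements the hash.  In outline:

       page_hash = masked_virtual_segment_id ^ om_masked_page(ea);
       group     = real_address_of_page_table
                   | (page_hash & page_table_hash_mask);
       ... on a miss, retry once with page_hash = ~page_hash;
*/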

STATIC_INLINE_VM void
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("om_interrupt - unexpected access type %d, cia=0x%x, ea=0x%x\n",
          access, cia, ea);
  }
}

STATIC_INLINE_VM unsigned_word
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("%s, direct map, ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("%s, bat protection violation, ea=0x%x\n",
                       "om_translate_effective_to_real",
                       ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }
    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("%s, bat translation, ea=0x%x, ra=0x%x\n",
                     "om_translate_effective_to_real",
                     ea, ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, segment tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif

  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("%s, segment tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* translate va to ra using the page tlb/hashed page table */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access,
                                      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, page tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* check the page tlb's protection */
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("%s, page tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  ra = (page_tlb_entry->masked_real_page_number
        | om_masked_byte(ea));
  TRACE(trace_vm, ("%s, page - ea=0x%x, ra=0x%x\n",
                   "om_translate_effective_to_real",
                   ea, ra));
  return ra;
}

/*
 * Definition of operations for memory management
 */

/* rebuild all the relevant bat information */
STATIC_INLINE_VM void
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}

/* rebuild the given bat table */
STATIC_INLINE_VM void
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 62, 62)
        : EXTRACTED(ubat, 63, 63)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}

#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  if (which_sr < 0 || which_sr >= nr_om_segment_tlb_entries)
    error("om_unpack_sr: segment register out of bounds\n");

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    error("om_unpack_sr(): unsupported value of T in segment register %d\n",
          which_sr);

  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id = MASKED32(new_sr_value, 8, 31);
}
#endif
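
/* For reference, the 32-bit segment register fields unpacked above:

       bit  0       T    direct-store segment (unsupported here)
       bit  1       Ks   supervisor-state key
       bit  2       Kp   problem-state key
       bit  3       N    no-execute
       bits 8..31   VSID virtual segment id (kept masked in place)
*/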

#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_srs(vm *virtual,
              sreg *srs)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr);
  }
}
#endif

/* Rebuild all the data structures for the new context as specified by
   the passed registers */

INLINE_VM void
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr)
{
  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = EXTRACTED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(47-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                56);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = EXTRACTED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = ((EXTRACTED32(sprs[spr_sdr1], 23, 31) << (10+6))
                          | MASK32(16, 25));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;

#if (WITH_TARGET_WORD_BITSIZE == 32)
  /* unpack the segment tlb registers */
  om_unpack_srs(virtual, srs);
#endif
}

INLINE_VM vm_data_map *
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


INLINE_VM vm_instruction_map *
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}

STATIC_INLINE_VM unsigned_word
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("vm_translate() - unknown environment\n");
    return 0;
  }
}

INLINE_VM unsigned_word
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}

INLINE_VM unsigned_word
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}

INLINE_VM instruction_word
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  return core_map_read_4(map->code, ra, processor, cia);
}
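
/* Illustrative only: a cpu's fetch step would read the instruction at
   the current instruction address with

       instruction_word instruction
         = vm_instruction_map_read(imap, processor, cia);

   where imap came from vm_create_instruction_map() above. */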

INLINE_VM unsigned
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*!abort*/
    if (ra == MASK(0, 63))
      break;
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}

INLINE_VM unsigned
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*!abort*/
    if (ra == MASK(0, 63))
      break;
    byte = T2H_1(((const unsigned_1*)source)[count]);
    /* a write that may violate read-only sections goes through the
       (larger) readable map instead of the writeable one */
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}