/* This file is part of the program psim.

   Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   */


#ifndef _VM_C_
#define _VM_C_

#ifndef STATIC_INLINE_VM
#define STATIC_INLINE_VM STATIC_INLINE
#endif


#include "basics.h"

#include "registers.h"

#include "device_tree.h"
#include "corefile.h"

#include "vm.h"

#include "interrupts.h"

#include "mon.h"

/* OEA vs VEA

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to instruction space and
   the execution of data space are prevented.

   For the OEA model things are more complex.  The reason for
   separate instruction and data models becomes crucial.  The OEA
   model is built out of three parts: an instruction map, a data map
   and an underlying structure that provides access to the VM data
   kept in main memory. */

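
/* A rough sketch (added commentary, not original to this file) of
   the OEA translation pipeline implemented below; see
   om_translate_effective_to_real() for the authoritative sequence:

     ea --(BAT array match?)--------------------------> ra
     ea --(segment register / segment table)----------> va
     va --(page TLB, else hashed page table)----------> ra

   A BAT hit bypasses the segment and page lookups entirely, and
   with relocation disabled (MSR IR/DR clear) ea is used as ra
   directly. */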

/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal
   data structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the
   time of writing) the memory data types for the BAT registers are
   rebuilt whenever the processor moves between problem and system
   states. */


/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { /*r  w  i  a       pp */
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { /*r  w  i  a       pp */
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};
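
/* Illustrative sketch (not part of the original source): how the
   matrix above is consulted.  The helper name om_check_access and
   its signature are invented here for illustration only; the real
   lookup is inlined in om_translate_effective_to_real() below. */
#if 0
static int
om_check_access(int key,            /* Ks/Kp bit for the current privilege */
                int pp,             /* PP protection bits from the PTE */
                om_access_types access)
{
  /* non-zero means the access is permitted */
  return om_valid_access[key][pp][access];
}
#endif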


/* Bat translation:

   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;
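
/* Worked example (added commentary): for the minimum 128kb (2^17
   byte) block, block_length_mask selects the low 17 address bits
   and, following om_effective_to_bat() and om_unpack_bat() below,

     match:     (ea & ~block_length_mask) == block_effective_page_index
     translate: ra = (ea & block_length_mask) | block_real_page_number

   i.e. the high-order bits of the effective address are replaced
   wholesale by the block's real page number. */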


/* Segment TLB:

   In this model the 32 and 64 bit segment tables are treated in
   very similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment TLB. */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id;
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;
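
/* Worked example (added commentary): with start/stop bits 32 and 35
   the index is the top four bits of a 32-bit effective address (bit
   0 being the most significant bit, as in the PowerPC architecture
   books), so ea 0x30001234 selects segment register/TLB entry 3 of
   the nr_om_segment_tlb_entries (16) entries. */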


/* Page TLB:

   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */

enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

typedef struct _om_page_tlb_entry {
  int valid;
  int protection;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;
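
/* Illustrative note (added commentary): the TLB is direct mapped,
   so an effective address picks exactly one entry - bits 46..51
   (the low six bits of the 16-bit page index field, given 4kb
   pages) select one of the 64 entries, and a hit additionally
   requires the cached VSID and masked page to match; see
   om_virtual_to_real() below. */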


/* Memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information. */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

} om_map;


/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};


/* VM:

   Underlying memory object.  For the VEA this is just the core_map.
   For the OEA it is the instruction and data memory translations. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};


/* OEA Support procedures */


STATIC_INLINE_VM unsigned_word
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM unsigned_word
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM unsigned_word
om_masked_page(unsigned_word ea)
{
  unsigned_word masked_page = MASKED(ea, 36, 51);
  return masked_page;
}

STATIC_INLINE_VM unsigned_word
om_masked_byte(unsigned_word ea)
{
  unsigned_word masked_byte = MASKED(ea, 52, 63);
  return masked_byte;
}
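
/* Illustrative decomposition (added commentary): using the PowerPC
   0-is-msb bit numbering over a 64-bit word, a 32-bit effective
   address splits as

     bits 32..35  segment register / segment TLB index (4 bits)
     bits 36..51  page index within the segment (16 bits)
     bits 52..63  byte offset within a 4kb page (12 bits)

   so for ea 0x30001234: segment 3, om_masked_page() leaves 0x1000
   (page index 1, kept in place) and om_masked_byte() yields 0x234. */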



INLINE_VM vm *
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("new_vm() - internal error with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("new_vm() - internal error with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}


STATIC_INLINE_VM om_bat *
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}


STATIC_INLINE_VM om_segment_tlb_entry *
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        /* byte order? */
        unsigned_word segment_table_entry_dword_0 =
          core_map_read_8(map->physical, segment_table_entry, processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          core_map_read_8(map->physical, segment_table_entry + 8, processor, cia);
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit some things */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
             : om_access_any);
          segment_tlb_entry->masked_virtual_segment_id =
            MASKED(segment_table_entry_dword_1, 0, 51);
          return segment_tlb_entry;
        }
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}



STATIC_INLINE_VM om_page_tlb_entry *
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if (page_tlb_entry->valid
      && (page_tlb_entry->masked_virtual_segment_id ==
          segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page == om_masked_page(ea))) {
    error("fixme - it is not a hit if direction/update bits do not match\n");
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash = (segment_tlb_entry->masked_virtual_segment_id
                               ^ om_masked_page(ea));
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte;
      for (real_address_of_pte = real_address_of_pte_group;
           real_address_of_pte < (real_address_of_pte_group
                                  + sizeof_pte_group);
           real_address_of_pte += sizeof_pte) {
        unsigned_word pte_word_0 =
          core_map_read_word(map->physical,
                             real_address_of_pte,
                             processor, cia);
        unsigned_word pte_word_1 =
          core_map_read_word(map->physical,
                             real_address_of_pte + sizeof_pte / 2,
                             processor, cia);
        error("fixme - check pte hit %ld %ld\n",
              (long)pte_word_0,
              (long)pte_word_1);
        if (1) {
          error("fixme - update the page_tlb\n");
          page_tlb_entry->valid = 1;
          page_tlb_entry->protection = 0;
          page_tlb_entry->masked_virtual_segment_id = 0;
          page_tlb_entry->masked_page = 0;
          page_tlb_entry->masked_real_page_number = 0;
          return page_tlb_entry;
        }
      }
      page_hash = ~page_hash; /* the secondary hash is the one's
                                 complement of the primary hash */
    }
  }
  return NULL;
}
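
/* Illustrative note (added commentary): both table walks above
   follow the same two-pass pattern - compute the primary hash from
   the VSID and page index, OR the masked hash onto the table origin
   to get a PTE group, linearly scan that group's PTEs
   (sizeof_pte_group / sizeof_pte of them), and if nothing matches
   repeat once with the one's complement of the hash (the secondary
   hash) before giving up and returning NULL. */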


static void
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("om_interrupt - unexpected access type %d, cia=0x%x, ea=0x%x\n",
          access, cia, ea);
  }
}


STATIC_INLINE_VM unsigned_word
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("%s, direct map, ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("%s, bat protection violation, ea=0x%x\n",
                       "om_translate_effective_to_real",
                       ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("%s, bat translation, ea=0x%x, ra=0x%x\n",
                     "om_translate_effective_to_real",
                     ea, ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, segment tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("%s, segment tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* lookup in PTE */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access,
                                      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("%s, page tlb lookup failed - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("%s, page tlb access invalid - ea=0x%x\n",
                     "om_translate_effective_to_real",
                     ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  ra = (page_tlb_entry->masked_real_page_number
        | om_masked_byte(ea));
  TRACE(trace_vm, ("%s, page - ea=0x%x, ra=0x%x\n",
                   "om_translate_effective_to_real",
                   ea, ra));
  return ra;
}


/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */
STATIC_INLINE_VM void
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within the block */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}
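
/* Worked example (added commentary): BL (ubat bits 51..61) is a
   mask of ones extending the minimum 128kb (2^17 byte) block.  With
   BL = 0 the shift above leaves block_length_mask covering just the
   low 17 bits, so the BAT maps a 128kb block; each additional BL
   bit doubles that, up to 256mb with all eleven bits set.  The
   BEPI/BRPN fields (bits 0..46) then supply the matched effective
   bits and their real replacement. */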


/* rebuild the given bat table */
STATIC_INLINE_VM void
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 62, 62)
        : EXTRACTED(ubat, 63, 63)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  if (which_sr < 0 || which_sr >= nr_om_segment_tlb_entries)
    error("om_unpack_sr: segment register out of bounds\n");

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    error("om_unpack_sr(): unsupported value of T in segment register %d\n",
          which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id = MASKED32(new_sr_value, 8, 31);
}
#endif


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM void
om_unpack_srs(vm *virtual,
              sreg *srs)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr);
  }
}
#endif


/* Rebuild all the data structures for the new context as specified
   by the passed registers */
INLINE_VM void
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr)
{

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;


  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;


  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);


  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = EXTRACTED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(47-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                57);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = EXTRACTED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = ((EXTRACTED32(sprs[spr_sdr1], 23, 31) << (10+6))
                          | MASK32(16, 25));
#endif
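  /* Worked example (added commentary): on a 32-bit target SDR1
     holds HTABORG (bits 0..15, the high-order half of the table's
     real address) and HTABMASK (bits 23..31).  With HTABMASK = 0
     the mask above is at its minimum, corresponding to the
     architected minimum 64kb page table of 1024 64-byte PTE groups;
     each additional HTABMASK bit doubles the number of groups. */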
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;


#if (WITH_TARGET_WORD_BITSIZE == 32)
  /* unpack the segment tlb registers */
  om_unpack_srs(virtual, srs);
#endif
}


INLINE_VM vm_data_map *
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


INLINE_VM vm_instruction_map *
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}


STATIC_INLINE_VM unsigned_word
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("vm_translate() - unknown environment\n");
    return 0;
  }
}


INLINE_VM unsigned_word
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}


INLINE_VM unsigned_word
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}

INLINE_VM instruction_word
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  return core_map_read_4(map->code, ra, processor, cia);
}


INLINE_VM int
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    /* read from the translated (real) address, mirroring the write
       path below */
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}


INLINE_VM int
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    NULL/*processor*/,
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}
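
/* Usage sketch (added commentary, not original): both buffer
   routines translate with abort disabled, treating the all-ones
   value MASK(0, 63) as an "untranslatable" sentinel, and return the
   number of bytes actually transferred; a short count therefore
   signals a fault part way through the copy.  In the snippet below,
   map and addr stand for a caller's vm_data_map and effective
   address. */
#if 0
{
  unsigned_1 buffer[16];
  if (vm_data_map_read_buffer(map, buffer, addr, sizeof(buffer))
      != sizeof(buffer))
    error("read faulted before %d bytes were transferred\n",
          (int)sizeof(buffer));
}
#endif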


/* define the read/write 1/2/4/8/word functions */

#undef N
#define N 1
#include "vm_n.h"

#undef N
#define N 2
#include "vm_n.h"

#undef N
#define N 4
#include "vm_n.h"

#undef N
#define N 8
#include "vm_n.h"

#undef N
#define N word
#include "vm_n.h"



#endif /* _VM_C_ */