/* This file is part of the program psim.

   Copyright (C) 1994-1995, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.

   */


#ifndef _VM_C_
#define _VM_C_

#include "basics.h"
#include "registers.h"
#include "device.h"
#include "corefile.h"
#include "vm.h"
#include "interrupts.h"
#include "mon.h"

/* OEA vs VEA

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_maps for the instruction and
   data address spaces, so that writes to instruction space and
   execution of data space are prevented.

   For the OEA model things are more complex, and the reason for
   separate instruction and data models becomes crucial.  The OEA
   model is built out of three parts: an instruction map, a data map
   and an underlying structure that provides access to the VM data
   kept in main memory. */


/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal
   data structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for the BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Unpacked values are stored in the OEA model pre-aligned to where
   they will be needed when forming the PTE address. */


/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { /*r  w  i  a       pp */
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { /*r  w  i  a       pp */
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};

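/* A minimal sketch (not part of the original source) of how the table
   above is consulted: it is indexed as om_valid_access[key][pp][access],
   where KEY is the segment key bit selected by the current processor
   state, PP the two page-protection bits, and ACCESS the attempted
   access type.  For instance, with the key bit set, pp=01 permits
   reads but rejects writes. */
#if 0
static int
example_check_access(int key, int pp, om_access_types access)
{
  return om_valid_access[key][pp][access];
}
/* example_check_access(1, 1, om_data_write) == 0 -- write rejected */
/* example_check_access(1, 1, om_data_read) == 1 -- read permitted */
#endif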

/* BAT translation:

   The bat data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;

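/* A minimal sketch (not part of the original source): an effective
   address lies within a BAT mapping when its upper bits, selected by
   the page index mask, equal the stored effective page index; the
   real address is then the block real page number ORed with the
   offset kept by the block length mask (see om_effective_to_bat()
   and om_translate_effective_to_real() below). */
#if 0
static unsigned_word
example_bat_translate(om_bat *bat, unsigned_word ea)
{
  if ((ea & bat->block_effective_page_index_mask)
      != bat->block_effective_page_index)
    return MASK(0, 63); /* no match */
  return (ea & bat->block_length_mask) | bat->block_real_page_number;
}
#endif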

/* Segment TLB:

   In this model the 32 and 64 bit segment tables are treated in very
   similar ways.  The 32 bit segment registers are treated as a
   simplification of the 64 bit segment TLB. */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id; /* aligned ready for pte addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;

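/* Worked example (illustrative, not from the original source): the
   index bits 32..35 follow this file's 64 bit numbering convention,
   under which a 32 bit effective address occupies bits 32..63; they
   therefore select the top nibble of a 32 bit EA, so EA 0x30001234
   is covered by segment register 3. */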

/* Page TLB:

   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */

enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

enum {
  invalid_tlb_vsid = MASK(0, 63),
};

typedef struct _om_page_tlb_entry {
  int protection;
  int changed;
  unsigned_word real_address_of_pte_1;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;

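/* Worked example (illustrative, not from the original source): the
   virtual page number occupies EA bits 36..51, so index bits 46..51
   are its low six bits; consecutive pages therefore fall in
   consecutive ones of the 64 direct-mapped entries. */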

/* Memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information. */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

  /* address xor for PPC endian */
  unsigned xor[WITH_XOR_ENDIAN];

} om_map;


/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};


/* VM:

   Underlying memory object.  For the VEA this is just the core_map.
   For the OEA it is the instruction and data memory translations. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};


/* OEA support procedures */


unsigned_word STATIC_INLINE_VM
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_segment_tlb_index_start_bit,
                                  om_segment_tlb_index_stop_bit);
  return index;
}

unsigned_word STATIC_INLINE_VM
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
                                  om_page_tlb_index_start_bit,
                                  om_page_tlb_index_stop_bit);
  return index;
}

unsigned_word STATIC_INLINE_VM
om_hash_page(unsigned_word masked_vsid,
             unsigned_word ea)
{
  unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return masked_vsid ^ INSERTED32(extracted_ea, 7, 31-6);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return masked_vsid ^ INSERTED64(extracted_ea, 18, 63-7);
#endif
}
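
/* A minimal sketch (not part of the original source): the hash above
   is pre-aligned so that, ANDed with the page table hash mask and
   ORed with the real address of the page table (HTABORG), it yields
   the real address of a PTE group directly.  The secondary hash is
   the one's complement of the primary (see om_virtual_to_real()
   below). */
#if 0
static unsigned_word
example_pteg_address(om_map *map,
                     unsigned_word masked_vsid,
                     unsigned_word ea)
{
  unsigned_word hash = om_hash_page(masked_vsid, ea);
  return (map->real_address_of_page_table
          | (hash & map->page_table_hash_mask));
}
#endif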

unsigned_word STATIC_INLINE_VM
om_pte_0_api(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 26, 31);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 52, 56);
#endif
}

unsigned_word STATIC_INLINE_VM
om_pte_0_hash(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 25, 25);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 62, 62);
#endif
}

int STATIC_INLINE_VM
om_pte_0_valid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return MASKED32(pte_0, 0, 0) != 0;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return MASKED64(pte_0, 63, 63) != 0;
#endif
}

unsigned_word STATIC_INLINE_VM
om_ea_masked_page(unsigned_word ea)
{
  return MASKED(ea, 36, 51);
}

unsigned_word STATIC_INLINE_VM
om_ea_masked_byte(unsigned_word ea)
{
  return MASKED(ea, 52, 63);
}

unsigned_word STATIC_INLINE_VM
om_pte_0_masked_vsid(unsigned_word pte_0)
{
  return INSERTED32(EXTRACTED32(pte_0, 1, 24), 7-5, 31-6);
}

unsigned_word STATIC_INLINE_VM
om_pte_1_pp(unsigned_word pte_1)
{
  return MASKED(pte_1, 62, 63); /*PP*/
}

int STATIC_INLINE_VM
om_pte_1_referenced(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 55, 55);
}

int STATIC_INLINE_VM
om_pte_1_changed(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 56, 56);
}

/* note: returns unsigned_word, not int - the masked RPN occupies the
   high-order bits and would otherwise be truncated */
unsigned_word STATIC_INLINE_VM
om_pte_1_masked_rpn(unsigned_word pte_1)
{
  return MASKED(pte_1, 0, 51); /*RPN*/
}

unsigned_word STATIC_INLINE_VM
om_ea_api(unsigned_word ea)
{
  return EXTRACTED(ea, 36, 41);
}

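/* A minimal sketch (not part of the original source): taken together,
   the extractors above decompose an effective address into the fields
   used during translation -- segment (om_segment_tlb_index), page
   within segment (om_ea_masked_page, or its abbreviation om_ea_api as
   stored in a PTE) and byte within page (om_ea_masked_byte). */
#if 0
static void
example_decompose_ea(unsigned_word ea,
                     unsigned_word *segment,
                     unsigned_word *page,
                     unsigned_word *byte)
{
  *segment = om_segment_tlb_index(ea);
  *page = om_ea_masked_page(ea);
  *byte = om_ea_masked_byte(ea);
}
#endif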

/* Page and segment table read/write operators; these need to
   account for the PPC's XOR addressing operation */

unsigned_word STATIC_INLINE_VM
om_read_word(om_map *map,
             unsigned_word ra,
             cpu *processor,
             unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  return core_map_read_word(map->physical, ra, processor, cia);
}

void STATIC_INLINE_VM
om_write_word(om_map *map,
              unsigned_word ra,
              unsigned_word val,
              cpu *processor,
              unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  core_map_write_word(map->physical, ra, val, processor, cia);
}


/* Bring things into existence */

vm INLINE_VM *
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
                - om_segment_tlb_index_start_bit + 1)))
    error("new_vm() - internal error with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
                - om_page_tlb_index_start_bit + 1)))
    error("new_vm() - internal error with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}

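/* Usage sketch (illustrative, not from the original source): the
   simulator creates the core first, attaches the vm to it and then
   obtains per-processor views via vm_create_instruction_map() and
   vm_create_data_map(), both defined further below. */
#if 0
static void
example_setup(core *physical)
{
  vm *virtual = vm_create(physical);
  vm_instruction_map *imap = vm_create_instruction_map(virtual);
  vm_data_map *dmap = vm_create_data_map(virtual);
  /* ... hand imap/dmap to the cpu ... */
}
#endif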

om_bat STATIC_INLINE_VM *
om_effective_to_bat(om_map *map,
                    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
        != bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}


om_segment_tlb_entry STATIC_INLINE_VM *
om_effective_to_virtual(om_map *map,
                        unsigned_word ea,
                        cpu *processor,
                        unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
                                             + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
        (map->real_address_of_segment_table
         | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
           segment_table_entry < (segment_table_entry_group
                                  + sizeof_segment_table_entry_group);
           segment_table_entry += sizeof_segment_table_entry) {
        /* byte order? */
        unsigned_word segment_table_entry_dword_0 =
          om_read_word(map, segment_table_entry, processor, cia);
        unsigned_word segment_table_entry_dword_1 =
          om_read_word(map, segment_table_entry + 8,
                       processor, cia);
        int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
        unsigned_word masked_effective_segment_id =
          MASKED64(segment_table_entry_dword_0, 0, 35);
        if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
          /* don't permit some things */
          if (MASKED64(segment_table_entry_dword_0, 57, 57))
            error("om_effective_to_virtual() - T=1 in STE not supported\n");
          /* update segment tlb */
          segment_tlb_entry->is_valid = is_valid;
          segment_tlb_entry->masked_effective_segment_id =
            masked_effective_segment_id;
          segment_tlb_entry->key[om_supervisor_state] =
            EXTRACTED64(segment_table_entry_dword_0, 58, 58);
          segment_tlb_entry->key[om_problem_state] =
            EXTRACTED64(segment_table_entry_dword_0, 59, 59);
          segment_tlb_entry->invalid_access =
            (MASKED64(segment_table_entry_dword_0, 60, 60)
             ? om_instruction_read
             : om_access_any);
          segment_tlb_entry->masked_virtual_segment_id =
            INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
                       18-13, 63-7); /* align ready for pte addr */
          return segment_tlb_entry;
        }
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}


om_page_tlb_entry STATIC_INLINE_VM *
om_virtual_to_real(om_map *map,
                   unsigned_word ea,
                   om_segment_tlb_entry *segment_tlb_entry,
                   om_access_types access,
                   cpu *processor,
                   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
                                       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if ((page_tlb_entry->masked_virtual_segment_id
       == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page
          == om_ea_masked_page(ea))) {
    TRACE(trace_vm, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
                     (long)ea, (long)page_tlb_entry));
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash =
      om_hash_page(segment_tlb_entry->masked_virtual_segment_id, ea);
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
        (map->real_address_of_page_table
         | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte_0;
      TRACE(trace_vm,
            ("ea=0x%lx - htab search - pteg=0x%lx htab=0x%lx mask=0x%lx hash=0x%lx\n",
             (long)ea, (long)real_address_of_pte_group,
             (long)map->real_address_of_page_table,
             (long)map->page_table_hash_mask,
             (long)page_hash));
      for (real_address_of_pte_0 = real_address_of_pte_group;
           real_address_of_pte_0 < (real_address_of_pte_group
                                    + sizeof_pte_group);
           real_address_of_pte_0 += sizeof_pte) {
        unsigned_word pte_0 = om_read_word(map,
                                           real_address_of_pte_0,
                                           processor, cia);
        /* did we hit? */
        if (om_pte_0_valid(pte_0)
            && (current_hash == om_pte_0_hash(pte_0))
            && (segment_tlb_entry->masked_virtual_segment_id
                == om_pte_0_masked_vsid(pte_0))
            && (om_ea_api(ea) == om_pte_0_api(pte_0))) {
          unsigned_word real_address_of_pte_1 = (real_address_of_pte_0
                                                 + sizeof_pte / 2);
          unsigned_word pte_1 = om_read_word(map,
                                             real_address_of_pte_1,
                                             processor, cia);
          page_tlb_entry->protection = om_pte_1_pp(pte_1);
          page_tlb_entry->changed = om_pte_1_changed(pte_1);
          page_tlb_entry->masked_virtual_segment_id =
            segment_tlb_entry->masked_virtual_segment_id;
          page_tlb_entry->masked_page = om_ea_masked_page(ea);
          page_tlb_entry->masked_real_page_number = om_pte_1_masked_rpn(pte_1);
          page_tlb_entry->real_address_of_pte_1 = real_address_of_pte_1;
          if (!om_pte_1_referenced(pte_1)) {
            om_write_word(map,
                          real_address_of_pte_1,
                          pte_1 | BIT(55),
                          processor, cia);
            TRACE(trace_vm,
                  ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
                   (long)ea, (long)page_tlb_entry,
                   (long)real_address_of_pte_1));
          }
          else {
            TRACE(trace_vm,
                  ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
                   (long)ea, (long)page_tlb_entry,
                   (long)real_address_of_pte_1));
          }
          return page_tlb_entry;
        }
      }
      page_hash = ~page_hash; /* try the secondary hash */
    }
  }
  return NULL;
}


void STATIC_INLINE_VM
om_interrupt(cpu *processor,
             unsigned_word cia,
             unsigned_word ea,
             om_access_types access,
             storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("om_interrupt - unexpected access type %d, cia=0x%lx, ea=0x%lx\n",
          access, (long)cia, (long)ea);
  }
}

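/* Translate an effective address to a real address: with relocation
   off the mapping is direct; otherwise the BATs are tried first, then
   segment translation followed by page translation (with a hash table
   walk refilling the page TLB on a miss).  On failure the appropriate
   storage interrupt is raised or, when ABORT is zero, MASK(0, 63) is
   returned as a failure marker. */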
unsigned_word STATIC_INLINE_VM
om_translate_effective_to_real(om_map *map,
                               unsigned_word ea,
                               om_access_types access,
                               cpu *processor,
                               unsigned_word cia,
                               int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
                     (long)ea, (long)ra));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("ea=0x%lx - bat access violation\n", (long)ea));
      if (abort)
        om_interrupt(processor, cia, ea, access,
                     protection_violation_storage_interrupt);
      else
        return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("ea=0x%lx - bat translation - ra=0x%lx\n",
                     (long)ea, (long)ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - segment tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("ea=0x%lx - segment access invalid\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* lookup in PTE */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
                                      access,
                                      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
        [segment_tlb_entry->key[map->is_problem_state]]
        [page_tlb_entry->protection]
        [access])) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb access violation\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
                   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* update change bit as needed */
  if (access == om_data_write && !page_tlb_entry->changed) {
    unsigned_word pte_1 = om_read_word(map,
                                       page_tlb_entry->real_address_of_pte_1,
                                       processor, cia);
    om_write_word(map,
                  page_tlb_entry->real_address_of_pte_1,
                  pte_1 | BIT(56),
                  processor, cia);
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
                     (long)ea, (long)page_tlb_entry,
                     (long)page_tlb_entry->real_address_of_pte_1));
  }

  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
                   (long)ea, (long)ra));
  return ra;
}


/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */
void STATIC_INLINE_VM
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}

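/* Worked example (illustrative, not from the original source): the BL
   field unpacked above occupies upper-BAT bits 51..61; shifted up by
   15 (17-2) and ORed with the low 17 bits it becomes the mask of the
   offset within the block.  BL = 0b00000000000 therefore gives a
   128KB block (mask 0x1ffff) and BL = 0b11111111111 a 256MB block
   (mask 0xfffffff). */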

/* rebuild the given bat table */
void STATIC_INLINE_VM
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 62, 62)
        : EXTRACTED(ubat, 63, 63)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}


#if (WITH_TARGET_WORD_BITSIZE == 32)
void STATIC_INLINE_VM
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  if (which_sr < 0 || which_sr >= nr_om_segment_tlb_entries)
    error("om_unpack_sr() - segment register out of bounds\n");

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    error("om_unpack_sr() - unsupported value of T in segment register %d\n",
          which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
               7-5, 31-6); /* align ready for pte address */
}
#endif


#if (WITH_TARGET_WORD_BITSIZE == 32)
void STATIC_INLINE_VM
om_unpack_srs(vm *virtual,
              sreg *srs)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr);
  }
}
#endif


/* Rebuild all the data structures for the new context as specified by
   the passed registers */
void INLINE_VM
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr)
{

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
                                     7, 7+9-1)
                          | MASK32(7+9, 31-6));
#endif
  virtual->instruction_map.translation.real_address_of_page_table =
    real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask =
    page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table =
    real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask =
    page_table_hash_mask;


  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs);
#endif

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
    int i = 1;
    unsigned mask;
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))
      mask = 0;
    else
      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);
      i = i * 2;
    }
  }
  else {
    /* don't allow the processor to change endian modes */
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
      error("vm_synchronize_context() - unsupported change of byte order\n");
  }
}

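/* Usage sketch (illustrative, not from the original source; the
   cpu_registers() accessor and its field names are assumptions): the
   simulator proper calls vm_synchronize_context() at each point where
   the translation context can change -- e.g. after writes to the MSR,
   BATs, segment registers or SDR1. */
#if 0
static void
example_after_mtmsr(vm *virtual, cpu *processor)
{
  registers *regs = cpu_registers(processor); /* assumed accessor */
  vm_synchronize_context(virtual, regs->sprs, regs->sr, regs->msr);
}
#endif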

vm_data_map INLINE_VM *
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


vm_instruction_map INLINE_VM *
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}


unsigned_word STATIC_INLINE_VM
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("vm_translate() - unknown environment\n");
    return 0;
  }
}


unsigned_word INLINE_VM
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}


unsigned_word INLINE_VM
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}

instruction_word INLINE_VM
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);
}


int INLINE_VM
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}

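/* Usage sketch (illustrative, not from the original source): the
   buffer routines translate and copy one byte at a time and return
   the number of bytes actually transferred, so a short return
   indicates a failed translation or unmapped memory rather than a
   raised interrupt. */
#if 0
static int
example_fetch_block(vm_data_map *map,
                    void *buf,
                    unsigned_word addr,
                    unsigned len)
{
  return vm_data_map_read_buffer(map, buf, addr, len) == (int)len;
}
#endif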

int INLINE_VM
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    NULL, /*processor*/
                                    0, /*cia*/
                                    0); /*dont-abort*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}


/* define the read/write 1/2/4/8/word functions */

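/* The repeated includes below use the `N template' idiom: vm_n.h is
   included once per access size with N predefined, and token-pasting
   inside it expands to one read and one write function per size.  A
   minimal sketch of the idiom (illustrative only -- the real vm_n.h
   differs in detail and in the macros it uses):

     #define vm_data_map_read_N XCONCAT2(vm_data_map_read_,N)
     unsigned_N vm_data_map_read_N(vm_data_map *map, unsigned_word ea,
                                   cpu *processor, unsigned_word cia)
     {
       unsigned_word ra = vm_real_data_addr(map, ea, 1, processor, cia);
       ... read via the core_map, byte-swap as required, return ...
     }
*/
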
#define N 1
#include "vm_n.h"
#undef N

#define N 2
#include "vm_n.h"
#undef N

#define N 4
#include "vm_n.h"
#undef N

#define N 8
#include "vm_n.h"
#undef N

#define N word
#include "vm_n.h"
#undef N



#endif /* _VM_C_ */