/* This file is part of the program psim.

   Copyright (C) 1994-1997, Andrew Cagney <cagney@highland.com.au>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   */


#ifndef _VM_C_
#define _VM_C_

#if 0
#include "basics.h"
#include "registers.h"
#include "device.h"
#include "corefile.h"
#include "vm.h"
#include "interrupts.h"
#include "mon.h"
#endif

#include "cpu.h"

/* OEA vs VEA

   For the VEA model, the VM layer is almost transparent.  Its only
   purpose is to maintain separate core_map's for the instruction
   and data address spaces, so that writes to the instruction space
   and execution of the data space are prevented.

   For the OEA model things are more complex.  The reason for separate
   instruction and data models becomes crucial.  The OEA model is
   built out of three parts.  An instruction map, a data map and an
   underlying structure that provides access to the VM data kept in
   main memory. */


/* OEA data structures:

   The OEA model maintains internal data structures that shadow the
   semantics of the various OEA VM registers (BAT, SR, etc).  This
   allows a simple, efficient model of the VM to be implemented.

   Consistency between the OEA registers and this model's internal
   data structures is maintained by updating the structures at
   `synchronization' points.  Of particular note is that (at the time
   of writing) the memory data types for BAT registers are rebuilt
   whenever the processor moves between problem and system states.

   Unpacked values are stored in the OEA so that they correctly align
   to where they will be needed by the PTE address. */


/* Protection table:

   Matrix of processor state, type of access and validity */

typedef enum {
  om_supervisor_state,
  om_problem_state,
  nr_om_modes
} om_processor_modes;

typedef enum {
  om_data_read, om_data_write,
  om_instruction_read, om_access_any,
  nr_om_access_types
} om_access_types;

static int om_valid_access[2][4][nr_om_access_types] = {
  /* read, write, instruction, any */
  /* K bit == 0 */
  { /*r  w  i  a       pp */
    { 1, 1, 1, 1 }, /* 00 */
    { 1, 1, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  },
  /* K bit == 1 or P bit valid */
  { /*r  w  i  a       pp */
    { 0, 0, 0, 0 }, /* 00 */
    { 1, 0, 1, 1 }, /* 01 */
    { 1, 1, 1, 1 }, /* 10 */
    { 1, 0, 1, 1 }, /* 11 */
  }
};


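/* Illustrative sketch (not from the original source): the matrix
   above is indexed as om_valid_access[key][pp][access], exactly as
   the protection checks in om_translate_effective_to_real() do.
   The values below are hypothetical.  */
#if 0
static int
om_example_protection_check(void)
{
  int key = 1;				/* K bit set for this mode */
  int pp = 1;				/* page protection bits 0b01 */
  om_access_types access = om_data_write;
  return om_valid_access[key][pp][access]; /* 0 - write not permitted */
}
#endif
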
/* BAT translation:

   The BAT data structure only contains information on valid BAT
   translations for the current processor mode and type of access. */

typedef struct _om_bat {
  unsigned_word block_effective_page_index;
  unsigned_word block_effective_page_index_mask;
  unsigned_word block_length_mask;
  unsigned_word block_real_page_number;
  int protection_bits;
} om_bat;

enum _nr_om_bat_registers {
  nr_om_bat_registers = 4
};

typedef struct _om_bats {
  int nr_valid_bat_registers;
  om_bat bat[nr_om_bat_registers];
} om_bats;


/* Segment TLB:

   In this model the 32-bit and 64-bit segment tables are treated in
   very similar ways.  The 32-bit segment registers are treated as a
   simplification of the 64-bit segment TLB. */

enum _om_segment_tlb_constants {
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_segment_table_entry_group = 128,
  sizeof_segment_table_entry = 16,
#endif
  om_segment_tlb_index_start_bit = 32,
  om_segment_tlb_index_stop_bit = 35,
  nr_om_segment_tlb_entries = 16,
  nr_om_segment_tlb_constants
};

typedef struct _om_segment_tlb_entry {
  int key[nr_om_modes];
  om_access_types invalid_access; /* set to instruction if no_execute bit */
  unsigned_word masked_virtual_segment_id; /* aligned ready for pte group addr */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  int is_valid;
  unsigned_word masked_effective_segment_id;
#endif
} om_segment_tlb_entry;

typedef struct _om_segment_tlb {
  om_segment_tlb_entry entry[nr_om_segment_tlb_entries];
} om_segment_tlb;


/* Page TLB:

   This OEA model includes a small direct-mapped page TLB.  The TLB
   cuts down on the need for the OEA model to perform walks of the
   page hash table. */

enum _om_page_tlb_constants {
  om_page_tlb_index_start_bit = 46,
  om_page_tlb_index_stop_bit = 51,
  nr_om_page_tlb_entries = 64,
#if (WITH_TARGET_WORD_BITSIZE == 64)
  sizeof_pte_group = 128,
  sizeof_pte = 16,
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  sizeof_pte_group = 64,
  sizeof_pte = 8,
#endif
  nr_om_page_tlb_constants
};

typedef struct _om_page_tlb_entry {
  int protection;
  int changed;
  unsigned_word real_address_of_pte_1;
  unsigned_word masked_virtual_segment_id;
  unsigned_word masked_page;
  unsigned_word masked_real_page_number;
} om_page_tlb_entry;

typedef struct _om_page_tlb {
  om_page_tlb_entry entry[nr_om_page_tlb_entries];
} om_page_tlb;


/* Memory translation:

   OEA memory translation possibly involves BAT, SR, TLB and HTAB
   information.  */

typedef struct _om_map {

  /* local cache of register values */
  int is_relocate;
  int is_problem_state;

  /* block address translation */
  om_bats *bat_registers;

  /* failing that, translate ea to va using segment tlb */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word real_address_of_segment_table;
#endif
  om_segment_tlb *segment_tlb;

  /* then va to ra using hashed page table and tlb */
  unsigned_word real_address_of_page_table;
  unsigned_word page_table_hash_mask;
  om_page_tlb *page_tlb;

  /* physical memory for fetching page table entries */
  core_map *physical;

  /* address xor for PPC endian */
  unsigned xor[WITH_XOR_ENDIAN];

} om_map;


/* VM objects:

   External objects defined by vm.h */

struct _vm_instruction_map {
  /* real memory for last part */
  core_map *code;
  /* translate effective to real */
  om_map translation;
};

struct _vm_data_map {
  /* translate effective to real */
  om_map translation;
  /* real memory for translated address */
  core_map *read;
  core_map *write;
};


/* VM:

   Underlying memory object.  For the VEA this is just the core_map.
   For the OEA it is the instruction and data memory translations. */

struct _vm {

  /* OEA: base address registers */
  om_bats ibats;
  om_bats dbats;

  /* OEA: segment registers */
  om_segment_tlb segment_tlb;

  /* OEA: translation lookaside buffers */
  om_page_tlb instruction_tlb;
  om_page_tlb data_tlb;

  /* real memory */
  core *physical;

  /* memory maps */
  vm_instruction_map instruction_map;
  vm_data_map data_map;

};


/* OEA Support procedures */


STATIC_INLINE_VM\
(unsigned_word)
om_segment_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
				  om_segment_tlb_index_start_bit,
				  om_segment_tlb_index_stop_bit);
  return index;
}

STATIC_INLINE_VM\
(unsigned_word)
om_page_tlb_index(unsigned_word ea)
{
  unsigned_word index = EXTRACTED(ea,
				  om_page_tlb_index_start_bit,
				  om_page_tlb_index_stop_bit);
  return index;
}

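/* Illustrative sketch (not from the original source): with the IBM
   bit numbering used here (bit 0 is the most significant of 64),
   extracting bits 46..51 of the EA is just the page number modulo
   the number of TLB entries.  */
#if 0
static unsigned_word
om_example_page_tlb_index(unsigned_word ea)
{
  /* equivalent to EXTRACTED(ea, 46, 51) on a 64-bit target */
  return (ea >> 12) & (nr_om_page_tlb_entries - 1);
}
#endif
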
STATIC_INLINE_VM\
(unsigned_word)
om_hash_page(unsigned_word masked_vsid,
	     unsigned_word ea)
{
  unsigned_word extracted_ea = EXTRACTED(ea, 36, 51);
#if (WITH_TARGET_WORD_BITSIZE == 32)
  unsigned_word masked_ea = INSERTED32(extracted_ea, 7, 31-6);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  unsigned_word masked_ea = INSERTED64(extracted_ea, 18, 63-7);
  unsigned_word hash = masked_vsid ^ masked_ea;
#endif
  TRACE(trace_vm, ("ea=0x%lx - masked-vsid=0x%lx masked-ea=0x%lx hash=0x%lx\n",
		   (unsigned long)ea,
		   (unsigned long)masked_vsid,
		   (unsigned long)masked_ea,
		   (unsigned long)hash));
  return hash;
}

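/* Illustrative sketch (not from the original source): both hash
   operands above are pre-aligned so that the hash can be masked and
   OR-ed straight into the page table base to form a PTE group
   address, as om_virtual_to_real() does below.  */
#if 0
static unsigned_word
om_example_pteg_addr(om_map *map, unsigned_word masked_vsid, unsigned_word ea)
{
  unsigned_word hash = om_hash_page(masked_vsid, ea);
  return (map->real_address_of_page_table
	  | (hash & map->page_table_hash_mask));
}
#endif
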
STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_api(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 26, 31);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 52, 56);
#endif
}

STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_hash(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return EXTRACTED32(pte_0, 25, 25);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return EXTRACTED64(pte_0, 62, 62);
#endif
}

STATIC_INLINE_VM\
(int)
om_pte_0_valid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return MASKED32(pte_0, 0, 0) != 0;
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return MASKED64(pte_0, 63, 63) != 0;
#endif
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_masked_page(unsigned_word ea)
{
  return MASKED(ea, 36, 51);
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_masked_byte(unsigned_word ea)
{
  return MASKED(ea, 52, 63);
}

/* return the VSID aligned for pte group addr */
STATIC_INLINE_VM\
(unsigned_word)
om_pte_0_masked_vsid(unsigned_word pte_0)
{
#if (WITH_TARGET_WORD_BITSIZE == 32)
  return INSERTED32(EXTRACTED32(pte_0, 1, 24), 31-6-24+1, 31-6);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 64)
  return INSERTED64(EXTRACTED64(pte_0, 0, 51), 63-7-52+1, 63-7);
#endif
}

STATIC_INLINE_VM\
(unsigned_word)
om_pte_1_pp(unsigned_word pte_1)
{
  return MASKED(pte_1, 62, 63); /*PP*/
}

STATIC_INLINE_VM\
(int)
om_pte_1_referenced(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 55, 55);
}

STATIC_INLINE_VM\
(int)
om_pte_1_changed(unsigned_word pte_1)
{
  return EXTRACTED(pte_1, 56, 56);
}

STATIC_INLINE_VM\
(unsigned_word)
om_pte_1_masked_rpn(unsigned_word pte_1)
{
  return MASKED(pte_1, 0, 51); /*RPN*/
}

STATIC_INLINE_VM\
(unsigned_word)
om_ea_api(unsigned_word ea)
{
  return EXTRACTED(ea, 36, 41);
}


/* Page and segment table read/write operators; these still need to
   account for the PPC's XOR byte-swap addressing */

STATIC_INLINE_VM\
(unsigned_word)
om_read_word(om_map *map,
	     unsigned_word ra,
	     cpu *processor,
	     unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  return core_map_read_word(map->physical, ra, processor, cia);
}

STATIC_INLINE_VM\
(void)
om_write_word(om_map *map,
	      unsigned_word ra,
	      unsigned_word val,
	      cpu *processor,
	      unsigned_word cia)
{
  if (WITH_XOR_ENDIAN)
    ra ^= map->xor[sizeof(instruction_word) - 1];
  core_map_write_word(map->physical, ra, val, processor, cia);
}


/* Bring things into existence */

INLINE_VM\
(vm *)
vm_create(core *physical)
{
  vm *virtual;

  /* internal checks */
  if (nr_om_segment_tlb_entries
      != (1 << (om_segment_tlb_index_stop_bit
		- om_segment_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_segment constants\n");
  if (nr_om_page_tlb_entries
      != (1 << (om_page_tlb_index_stop_bit
		- om_page_tlb_index_start_bit + 1)))
    error("internal error - vm_create - problem with om_page constants\n");

  /* create the new vm register file */
  virtual = ZALLOC(vm);

  /* set up core */
  virtual->physical = physical;

  /* set up the address decoders */
  virtual->instruction_map.translation.bat_registers = &virtual->ibats;
  virtual->instruction_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->instruction_map.translation.page_tlb = &virtual->instruction_tlb;
  virtual->instruction_map.translation.is_relocate = 0;
  virtual->instruction_map.translation.is_problem_state = 0;
  virtual->instruction_map.translation.physical = core_readable(physical);
  virtual->instruction_map.code = core_readable(physical);

  virtual->data_map.translation.bat_registers = &virtual->dbats;
  virtual->data_map.translation.segment_tlb = &virtual->segment_tlb;
  virtual->data_map.translation.page_tlb = &virtual->data_tlb;
  virtual->data_map.translation.is_relocate = 0;
  virtual->data_map.translation.is_problem_state = 0;
  virtual->data_map.translation.physical = core_readable(physical);
  virtual->data_map.read = core_readable(physical);
  virtual->data_map.write = core_writeable(physical);

  return virtual;
}


STATIC_INLINE_VM\
(om_bat *)
om_effective_to_bat(om_map *map,
		    unsigned_word ea)
{
  int curr_bat = 0;
  om_bats *bats = map->bat_registers;
  int nr_bats = bats->nr_valid_bat_registers;

  for (curr_bat = 0; curr_bat < nr_bats; curr_bat++) {
    om_bat *bat = bats->bat + curr_bat;
    if ((ea & bat->block_effective_page_index_mask)
	!= bat->block_effective_page_index)
      continue;
    return bat;
  }

  return NULL;
}


STATIC_INLINE_VM\
(om_segment_tlb_entry *)
om_effective_to_virtual(om_map *map,
			unsigned_word ea,
			cpu *processor,
			unsigned_word cia)
{
  /* first try the segment tlb */
  om_segment_tlb_entry *segment_tlb_entry = (map->segment_tlb->entry
					     + om_segment_tlb_index(ea));

#if (WITH_TARGET_WORD_BITSIZE == 32)
  TRACE(trace_vm, ("ea=0x%lx - sr[%ld] - masked-vsid=0x%lx va=0x%lx%07lx\n",
		   (unsigned long)ea,
		   (long)om_segment_tlb_index(ea),
		   (unsigned long)segment_tlb_entry->masked_virtual_segment_id,
		   (unsigned long)EXTRACTED32(segment_tlb_entry->masked_virtual_segment_id, 31-6-24+1, 31-6),
		   (unsigned long)EXTRACTED32(ea, 4, 31)));
  return segment_tlb_entry;
#endif

#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry->is_valid
      && (segment_tlb_entry->masked_effective_segment_id == MASKED(ea, 0, 35))) {
    error("fixme - is there a need to update any bits\n");
    return segment_tlb_entry;
  }

  /* drats, segment tlb missed */
  {
    unsigned_word segment_id_hash = ea;
    int current_hash = 0;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word segment_table_entry_group =
	(map->real_address_of_segment_table
	 | (MASKED64(segment_id_hash, 31, 35) >> (56-35)));
      unsigned_word segment_table_entry;
      for (segment_table_entry = segment_table_entry_group;
	   segment_table_entry < (segment_table_entry_group
				  + sizeof_segment_table_entry_group);
	   segment_table_entry += sizeof_segment_table_entry) {
	/* byte order? */
	unsigned_word segment_table_entry_dword_0 =
	  om_read_word(map, segment_table_entry, processor, cia);
	unsigned_word segment_table_entry_dword_1 =
	  om_read_word(map, segment_table_entry + 8,
		       processor, cia);
	int is_valid = MASKED64(segment_table_entry_dword_0, 56, 56) != 0;
	unsigned_word masked_effective_segment_id =
	  MASKED64(segment_table_entry_dword_0, 0, 35);
	if (is_valid && masked_effective_segment_id == MASKED64(ea, 0, 35)) {
	  /* don't permit some things */
	  if (MASKED64(segment_table_entry_dword_0, 57, 57))
	    error("om_effective_to_virtual() - T=1 in STE not supported\n");
	  /* update segment tlb */
	  segment_tlb_entry->is_valid = is_valid;
	  segment_tlb_entry->masked_effective_segment_id =
	    masked_effective_segment_id;
	  segment_tlb_entry->key[om_supervisor_state] =
	    EXTRACTED64(segment_table_entry_dword_0, 58, 58);
	  segment_tlb_entry->key[om_problem_state] =
	    EXTRACTED64(segment_table_entry_dword_0, 59, 59);
	  segment_tlb_entry->invalid_access =
	    (MASKED64(segment_table_entry_dword_0, 60, 60)
	     ? om_instruction_read
	     : om_access_any);
	  segment_tlb_entry->masked_virtual_segment_id =
	    INSERTED64(EXTRACTED64(segment_table_entry_dword_1, 0, 51),
		       18-13, 63-7); /* aligned ready for pte group addr */
	  return segment_tlb_entry;
	}
      }
      segment_id_hash = ~segment_id_hash;
    }
  }
  return NULL;
#endif
}


STATIC_INLINE_VM\
(om_page_tlb_entry *)
om_virtual_to_real(om_map *map,
		   unsigned_word ea,
		   om_segment_tlb_entry *segment_tlb_entry,
		   om_access_types access,
		   cpu *processor,
		   unsigned_word cia)
{
  om_page_tlb_entry *page_tlb_entry = (map->page_tlb->entry
				       + om_page_tlb_index(ea));

  /* is it a tlb hit? */
  if ((page_tlb_entry->masked_virtual_segment_id
       == segment_tlb_entry->masked_virtual_segment_id)
      && (page_tlb_entry->masked_page
	  == om_ea_masked_page(ea))) {
    TRACE(trace_vm, ("ea=0x%lx - tlb hit - tlb=0x%lx\n",
		     (long)ea, (long)page_tlb_entry));
    return page_tlb_entry;
  }

  /* drats, it is a tlb miss */
  {
    unsigned_word page_hash =
      om_hash_page(segment_tlb_entry->masked_virtual_segment_id, ea);
    int current_hash;
    for (current_hash = 0; current_hash < 2; current_hash += 1) {
      unsigned_word real_address_of_pte_group =
	(map->real_address_of_page_table
	 | (page_hash & map->page_table_hash_mask));
      unsigned_word real_address_of_pte_0;
      TRACE(trace_vm,
	    ("ea=0x%lx - htab search %d - htab=0x%lx hash=0x%lx mask=0x%lx pteg=0x%lx\n",
	     (long)ea, current_hash,
	     map->real_address_of_page_table,
	     page_hash,
	     map->page_table_hash_mask,
	     (long)real_address_of_pte_group));
      for (real_address_of_pte_0 = real_address_of_pte_group;
	   real_address_of_pte_0 < (real_address_of_pte_group
				    + sizeof_pte_group);
	   real_address_of_pte_0 += sizeof_pte) {
	unsigned_word pte_0 = om_read_word(map,
					   real_address_of_pte_0,
					   processor, cia);
	/* did we hit? */
	if (om_pte_0_valid(pte_0)
	    && (current_hash == om_pte_0_hash(pte_0))
	    && (segment_tlb_entry->masked_virtual_segment_id
		== om_pte_0_masked_vsid(pte_0))
	    && (om_ea_api(ea) == om_pte_0_api(pte_0))) {
	  unsigned_word real_address_of_pte_1 = (real_address_of_pte_0
						 + sizeof_pte / 2);
	  unsigned_word pte_1 = om_read_word(map,
					     real_address_of_pte_1,
					     processor, cia);
	  page_tlb_entry->protection = om_pte_1_pp(pte_1);
	  page_tlb_entry->changed = om_pte_1_changed(pte_1);
	  page_tlb_entry->masked_virtual_segment_id = segment_tlb_entry->masked_virtual_segment_id;
	  page_tlb_entry->masked_page = om_ea_masked_page(ea);
	  page_tlb_entry->masked_real_page_number = om_pte_1_masked_rpn(pte_1);
	  page_tlb_entry->real_address_of_pte_1 = real_address_of_pte_1;
	  if (!om_pte_1_referenced(pte_1)) {
	    om_write_word(map,
			  real_address_of_pte_1,
			  pte_1 | BIT(55),
			  processor, cia);
	    TRACE(trace_vm,
		  ("ea=0x%lx - htab hit - set ref - tlb=0x%lx &pte1=0x%lx\n",
		   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
	  }
	  else {
	    TRACE(trace_vm,
		  ("ea=0x%lx - htab hit - tlb=0x%lx &pte1=0x%lx\n",
		   (long)ea, (long)page_tlb_entry, (long)real_address_of_pte_1));
	  }
	  return page_tlb_entry;
	}
      }
      page_hash = ~page_hash; /* secondary hash is the one's complement of the primary */
    }
  }
  return NULL;
}


STATIC_INLINE_VM\
(void)
om_interrupt(cpu *processor,
	     unsigned_word cia,
	     unsigned_word ea,
	     om_access_types access,
	     storage_interrupt_reasons reason)
{
  switch (access) {
  case om_data_read:
    data_storage_interrupt(processor, cia, ea, reason, 0/*!is_store*/);
    break;
  case om_data_write:
    data_storage_interrupt(processor, cia, ea, reason, 1/*is_store*/);
    break;
  case om_instruction_read:
    instruction_storage_interrupt(processor, cia, reason);
    break;
  default:
    error("internal error - om_interrupt - unexpected access type %d", access);
  }
}


STATIC_INLINE_VM\
(unsigned_word)
om_translate_effective_to_real(om_map *map,
			       unsigned_word ea,
			       om_access_types access,
			       cpu *processor,
			       unsigned_word cia,
			       int abort)
{
  om_bat *bat = NULL;
  om_segment_tlb_entry *segment_tlb_entry = NULL;
  om_page_tlb_entry *page_tlb_entry = NULL;
  unsigned_word ra;

  if (!map->is_relocate) {
    ra = ea;
    TRACE(trace_vm, ("ea=0x%lx - direct map - ra=0x%lx\n",
		     (long)ea, (long)ra));
    return ra;
  }

  /* match with BAT? */
  bat = om_effective_to_bat(map, ea);
  if (bat != NULL) {
    if (!om_valid_access[1][bat->protection_bits][access]) {
      TRACE(trace_vm, ("ea=0x%lx - bat access violation\n", (long)ea));
      if (abort)
	om_interrupt(processor, cia, ea, access,
		     protection_violation_storage_interrupt);
      else
	return MASK(0, 63);
    }

    ra = ((ea & bat->block_length_mask) | bat->block_real_page_number);
    TRACE(trace_vm, ("ea=0x%lx - bat translation - ra=0x%lx\n",
		     (long)ea, (long)ra));
    return ra;
  }

  /* translate ea to va using segment map */
  segment_tlb_entry = om_effective_to_virtual(map, ea, processor, cia);
#if (WITH_TARGET_WORD_BITSIZE == 64)
  if (segment_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - segment tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   segment_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
#endif
  /* check for invalid segment access type */
  if (segment_tlb_entry->invalid_access == access) {
    TRACE(trace_vm, ("ea=0x%lx - segment access invalid\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* lookup in PTE */
  page_tlb_entry = om_virtual_to_real(map, ea, segment_tlb_entry,
				      access,
				      processor, cia);
  if (page_tlb_entry == NULL) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb miss\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   hash_table_miss_storage_interrupt);
    else
      return MASK(0, 63);
  }
  if (!(om_valid_access
	[segment_tlb_entry->key[map->is_problem_state]]
	[page_tlb_entry->protection]
	[access])) {
    TRACE(trace_vm, ("ea=0x%lx - page tlb access violation\n", (long)ea));
    if (abort)
      om_interrupt(processor, cia, ea, access,
		   protection_violation_storage_interrupt);
    else
      return MASK(0, 63);
  }

  /* update change bit as needed */
  if (access == om_data_write && !page_tlb_entry->changed) {
    unsigned_word pte_1 = om_read_word(map,
				       page_tlb_entry->real_address_of_pte_1,
				       processor, cia);
    om_write_word(map,
		  page_tlb_entry->real_address_of_pte_1,
		  pte_1 | BIT(56),
		  processor, cia);
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
		     (long)ea, (long)page_tlb_entry,
		     (long)page_tlb_entry->real_address_of_pte_1));
  }

  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
		   (long)ea, (long)ra));
  return ra;
}

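/* Illustrative sketch (not from the original source): a hypothetical
   caller probing a data address without raising an interrupt.  A
   failed translation is signalled by the all-ones value MASK(0, 63),
   matching the non-abort returns above.  */
#if 0
static int
om_example_probe(om_map *map, unsigned_word ea,
		 cpu *processor, unsigned_word cia)
{
  unsigned_word ra = om_translate_effective_to_real(map, ea, om_data_read,
						    processor, cia,
						    0/*abort*/);
  return ra != MASK(0, 63); /* nonzero if ea is currently mapped */
}
#endif
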
/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */
STATIC_INLINE_VM\
(void)
om_unpack_bat(om_bat *bat,
	      spreg ubat,
	      spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
			    | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}

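/* Illustrative sketch (not from the original source): a worked
   example with hypothetical register values.  With BL = 0 in the
   upper BAT the mask built above is 0x1ffff (a 128KB block); with
   BL = 0x7ff it covers 0xfffffff (256MB), the architected maximum.  */
#if 0
static void
om_example_unpack_bat(void)
{
  om_bat bat;
  spreg ubat = 0;			/* BEPI = 0, BL = 0 (128KB) */
  spreg lbat = 0;			/* BRPN = 0, PP = 0b00 */
  om_unpack_bat(&bat, ubat, lbat);
  /* bat.block_length_mask == 0x1ffff - byte offset within the block */
  /* bat.block_effective_page_index_mask == ~0x1ffff */
}
#endif
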
/* rebuild the given bat table */
STATIC_INLINE_VM\
(void)
om_unpack_bats(om_bats *bats,
	       spreg *raw_bats,
	       msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
	? EXTRACTED(ubat, 63, 63)
	: EXTRACTED(ubat, 62, 62)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
		    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_sr(vm *virtual,
	     sreg *srs,
	     int which_sr,
	     cpu *processor,
	     unsigned_word cia)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    cpu_error(processor, cia, "unsupported value of T in segment register %d",
	      which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
				       ? om_instruction_read
				       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
	       31-6-24+1, 31-6); /* aligned ready for pte group addr */
}
#endif


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_srs(vm *virtual,
	      sreg *srs,
	      cpu *processor,
	      unsigned_word cia)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr,
		 processor, cia);
  }
}
#endif


/* Rebuild all the data structures for the new context as specified by
   the passed registers */
INLINE_VM\
(void)
vm_synchronize_context(vm *virtual,
		       spreg *sprs,
		       sreg *srs,
		       msreg msr,
		       /**/
		       cpu *processor,
		       unsigned_word cia)
{

  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
				63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
				     7, 7+9-1)
			  | MASK32(7+9, 31-6));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;


  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs,
		processor, cia);
#endif

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
    int i = 1;
    unsigned mask;
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
	|| (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))
      mask = 0;
    else
      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);
      i = i * 2;
    }
  }
  else {
    /* don't allow the processor to change endian modes */
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
	|| (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
      cpu_error(processor, cia, "attempt to change hardwired byte order");
  }
}

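/* Worked example (not from the original source): with
   WITH_XOR_ENDIAN == 8 and an endian-mode conflict, the loop above
   fills in xor[0] = 7, xor[1] = 6, xor[3] = 4 and xor[7] = 0, so a
   1-byte access has its address XORed with 7, a 2-byte access with
   6, a 4-byte access with 4 and an 8-byte access with 0 - the
   PowerPC little-endian address munge.  */
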
/* update vm data structures due to a TLB operation */

INLINE_VM\
(void)
vm_page_tlb_invalidate_entry(vm *memory,
			     unsigned_word ea)
{
  int i = om_page_tlb_index(ea);
  memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));
}

INLINE_VM\
(void)
vm_page_tlb_invalidate_all(vm *memory)
{
  int i;
  for (i = 0; i < nr_om_page_tlb_entries; i++) {
    memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
    memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  }
  TRACE(trace_vm, ("tlb invalidate all\n"));
}


INLINE_VM\
(vm_data_map *)
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


INLINE_VM\
(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}

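/* Illustrative sketch (not from the original source): hypothetical
   set-up code wiring a VM onto an existing core, roughly as psim's
   initialization would.  */
#if 0
static void
vm_example_setup(core *physical)
{
  vm *virtual = vm_create(physical);
  vm_instruction_map *imap = vm_create_instruction_map(virtual);
  vm_data_map *dmap = vm_create_data_map(virtual);
  /* attach imap/dmap to a cpu, then call vm_synchronize_context()
     whenever the MSR, BATs, SRs or SDR1 change */
}
#endif
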
STATIC_INLINE_VM\
(unsigned_word)
vm_translate(om_map *map,
	     unsigned_word ea,
	     om_access_types access,
	     cpu *processor,
	     unsigned_word cia,
	     int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
					  processor, cia,
					  abort);
  default:
    error("internal error - vm_translate - bad switch");
    return 0;
  }
}


INLINE_VM\
(unsigned_word)
vm_real_data_addr(vm_data_map *map,
		  unsigned_word ea,
		  int is_read,
		  cpu *processor,
		  unsigned_word cia)
{
  return vm_translate(&map->translation,
		      ea,
		      is_read ? om_data_read : om_data_write,
		      processor,
		      cia,
		      1); /*abort*/
}


INLINE_VM\
(unsigned_word)
vm_real_instruction_addr(vm_instruction_map *map,
			 cpu *processor,
			 unsigned_word cia)
{
  return vm_translate(&map->translation,
		      cia,
		      om_instruction_read,
		      processor,
		      cia,
		      1); /*abort*/
}

INLINE_VM\
(instruction_word)
vm_instruction_map_read(vm_instruction_map *map,
			cpu *processor,
			unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);
}


INLINE_VM\
(int)
vm_data_map_read_buffer(vm_data_map *map,
			void *target,
			unsigned_word addr,
			unsigned nr_bytes,
			cpu *processor,
			unsigned_word cia)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
				    ea, om_data_read,
				    processor, /*processor*/
				    cia, /*cia*/
				    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
	!= sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}


INLINE_VM\
(int)
vm_data_map_write_buffer(vm_data_map *map,
			 const void *source,
			 unsigned_word addr,
			 unsigned nr_bytes,
			 int violate_read_only_section,
			 cpu *processor,
			 unsigned_word cia)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
				    ea, om_data_write,
				    processor,
				    cia,
				    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
			       ? map->read
			       : map->write),
			      &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}

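/* Illustrative sketch (not from the original source): a hypothetical
   debugger-style peek.  Passing processor == NULL makes a failed
   translation return a short count instead of raising a storage
   interrupt, per the abort? arguments above.  unsigned_4 is assumed
   to be the 4-byte type from basics.h.  */
#if 0
static int
vm_example_peek_4(vm_data_map *dmap, unsigned_word addr, unsigned_4 *val)
{
  return (vm_data_map_read_buffer(dmap, val, addr, sizeof(*val),
				  NULL/*processor*/, 0/*cia*/)
	  == sizeof(*val));
}
#endif
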
/* define the read/write 1/2/4/8/word functions */

#define N 1
#include "vm_n.h"
#undef N

#define N 2
#include "vm_n.h"
#undef N

#define N 4
#include "vm_n.h"
#undef N

#define N 8
#include "vm_n.h"
#undef N

#define N word
#include "vm_n.h"
#undef N


#endif /* _VM_C_ */