/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 * Copyright (C) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

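/* Debug output wrapper: it expands to nothing, so all the D(printk(...))
 * calls below compile away. Define it as "#define D(x) x" to enable them.
 */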
#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which mm's we have assigned which page_id's, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never running - it is used as an invalid page_id
 * so we can make TLB entries that will never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush, corrupting the flush sequence.
 */
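
/* Roughly, the per-entry hit condition is (pseudo-code):
 *
 *   hit = (entry.vpn matches the virtual page being translated) &&
 *         (entry.page_id == the page_id in R_MMU_CONTEXT)
 *
 * Writing INVALID_PAGEID into an entry therefore guarantees it can
 * never hit, since that page_id is never installed in R_MMU_CONTEXT.
 */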

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
        int i;
        unsigned long flags;

        /* the vpn of i & 0xf is so we don't write similar TLB entries
         * into the same 4-way entry group
         */

        local_irq_save(flags);
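        /* Selecting entry i through R_TLB_SELECT makes the subsequent
         * writes to R_TLB_HI and R_TLB_LO go to that entry. */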
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
                              IO_FIELD(R_TLB_HI, vpn, i & 0xf) );

                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
                              IO_STATE(R_TLB_LO, valid, no) |
                              IO_STATE(R_TLB_LO, kernel, no) |
                              IO_STATE(R_TLB_LO, we, no) |
                              IO_FIELD(R_TLB_LO, pfn, 0) );
        }
        local_irq_restore(flags);
        D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
        int i;
        int page_id = mm->context.page_id;
        unsigned long flags;

        D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

        if (page_id == NO_CONTEXT)
                return;

        /* mark the TLB entries that match the page_id as invalid.
         * here we could also check the _PAGE_GLOBAL bit and NOT flush
         * global pages. is it worth the extra I/O?
         */

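        /* A sketch of that optimization (untested; assumes the CRIS
         * IO_MASK() macro covers the global bit of R_TLB_LO): after
         * selecting entry i, skip it when the global bit is set:
         *
         *   if (*R_TLB_LO & IO_MASK(R_TLB_LO, global))
         *           continue;
         */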
        local_irq_save(flags);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
                                      IO_FIELD(R_TLB_HI, vpn, i & 0xf) );

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
                                      IO_STATE(R_TLB_LO, valid, no) |
                                      IO_STATE(R_TLB_LO, kernel, no) |
                                      IO_STATE(R_TLB_LO, we, no) |
                                      IO_FIELD(R_TLB_LO, pfn, 0) );
                }
        }
        local_irq_restore(flags);
}

/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma,
               unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int page_id = mm->context.page_id;
        int i;
        unsigned long flags;

        D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

        if (page_id == NO_CONTEXT)
                return;

        addr &= PAGE_MASK; /* perhaps not necessary */

        /* invalidate those TLB entries that match both the mm context
         * and the virtual address requested
         */

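        /* The vpn field of R_TLB_HI sits in the page-number bits, so
         * "(tlb_hi & PAGE_MASK) == addr" compares page addresses, and
         * writing addr back below keeps the entry's vpn unchanged. */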
        local_irq_save(flags);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                unsigned long tlb_hi;
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                tlb_hi = *R_TLB_HI;
                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
                    (tlb_hi & PAGE_MASK) == addr) {
                        *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID) |
                                    addr; /* same addr as before works. */

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no) |
                                      IO_STATE(R_TLB_LO, valid, no) |
                                      IO_STATE(R_TLB_LO, kernel, no) |
                                      IO_STATE(R_TLB_LO, we, no) |
                                      IO_FIELD(R_TLB_LO, pfn, 0) );
                }
        }
        local_irq_restore(flags);
}

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
        int i;
        unsigned long flags;

        printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

        local_irq_save(flags);
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
                       i, *R_TLB_HI, *R_TLB_LO);
        }
        local_irq_restore(flags);
}
#endif
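
/* (Kept under #if 0 since it is debug-only; change to #if 1 and call
 * dump_tlb_all() to inspect the TLB contents.) */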

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context.page_id = NO_CONTEXT;
        return 0;
}
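
/* The real page_id is handed out lazily: the first switch_mm() to this
 * mm goes through get_mmu_context(), which replaces NO_CONTEXT with an
 * actual page_id (see below). */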

/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        /* make sure we have a context */

        get_mmu_context(next);

        /* remember the pgd for the fault handlers.
         * this is similar to the pgd register in some other CPUs.
         * we need our own copy of it because current and active_mm
         * might be invalid at points where we still need to dereference
         * the pgd.
         */

        per_cpu(current_pgd, smp_processor_id()) = next->pgd;

        /* switch context in the MMU */

        D(printk("switching mmu_context to %d (%p)\n",
                 next->context.page_id, next));

        *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
}