#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs. */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif


static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
	register unsigned long a0 __asm__("$16");
	register unsigned long v0 __asm__("$0");

	a0 = virt_to_phys(pcb);
	__asm__ __volatile__(
		"call_pal %2 #__reload_thread"
		: "=r"(v0), "=r"(a0)
		: "i"(PAL_swpctx), "r"(a0)
		: "$1", "$22", "$23", "$24", "$25");

	return v0;
}


/*
 * The maximum ASN the processor supports. On the EV4 this is 63,
 * but the PAL-code doesn't actually use this information. On the
 * EV5 this is 127, and on the EV6 it is 255.
 *
 * On the EV4, ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries. On the EV5 and
 * EV6, ASNs also validate the TB entries, and thus make a lot
 * more sense.
 *
 * The EV4 ASNs don't even match the architecture manual, ugh. And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't
 * actually work correctly and thus cannot be used (explaining the
 * lack of PAL-code support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN	(alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN	EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN	EV5_MAX_ASN
# else
#  define MAX_ASN	EV6_MAX_ASN
# endif
#endif

/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#include <asm/smp.h>
#ifdef CONFIG_SMP
#define cpu_last_asn(cpuid)	(cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)	last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN	8
#define ASN_FIRST_VERSION	(1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK	((1UL << WIDTH_HARDWARE_ASN) - 1)
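
/*
 * Illustrative sketch only, not part of the original header: these
 * two helpers spell out the field split defined by the masks above.
 * The names asn_version() and asn_hw() are invented for this example.
 * E.g. a context value of 0x305 has version bits 0x300 and hardware
 * ASN 0x05, since ASN_FIRST_VERSION == 0x100.
 */
static inline unsigned long asn_version(unsigned long mmc)
{
	/* Everything above the hardware ASN bits is the version. */
	return mmc & ~HARDWARE_ASN_MASK;
}

static inline unsigned long asn_hw(unsigned long mmc)
{
	/* The low WIDTH_HARDWARE_ASN bits are what the CPU sees. */
	return mmc & HARDWARE_ASN_MASK;
}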

/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASNs than the processor has, we invalidate the old
 * user TLBs (tbiap()) and start a new ASN version. That will
 * automatically force a new ASN for any other processes the next
 * time they want to run.
 */

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
	unsigned long asn = cpu_last_asn(cpu);
	unsigned long next = asn + 1;

	if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
		tbiap();
		imb();
		next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
	}
	cpu_last_asn(cpu) = next;
	return next;
}
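
/*
 * Worked example (assumed values, EV6 with MAX_ASN == 255): if
 * cpu_last_asn(cpu) is 0x1ff (version 0x100, hardware ASN 0xff),
 * the wrap test above fires; tbiap()/imb() flush the user TLB and
 * icache, and next becomes (0x1ff & ~0xff) + 0x100 == 0x200, i.e.
 * version 0x200 with hardware ASN 0. Every other mm still carries
 * version 0x100 in its context, so each is pushed through
 * __get_new_mm_context() the next time it is switched in.
 */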

__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* Check if our ASN is of an older version, and thus invalid. */
	unsigned long asn;
	unsigned long mmc;
	long cpu = smp_processor_id();

#ifdef CONFIG_SMP
	cpu_data[cpu].asn_lock = 1;
	barrier();
#endif
	asn = cpu_last_asn(cpu);
	mmc = next_mm->context[cpu];
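	/*
	 * The xor/mask test below compares only the version fields:
	 * it is nonzero exactly when mmc's version differs from the
	 * one in cpu_last_asn(cpu). Example (assumed values):
	 * mmc == 0x105 vs. asn == 0x203 gives 0x306 & ~0xff == 0x300,
	 * which is nonzero, so a fresh context is allocated.
	 */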
	if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
		mmc = __get_new_mm_context(next_mm, cpu);
		next_mm->context[cpu] = mmc;
	}
#ifdef CONFIG_SMP
	else
		cpu_data[cpu].need_new_asn = 1;
#endif

	/* Always update the PCB ASN. Another thread may have allocated
	   a new mm->context (via flush_tlb_mm) without the ASN serial
	   number wrapping. We have no way to detect when this is needed. */
	task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
	      struct task_struct *next)
{
	/* As described above, ASNs are broken for TLB usage. But we can
	   optimize for switching between threads -- if the mm is
	   unchanged from current we needn't flush. */
	/* ??? May not be needed because EV4 PALcode recognizes that
	   ASNs are broken and does a tbiap itself on swpctx, under
	   the "Must set ASN or flush" rule. At least this is true
	   for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
	   I'm going to leave this here anyway, just to Be Sure. -- r~ */
	if (prev_mm != next_mm)
		tbiap();

	/* Do continue to allocate ASNs, because we can still use them
	   to avoid flushing the icache. */
	ev5_switch_mm(prev_mm, next_mm, next);
}

extern void __load_new_mm_context(struct mm_struct *);

#ifdef CONFIG_SMP
#define check_mmu_context()					\
do {								\
	int cpu = smp_processor_id();				\
	cpu_data[cpu].asn_lock = 0;				\
	barrier();						\
	if (cpu_data[cpu].need_new_asn) {			\
		struct mm_struct *mm = current->active_mm;	\
		cpu_data[cpu].need_new_asn = 0;			\
		if (!mm->context[cpu])				\
			__load_new_mm_context(mm);		\
	}							\
} while (0)
#else
#define check_mmu_context()  do { } while (0)
#endif

__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
	__load_new_mm_context(next_mm);
	tbiap();
}

#define deactivate_mm(tsk, mm)	do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a, b, c)	alpha_mv.mv_switch_mm((a), (b), (c))
# define activate_mm(x, y)	alpha_mv.mv_activate_mm((x), (y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a, b, c)	ev4_switch_mm((a), (b), (c))
#  define activate_mm(x, y)	ev4_activate_mm((x), (y))
# else
#  define switch_mm(a, b, c)	ev5_switch_mm((a), (b), (c))
#  define activate_mm(x, y)	ev5_activate_mm((x), (y))
# endif
#endif

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		mm->context[i] = 0;
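	/*
	 * mm->pgd lives in the kernel identity mapping, so subtracting
	 * IDENT_ADDR yields its physical address; shifting right by
	 * PAGE_SHIFT turns that into the page frame number which the
	 * PCB's ptbr field holds.
	 */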
	if (tsk != current)
		task_thread_info(tsk)->pcb.ptbr
		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
	return 0;
}

extern inline void
destroy_context(struct mm_struct *mm)
{
	/* Nothing to do. */
}

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	task_thread_info(tsk)->pcb.ptbr
	  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */