#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/config.h>
#include <asm/system.h>
#include <asm/machvec.h>
#include <asm/compiler.h>

/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif

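/*
 * Swap the hardware context via the swpctx PALcall: a0 ($16) carries
 * the physical address of the new PCB, and PALcode hands back the
 * physical address of the old PCB in v0 ($0).  __load_new_mm_context
 * uses this to install a freshly allocated ASN and page-table pointer.
 */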
extern inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
        register unsigned long a0 __asm__("$16");
        register unsigned long v0 __asm__("$0");

        a0 = virt_to_phys(pcb);
        __asm__ __volatile__(
                "call_pal %2 #__reload_thread"
                : "=r"(v0), "=r"(a0)
                : "i"(PAL_swpctx), "r"(a0)
                : "$1", "$22", "$23", "$24", "$25");

        return v0;
}


/*
 * The maximum ASN value the processor supports.  On the EV4 this is
 * 63, but the PAL-code doesn't actually use this information.  On the
 * EV5 this is 127, and on the EV6 it is 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASNs also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASNs don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't
 * actually work correctly and thus cannot be used (explaining the
 * lack of PAL-code support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255

#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN        (alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN       EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN       EV5_MAX_ASN
# else
#  define MAX_ASN       EV6_MAX_ASN
# endif
#endif


/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#ifdef CONFIG_SMP
#include <asm/smp.h>
#define cpu_last_asn(cpuid)     (cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)     last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN      8
#define ASN_FIRST_VERSION       (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK       ((1UL << WIDTH_HARDWARE_ASN) - 1)
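
/*
 * With WIDTH_HARDWARE_ASN == 8, ASN_FIRST_VERSION is 0x100 and
 * HARDWARE_ASN_MASK is 0xff: the low byte of cpu_last_asn() is the
 * hardware ASN, and everything above it is the version counter.
 */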

/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASNs than the processor has, we invalidate the old
 * user TLB entries (tbiap()) and start a new ASN version. That will
 * automatically force a new ASN for any other processes the next
 * time they want to run.
 */
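
/*
 * For example: if cpu_last_asn() holds 0x2xx (version 2) while an mm's
 * context holds 0x1xx (version 1), the version bits differ, so
 * ev5_switch_mm() below must allocate that mm a fresh ASN before it
 * can run.
 */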

#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif

static inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
        unsigned long asn = cpu_last_asn(cpu);
        unsigned long next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
                tbiap();
                imb();
                next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        cpu_last_asn(cpu) = next;
        return next;
}
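
/*
 * Worked example on EV6 (MAX_ASN == 255): cpu_last_asn() == 0x2fe
 * simply increments to 0x2ff.  At 0x2ff the hardware ASN field is
 * exhausted, so the user TLB is flushed (tbiap) and the counter jumps
 * to 0x300: version 3, hardware ASN 0.
 */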

__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* Check if our ASN is of an older version, and thus invalid. */
        unsigned long asn;
        unsigned long mmc;
        long cpu = smp_processor_id();

#ifdef CONFIG_SMP
        cpu_data[cpu].asn_lock = 1;
        barrier();
#endif
        asn = cpu_last_asn(cpu);
        mmc = next_mm->context[cpu];
        if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
                mmc = __get_new_mm_context(next_mm, cpu);
                next_mm->context[cpu] = mmc;
        }
#ifdef CONFIG_SMP
        else
                cpu_data[cpu].need_new_asn = 1;
#endif

        /* Always update the PCB ASN.  Another thread may have allocated
           a new mm->context (via flush_tlb_mm) without the ASN serial
           number wrapping.  We have no way to detect when this is needed.  */
        next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
}

__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* As described, ASNs are broken for TLB usage.  But we can
           optimize for switching between threads -- if the mm is
           unchanged from current we needn't flush.  */
        /* ??? May not be needed because EV4 PALcode recognizes that
           ASNs are broken and does a tbiap itself on swpctx, under
           the "Must set ASN or flush" rule.  At least this is true
           for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
           I'm going to leave this here anyway, just to Be Sure.  -- r~ */
        if (prev_mm != next_mm)
                tbiap();

        /* Do continue to allocate ASNs, because we can still use them
           to avoid flushing the icache.  */
        ev5_switch_mm(prev_mm, next_mm, next);
}

extern void __load_new_mm_context(struct mm_struct *);

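/*
 * Close the asn_lock window opened in ev5_switch_mm().  If a TLB flush
 * from another CPU zeroed this mm's context while the window was open
 * (need_new_asn records that we skipped allocating a new ASN there),
 * pick up a fresh context now rather than run with a stale ASN.
 */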
#ifdef CONFIG_SMP
#define check_mmu_context()                                     \
do {                                                            \
        int cpu = smp_processor_id();                           \
        cpu_data[cpu].asn_lock = 0;                             \
        barrier();                                              \
        if (cpu_data[cpu].need_new_asn) {                       \
                struct mm_struct *mm = current->active_mm;      \
                cpu_data[cpu].need_new_asn = 0;                 \
                if (!mm->context[cpu])                          \
                        __load_new_mm_context(mm);              \
        }                                                       \
} while(0)
#else
#define check_mmu_context()  do { } while(0)
#endif

__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
}

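/*
 * On EV4 the ASN does not validate TB entries (see above), so merely
 * loading a new context is not enough -- the user TLB is flushed
 * explicitly as well.
 */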
__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
        tbiap();
}

#define deactivate_mm(tsk,mm)   do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c)       alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y)       alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c)      ev4_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c)      ev5_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev5_activate_mm((x),(y))
# endif
#endif

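/*
 * A new mm starts with context 0 (invalid) on every online CPU, so the
 * first switch to it allocates a fresh ASN.  The PCB's page-table base
 * (ptbr) holds the physical page frame number of the pgd; subtracting
 * IDENT_ADDR converts the identity-mapped kernel virtual address to a
 * physical one before the PAGE_SHIFT.
 */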
extern inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (cpu_online(i))
                        mm->context[i] = 0;
        if (tsk != current)
                tsk->thread_info->pcb.ptbr
                  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
        return 0;
}

extern inline void
destroy_context(struct mm_struct *mm)
{
        /* Nothing to do.  */
}

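/*
 * Lazy TLB: a kernel thread borrowing this mm only needs its PCB to
 * point at valid page tables; no ASN work is done here.
 */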
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        tsk->thread_info->pcb.ptbr
          = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}

#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */