#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

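/*
 * Arguments carried to flush_tlb_func() on other cpus via
 * smp_call_function_many(): the mm being flushed plus the virtual range
 * [flush_start, flush_end).  flush_end == TLB_FLUSH_ALL requests a full
 * flush, flush_end == 0 a single-page flush of flush_start.
 */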
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

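/*
 * For orientation only: the switch_mm() side (in asm/mmu_context.h, not in
 * this file) is expected to follow the 1a) ordering above, roughly:
 *
 *	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	// 1a1)
 *	this_cpu_write(cpu_tlbstate.active_mm, next);		// 1a2)
 *	cpumask_set_cpu(cpu, mm_cpumask(next));			// 1a3)
 *	load_cr3(next->pgd);					// 1a4)
 *	cpumask_clear_cpu(cpu, mm_cpumask(prev));		// 1a5)
 */
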
/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
			local_flush_tlb();
		else if (!f->flush_end)
			__flush_tlb_single(f->flush_start);
		else {
			unsigned long addr;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
		}
	} else
		leave_mm(smp_processor_id());
}

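/*
 * Ask the cpus in 'cpumask' to run flush_tlb_func() for this mm/range
 * (smp_call_function_many() skips the calling cpu itself).  On SGI UV
 * systems uv_flush_tlb_others() may complete the flush in hardware and
 * returns only the subset of cpus that still need the IPI.
 */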
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

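/*
 * Flush the current task's entire address space: a full local flush plus,
 * if any other cpu has this mm loaded, a TLB_FLUSH_ALL IPI to those cpus.
 */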
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * Check whether the range contains a huge page: a THP large page, or a
 * HUGETLB page when THP is disabled.
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = ALIGN(start, HPAGE_SIZE);
	for (; addr < end; addr += HPAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (likely(!pgd_none(*pgd))) {
			pud = pud_offset(pgd, addr);
			if (likely(!pud_none(*pud))) {
				pmd = pmd_offset(pud, addr);
				if (likely(!pmd_none(*pmd)))
					if (pmd_large(*pmd))
						return addr;
			}
		}
	}
	return 0;
}

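/*
 * Typical callers (illustrative only, defined outside this file): the
 * flush_tlb_range() wrapper is expected to pass the vma's mm and flags,
 * e.g. flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags), while a
 * full-mm flush passes 0UL and TLB_FLUSH_ALL as the range.
 */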
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;

	preempt_disable();
	if (current->active_mm != mm)
		goto flush_all;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto flush_all;
	}

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
					|| vmflag == VM_HUGETLB) {
		local_flush_tlb();
		goto flush_all;
	}

	/* In modern CPUs the last level tlb is shared by data and instructions */
	if (vmflag & VM_EXEC)
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];
	/* Assume all of the TLB entries are occupied by this task */
	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

	/* tlb_flushall_shift is the balance point, see the commit log for details */
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();
	else {
		if (has_large_page(mm, start, end)) {
			local_flush_tlb();
			goto flush_all;
		}
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);

		if (cpumask_any_but(mm_cpumask(mm),
				smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
		return;
	}

flush_all:
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

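/*
 * Flush one user page: invlpg locally (or leave_mm() if we only hold the
 * mm lazily), then an IPI to any other cpus running this mm; passing
 * end == 0UL makes flush_tlb_func() invalidate just the page at 'start'.
 */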
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

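/*
 * Flush everything on this cpu; if we were in lazy tlb mode, also drop the
 * borrowed mm (leave_mm() switches cr3 to swapper_pg_dir).
 */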
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

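/*
 * Example of the balance heuristic below (numbers illustrative only): with
 * tlb_lld_4k[ENTRIES] == 512 and tlb_flushall_shift == 6, any range larger
 * than 512 >> 6 == 8 pages takes the full-flush path instead of per-page
 * invlpg.
 */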
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned act_entries;
	struct flush_tlb_info info;

	/* In modern CPUs the last level tlb is shared by data and instructions */
	act_entries = tlb_lld_4k[ENTRIES];

	/* Balance as for a user space task's flush, a bit conservative */
	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 ||
		(end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	else {
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}

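/*
 * Usage sketch for the debugfs knob below (paths assume debugfs mounted at
 * /sys/kernel/debug, with arch_debugfs_dir being its "x86" subdirectory):
 *
 *	cat /sys/kernel/debug/x86/tlb_flushall_shift
 *	echo -1 > /sys/kernel/debug/x86/tlb_flushall_shift	# always flush all
 */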
#ifdef CONFIG_DEBUG_TLBFLUSH
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	s8 shift;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtos8(buf, 0, &shift))
		return -EINVAL;

	if (shift < -1 || shift >= BITS_PER_LONG)
		return -EINVAL;

	tlb_flushall_shift = shift;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __cpuinit create_tlb_flushall_shift(void)
{
	if (cpu_has_invlpg) {
		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
			arch_debugfs_dir, NULL, &fops_tlbflush);
	}
	return 0;
}
late_initcall(create_tlb_flushall_shift);
#endif