#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway.)
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In the future, when interrupts are split into per-CPU domains, this
 *	could be fixed at the cost of triggering multiple IPIs in some cases.
 */

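/*
 * Per-vector flush descriptor shared between the sending CPU and the
 * receivers: flush_start/flush_end describe the range being flushed
 * (flush_end == 0 means a single page at flush_start, TLB_FLUSH_ALL means
 * a full flush), and flush_cpumask tracks which CPUs still have to
 * acknowledge the IPI.
 */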
union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_start;
		unsigned long flush_end;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/*
 * The flush state is kept in a small static array, each entry padded to a
 * full internode cache line: other CPUs write into it and we don't want
 * false sharing between the per-vector slots.
 */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];

static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, so in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there are
 * no write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent.  On x86_64 it's a noop
 * but is still used for documentation purposes, though the usage is
 * slightly inconsistent.  On x86_32, asmlinkage is regparm(0) but the
 * interrupt entry calls in with the first parameter in %eax.  Maybe
 * define intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return.
		 *
		 * BUG();
		 */

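	/*
	 * Only act if this CPU is still running the mm being flushed: flush
	 * a single page or a small range with INVLPG, fall back to a full
	 * flush for TLB_FLUSH_ALL or when INVLPG is unavailable, and simply
	 * leave the mm if we are only borrowing it (lazy TLB mode).
	 */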
	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_end == TLB_FLUSH_ALL
					|| !cpu_has_invlpg)
				local_flush_tlb();
			else if (!f->flush_end)
				__flush_tlb_single(f->flush_start);
			else {
				unsigned long addr;
				addr = f->flush_start;
				while (addr < f->flush_end) {
					__flush_tlb_single(addr);
					addr += PAGE_SIZE;
				}
			}
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}

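/*
 * Send a flush request to every CPU in @cpumask except the local one.
 * The sender publishes mm/start/end in its flush_state slot, raises the
 * per-vector IPI and then spins until each target has cleared itself
 * from flush_cpumask in smp_invalidate_interrupt().
 */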
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

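	/*
	 * With more CPUs than invalidate vectors, several senders can share
	 * the same flush_state slot, so access to it must be serialized by
	 * the per-slot spinlock.
	 */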
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_start = start;
	f->flush_end = end;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_start = 0;
	f->flush_end = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}

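/*
 * On UV systems the Broadcast Assist Unit can perform the shootdown for
 * us; uv_flush_tlb_others() returns the subset of CPUs (if any) that
 * still need a conventional IPI.  All other systems go straight to the
 * IPI path.
 */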
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, start, end);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, start, end);
}

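/*
 * Spread the invalidate vectors across NUMA nodes: each online node gets
 * a contiguous share of the vectors and the CPUs within a node round-robin
 * over that share, keeping flush traffic from different nodes off the same
 * flush_state slot.
 */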
static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We change tlb_vector_offset for each CPU at runtime, but this
	 * will not cause inconsistency, as the write is atomic on x86. We
	 * might see more lock contention for a short time, but after all
	 * CPUs' tlb_vector_offset have been changed everything should go
	 * back to normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we
	 * might waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;

	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}

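/* Recompute the per-CPU vector layout whenever a CPU comes or goes. */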
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);

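/*
 * Flush the whole TLB for the current task's mm: the local CPU flushes
 * directly, and every other CPU that still carries the mm in its cpumask
 * is sent a TLB_FLUSH_ALL request.
 */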
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

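/*
 * Flush an entire mm.  If the mm is only borrowed (lazy TLB, no
 * current->mm) the CPU drops it via leave_mm() instead of flushing.
 */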
void flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);

	preempt_enable();
}

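/*
 * Scan [start, end) at HPAGE_SIZE granularity and return the address of
 * the first huge-page mapping found, or 0 if there is none.
 * flush_tlb_range() uses this to fall back to a full-mm flush when the
 * range contains huge pages.
 */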
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = ALIGN(start, HPAGE_SIZE);
	for (; addr < end; addr += HPAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (likely(!pgd_none(*pgd))) {
			pud = pud_offset(pgd, addr);
			if (likely(!pud_none(*pud))) {
				pmd = pmd_offset(pud, addr);
				if (likely(!pmd_none(*pmd)))
					if (pmd_large(*pmd))
						return addr;
			}
		}
	}
	return 0;
}
#else
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	return 0;
}
#endif
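/*
 * Range flush with a cost heuristic: if the number of 4K pages in the
 * range exceeds the usable TLB entries shifted by tlb_flushall_shift, it
 * is cheaper to flush the whole local TLB; otherwise flush page by page
 * with INVLPG and hand the exact range to the other CPUs.  Huge-page VMAs,
 * ranges containing huge pages, or a disabled heuristic
 * (tlb_flushall_shift == -1) take the flush_tlb_mm() path instead.
 */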
void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	struct mm_struct *mm;

	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
flush_all:
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	preempt_disable();
	mm = vma->vm_mm;
	if (current->active_mm == mm) {
		if (current->mm) {
			unsigned long addr, vmflag = vma->vm_flags;
			unsigned act_entries, tlb_entries = 0;

			if (vmflag & VM_EXEC)
				tlb_entries = tlb_lli_4k[ENTRIES];
			else
				tlb_entries = tlb_lld_4k[ENTRIES];

			act_entries = tlb_entries > mm->total_vm ?
					mm->total_vm : tlb_entries;

			if ((end - start) >> PAGE_SHIFT >
					act_entries >> tlb_flushall_shift)
				local_flush_tlb();
			else {
				if (has_large_page(mm, start, end)) {
					preempt_enable();
					goto flush_all;
				}
				for (addr = start; addr < end;
						addr += PAGE_SIZE)
					__flush_tlb_single(addr);

				if (cpumask_any_but(mm_cpumask(mm),
					smp_processor_id()) < nr_cpu_ids)
					flush_tlb_others(mm_cpumask(mm), mm,
						start, end);
				preempt_enable();
				return;
			}
		} else {
			leave_mm(smp_processor_id());
		}
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

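/*
 * Flush a single user page: INVLPG it locally if this CPU owns the mm,
 * and pass just the address (end == 0, i.e. one page) to the other CPUs.
 */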
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

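/*
 * Flush everything on every CPU, including global pages.  CPUs that are
 * in lazy TLB mode take the opportunity to drop the borrowed mm.
 */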
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}