x86/mm: Build arch/x86/mm/tlb.c even on !SMP
arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
/*
 * Smarter SMP flushing macros.
 * c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

#ifdef CONFIG_SMP

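/*
 * Argument block handed to flush_tlb_func() through
 * smp_call_function_many(): flush_mm is the mm being flushed (unused
 * for kernel-range flushes), and [flush_start, flush_end) is the
 * virtual address range, with flush_end set to TLB_FLUSH_ALL for a
 * full flush.
 */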
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently. Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
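
/*
 * Illustrative sketch only, not part of this file: the 1a) steps above
 * written out against the structures used here. 'cpu', 'prev' and
 * 'next' are assumed to be the switching CPU and the outgoing/incoming
 * mms; the real sequence lives in switch_mm().
 */
#if 0
	this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);	/* 1a1 */
	this_cpu_write(cpu_tlbstate.active_mm, next);		/* 1a2 */
	cpumask_set_cpu(cpu, mm_cpumask(next));			/* 1a3 */
	load_cr3(next->pgd);					/* 1a4 */
	cpumask_clear_cpu(cpu, mm_cpumask(prev));		/* 1a5 */
#endif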

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm && f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;

	if (end == 0)
		end = start + PAGE_SIZE;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (end == TLB_FLUSH_ALL)
		trace_tlb_flush(TLB_REMOTE_SEND_IPI, TLB_FLUSH_ALL);
	else
		trace_tlb_flush(TLB_REMOTE_SEND_IPI,
				(end - start) >> PAGE_SHIFT);

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
					       &info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
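
/*
 * Note: callers reach this through the flush_tlb_others() hook, which
 * resolves to native_flush_tlb_others() on bare metal and may be
 * overridden by a hypervisor-specific implementation under
 * CONFIG_PARAVIRT.
 */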

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);

	/* This is an implicit full barrier that synchronizes with switch_mm. */
	local_flush_tlb();

	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * See Documentation/x86/tlb.txt for details. We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead. Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns (33 pages * ~100 ns/page = ~3,300 ns).
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm) {
		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if (!current->mm) {
		leave_mm(smp_processor_id());

		/* Synchronize with switch_mm. */
		smp_mb();

		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	/*
	 * Both branches below are implicit full barriers (MOV to CR or
	 * INVLPG) that synchronize with switch_mm.
	 */
	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}
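
/*
 * Illustrative only (hypothetical caller, not from this file): with the
 * default ceiling of 33, a 16-page range goes through the per-page
 * invlpg loop above, while a 64-page range falls back to a full flush.
 * 'mm' and 'addr' are assumed to be a valid mm and page-aligned address.
 */
#if 0
	flush_tlb_mm_range(mm, addr, addr + 16 * PAGE_SIZE, 0UL); /* invlpg loop */
	flush_tlb_mm_range(mm, addr, addr + 64 * PAGE_SIZE, 0UL); /* full flush  */
#endif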

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm) {
			/*
			 * Implicit full barrier (INVLPG) that synchronizes
			 * with switch_mm.
			 */
			__flush_tlb_one(start);
		} else {
			leave_mm(smp_processor_id());

			/* Synchronize with switch_mm. */
			smp_mb();
		}
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Balance like a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;

		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
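
/*
 * Illustrative only (hypothetical caller): flushing a just-unmapped
 * kernel virtual range; 'va' and 'size' are assumed. Ranges above the
 * ceiling take the do_flush_tlb_all() path on every CPU instead.
 */
#if 0
	flush_tlb_kernel_range(va, va + size);
#endif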

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
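
/*
 * With debugfs mounted at the usual location, the ceiling can then be
 * inspected and tuned at runtime, e.g.:
 *   cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *   echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */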

#endif /* CONFIG_SMP */