#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

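/*
 * Per the initializer above, every CPU's tlb_state starts out
 * pointing at init_mm with a clear (0) state, i.e. neither
 * TLBSTATE_OK nor TLBSTATE_LAZY.
 */
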
/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};
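
/*
 * flush_end encodes the flush type: TLB_FLUSH_ALL requests a full
 * flush, 0 is shorthand for a single page starting at flush_start
 * (see flush_tlb_func() below), and any other value bounds a
 * page-by-page flush of [flush_start, flush_end).
 */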

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
		/*
		 * This gets called in the idle path where RCU
		 * functions differently.  Tracing normally
		 * uses RCU, so we have to call the tracepoint
		 * specially here.
		 */
		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);
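
/*
 * Note: besides the context-switch path, leave_mm() is invoked from
 * the remote-flush handlers below (flush_tlb_func() and
 * do_flush_tlb_all()) whenever this CPU turns out to be in lazy
 * TLB mode.
 */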

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func() won't call
 *	leave_mm if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func() ignores flush ipis for the
 *	wrong mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	inc_irq_stat(irq_tlb_count);

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;
	if (!f->flush_end)
		f->flush_end = f->flush_start + PAGE_SIZE;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL) {
			local_flush_tlb();
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL);
		} else {
			unsigned long addr;
			unsigned long nr_pages =
				(f->flush_end - f->flush_start) / PAGE_SIZE;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
			trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages);
		}
	} else
		leave_mm(smp_processor_id());
}
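
/*
 * flush_tlb_func() above runs in interrupt context on each target
 * CPU; it is delivered via smp_call_function_many() from
 * native_flush_tlb_others() below.
 */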

void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}
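
/*
 * The callers below go through the flush_tlb_others() hook, which on
 * a non-paravirt build resolves to native_flush_tlb_others() above.
 */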

void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	local_flush_tlb();
	trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}
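
/*
 * In the functions above and below, cpumask_any_but() < nr_cpu_ids is
 * the idiom for "some CPU other than us has this mm loaded": shootdown
 * IPIs are only sent when another CPU might hold stale entries.
 */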

/*
 * See Documentation/x86/tlb.txt for details.  We choose 33
 * because it is large enough to cover the vast majority (at
 * least 95%) of allocations, and is small enough that we are
 * confident it will not cause too much overhead.  Each single
 * flush is about 100 ns, so this caps the maximum overhead at
 * _about_ 3,000 ns.
 *
 * This is in units of pages.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	/* do a global flush by default */
	unsigned long base_pages_to_flush = TLB_FLUSH_ALL;

	preempt_disable();
	if (current->active_mm != mm)
		goto out;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto out;
	}

	if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
		base_pages_to_flush = (end - start) >> PAGE_SHIFT;

	if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
		base_pages_to_flush = TLB_FLUSH_ALL;
		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
		local_flush_tlb();
	} else {
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE) {
			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
			__flush_tlb_single(addr);
		}
	}
	trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush);
out:
	if (base_pages_to_flush == TLB_FLUSH_ALL) {
		start = 0UL;
		end = TLB_FLUSH_ALL;
	}
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, end);
	preempt_enable();
}
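
/*
 * Worked example (illustrative, assuming 4 KiB pages and the default
 * ceiling of 33): flushing a 128 KiB range covers 32 pages, so it is
 * done with 32 invlpg instructions; a 136 KiB range covers 34 pages,
 * exceeds the ceiling, and is promoted to a full local_flush_tlb().
 */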

void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
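
/*
 * Note the end argument of 0UL above: flush_tlb_func() treats a zero
 * flush_end as "exactly one page at flush_start".
 */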

static void do_flush_tlb_all(void *info)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

static void do_kernel_range_flush(void *info)
{
	struct flush_tlb_info *f = info;
	unsigned long addr;

	/* flush the range one page at a time with 'invlpg' */
	for (addr = f->flush_start; addr < f->flush_end; addr += PAGE_SIZE)
		__flush_tlb_single(addr);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* balance against a user space task's flush; a bit conservative */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) {
		on_each_cpu(do_flush_tlb_all, NULL, 1);
	} else {
		struct flush_tlb_info info;
		info.flush_start = start;
		info.flush_end = end;
		on_each_cpu(do_kernel_range_flush, &info, 1);
	}
}
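
/*
 * Example: code that remaps a range of kernel virtual addresses would
 * call flush_tlb_kernel_range(start, end) afterwards; if the range
 * exceeds tlb_single_page_flush_ceiling pages, every CPU simply does
 * a full flush via do_flush_tlb_all().
 */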

static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%lu\n", tlb_single_page_flush_ceiling);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	int ceiling;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtoint(buf, 0, &ceiling))
		return -EINVAL;

	if (ceiling < 0)
		return -EINVAL;

	tlb_single_page_flush_ceiling = ceiling;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __init create_tlb_single_page_flush_ceiling(void)
{
	debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR,
			    arch_debugfs_dir, NULL, &fops_tlbflush);
	return 0;
}
late_initcall(create_tlb_single_page_flush_ceiling);
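
/*
 * The ceiling can be inspected and tuned at runtime, e.g. (assuming
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 *	echo 50 > /sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 */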