x86/tlb: replace INVALIDATE_TLB_VECTOR by CALL_FUNCTION_VECTOR
arch/x86/mm/tlb.c
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

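/*
 * Per-cpu TLB state: the mm currently loaded into CR3 (active_mm) and
 * whether this cpu is in lazy TLB mode (state).
 */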
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */

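/*
 * One remote flush request, passed to flush_tlb_func() on each target
 * cpu via smp_call_function_many().  flush_end is TLB_FLUSH_ALL for a
 * full flush and 0 when only the page at flush_start is to be flushed.
 */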
struct flush_tlb_info {
	struct mm_struct *flush_mm;
	unsigned long flush_start;
	unsigned long flush_end;
};

/*
 * We cannot call mmdrop() because we are in interrupt context;
 * instead, update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) set cpu_tlbstate to TLBSTATE_OK
 *	Now the tlb flush IPI handler flush_tlb_func won't call leave_mm
 *	if cpu0 was in lazy tlb mode.
 * 1a2) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a3) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a4) change cr3.
 * 1a5) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but flush_tlb_func ignores flush ipis for the wrong
 *	mm, and in the worst case we perform a superfluous tlb flush.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush function:
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
static void flush_tlb_func(void *info)
{
	struct flush_tlb_info *f = info;

	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
		return;

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
		if (f->flush_end == TLB_FLUSH_ALL || !cpu_has_invlpg)
			local_flush_tlb();
		else if (!f->flush_end)
			__flush_tlb_single(f->flush_start);
		else {
			unsigned long addr;
			addr = f->flush_start;
			while (addr < f->flush_end) {
				__flush_tlb_single(addr);
				addr += PAGE_SIZE;
			}
		}
	} else
		leave_mm(smp_processor_id());
}

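/*
 * Send the flush request to the cpus in @cpumask.  With the dedicated
 * INVALIDATE_TLB_VECTOR gone, this rides on the generic SMP
 * function-call IPI (CALL_FUNCTION_VECTOR); UV systems filter the mask
 * through their own broadcast mechanism first.
 */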
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct flush_tlb_info info;
	info.flush_mm = mm;
	info.flush_start = start;
	info.flush_end = end;

	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			smp_call_function_many(cpumask, flush_tlb_func,
								&info, 1);
		return;
	}
	smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
}

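/*
 * Flush all entries for the current task's mm: locally right away, and
 * on every other cpu that has this mm loaded.
 */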
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * Return the address of the first THP large page (or HUGETLB page when
 * THP is disabled) in the range, or 0 if there is none.
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = ALIGN(start, HPAGE_SIZE);
	for (; addr < end; addr += HPAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (likely(!pgd_none(*pgd))) {
			pud = pud_offset(pgd, addr);
			if (likely(!pud_none(*pud))) {
				pmd = pmd_offset(pud, addr);
				if (likely(!pmd_none(*pmd)))
					if (pmd_large(*pmd))
						return addr;
			}
		}
	}
	return 0;
}

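/*
 * Flush a virtual address range of @mm.  Small ranges are flushed page
 * by page with invlpg; large ranges (as judged by tlb_flushall_shift)
 * fall back to a full flush.  Remote cpus are told the exact range
 * only when the per-page path is taken.
 */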
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;

	preempt_disable();
	if (current->active_mm != mm)
		goto flush_all;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto flush_all;
	}

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
					|| vmflag == VM_HUGETLB) {
		local_flush_tlb();
		goto flush_all;
	}

	/* On modern CPUs the last level TLB is used for both data and instructions */
	if (vmflag & VM_EXEC)
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];
	/* Assume all of the TLB entries are occupied by this task */
	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

	/* tlb_flushall_shift is the balance point; details in the commit log */
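	/*
	 * Worked example with made-up numbers: for act_entries = 512 and
	 * tlb_flushall_shift = 4, ranges larger than 512 >> 4 = 32 pages
	 * take the full-flush path, smaller ones get per-page invlpg.
	 */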
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();
	else {
		if (has_large_page(mm, start, end)) {
			local_flush_tlb();
			goto flush_all;
		}
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);

		if (cpumask_any_but(mm_cpumask(mm),
				smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
		return;
	}

flush_all:
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

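/*
 * Flush one page of @vma's mm: invlpg locally (or leave_mm() if this
 * cpu is only lazily borrowing the mm), then tell the other cpus;
 * flush_end == 0 makes flush_tlb_func() flush just this page.
 */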
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}

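/*
 * Flush everything on this cpu, including global pages, and drop the
 * lazy mm if we were only borrowing it.
 */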
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

#ifdef CONFIG_DEBUG_TLBFLUSH
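/*
 * debugfs knob for tuning the balance point at run time, typically
 * exposed as <debugfs>/x86/tlb_flushall_shift.
 */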
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	s8 shift;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtos8(buf, 0, &shift))
		return -EINVAL;

	if (shift > 64)
		return -EINVAL;

	tlb_flushall_shift = shift;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __cpuinit create_tlb_flushall_shift(void)
{
	if (cpu_has_invlpg) {
		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
			arch_debugfs_dir, NULL, &fops_tlbflush);
	}
	return 0;
}
late_initcall(create_tlb_flushall_shift);
#endif