#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/cpu.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
#include <linux/debugfs.h>

DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
			= { &init_mm, 0, };

/*
 *	Smarter SMP flushing macros.
 *
 *	These mean you can really definitely utterly forget about
 *	writing to user space from interrupts. (It's not allowed anyway).
 *
 *	Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 *	More scalable flush, from Andi Kleen
 *
 *	To avoid global state use 8 different call vectors.
 *	Each CPU uses a specific vector to trigger flushes on other
 *	CPUs. Depending on the received vector the target CPUs look into
 *	the right array slot for the flush data.
 *
 *	With more than 8 CPUs they are hashed to the 8 available
 *	vectors. The limited global vector space forces us to this right now.
 *	In future when interrupts are split into per CPU domains this could be
 *	fixed, at the cost of triggering multiple IPIs in some cases.
 */

union smp_flush_state {
	struct {
		struct mm_struct *flush_mm;
		unsigned long flush_start;
		unsigned long flush_end;
		raw_spinlock_t tlbstate_lock;
		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
	};
	char pad[INTERNODE_CACHE_BYTES];
} ____cacheline_internodealigned_in_smp;

/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
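
/* Which of the NUM_INVALIDATE_TLB_VECTORS flush_state slots this CPU sends on. */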
static DEFINE_PER_CPU_READ_MOSTLY(int, tlb_vector_offset);

/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();
	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
}
EXPORT_SYMBOL_GPL(leave_mm);

/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles flush ipis.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, ie current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, no
 * write/read ordering problems.
 */

/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */

/*
 * FIXME: use of asmlinkage is not consistent. On x86_64 it's a noop
 * that is still used for documentation purposes, but the usage is
 * slightly inconsistent. On x86_32, asmlinkage is regparm(0), yet the
 * interrupt entry code calls in with the first parameter in %eax.
 * Maybe define intrlinkage?
 */
#ifdef CONFIG_X86_64
asmlinkage
#endif
void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned int cpu;
	unsigned int sender;
	union smp_flush_state *f;

	cpu = smp_processor_id();
	/*
	 * orig_rax contains the negated interrupt vector.
	 * Use that to determine where the sender put the data.
	 */
	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
	f = &flush_state[sender];

	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 */

	if (f->flush_mm == this_cpu_read(cpu_tlbstate.active_mm)) {
		if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
			if (f->flush_end == TLB_FLUSH_ALL
					|| !cpu_has_invlpg)
				local_flush_tlb();
			else if (!f->flush_end)
				__flush_tlb_single(f->flush_start);
			else {
				unsigned long addr;
				addr = f->flush_start;
				while (addr < f->flush_end) {
					__flush_tlb_single(addr);
					addr += PAGE_SIZE;
				}
			}
		} else
			leave_mm(cpu);
	}
out:
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
	smp_mb__after_clear_bit();
	inc_irq_stat(irq_tlb_count);
}
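
/*
 * Fill in this CPU's flush_state slot, send the matching invalidate
 * vector to every CPU in @cpumask except ourselves, and spin until all
 * of them have cleared their bit in flush_cpumask.
 */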
static void flush_tlb_others_ipi(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	unsigned int sender;
	union smp_flush_state *f;

	/* Caller has disabled preemption */
	sender = this_cpu_read(tlb_vector_offset);
	f = &flush_state[sender];

	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_lock(&f->tlbstate_lock);

	f->flush_mm = mm;
	f->flush_start = start;
	f->flush_end = end;
	if (cpumask_andnot(to_cpumask(f->flush_cpumask), cpumask, cpumask_of(smp_processor_id()))) {
		/*
		 * We have to send the IPI only to
		 * CPUs affected.
		 */
		apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
			      INVALIDATE_TLB_VECTOR_START + sender);

		while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
			cpu_relax();
	}

	f->flush_mm = NULL;
	f->flush_start = 0;
	f->flush_end = 0;
	if (nr_cpu_ids > NUM_INVALIDATE_TLB_VECTORS)
		raw_spin_unlock(&f->tlbstate_lock);
}
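
/*
 * On SGI UV systems the Broadcast Assist Unit handles most of the work;
 * uv_flush_tlb_others() returns only the CPUs that still need a
 * conventional IPI (or NULL if none do).
 */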
void native_flush_tlb_others(const struct cpumask *cpumask,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	if (is_uv_system()) {
		unsigned int cpu;

		cpu = smp_processor_id();
		cpumask = uv_flush_tlb_others(cpumask, mm, start, end, cpu);
		if (cpumask)
			flush_tlb_others_ipi(cpumask, mm, start, end);
		return;
	}
	flush_tlb_others_ipi(cpumask, mm, start, end);
}
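
/*
 * Spread the invalidate vectors across the online NUMA nodes so that
 * CPUs on different nodes use different flush_state slots and don't
 * contend on the same tlbstate_lock.
 */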
static void __cpuinit calculate_tlb_offset(void)
{
	int cpu, node, nr_node_vecs, idx = 0;
	/*
	 * We change tlb_vector_offset for each CPU at runtime, but this
	 * will not cause inconsistency, as the write is atomic under x86.
	 * We might see more lock contention for a short while, but once
	 * every CPU's tlb_vector_offset has been updated, everything
	 * should go back to normal.
	 *
	 * Note: if NUM_INVALIDATE_TLB_VECTORS % nr_online_nodes != 0, we
	 * might waste some vectors.
	 */
	if (nr_online_nodes > NUM_INVALIDATE_TLB_VECTORS)
		nr_node_vecs = 1;
	else
		nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS / nr_online_nodes;

	for_each_online_node(node) {
		int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
			nr_node_vecs;
		int cpu_offset = 0;
		for_each_cpu(cpu, cpumask_of_node(node)) {
			per_cpu(tlb_vector_offset, cpu) = node_offset +
				cpu_offset;
			cpu_offset++;
			cpu_offset = cpu_offset % nr_node_vecs;
		}
		idx++;
	}
}
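
/* Recompute the per-CPU vector offsets whenever a CPU comes online or dies. */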
static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
		unsigned long action, void *hcpu)
{
	switch (action & 0xf) {
	case CPU_ONLINE:
	case CPU_DEAD:
		calculate_tlb_offset();
	}
	return NOTIFY_OK;
}

static int __cpuinit init_smp_flush(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
		raw_spin_lock_init(&flush_state[i].tlbstate_lock);

	calculate_tlb_offset();
	hotcpu_notifier(tlb_cpuhp_notify, 0);
	return 0;
}
core_initcall(init_smp_flush);
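
/*
 * Flush all of the current task's mm: do a local full flush, then ask
 * every other CPU running this mm to do the same.
 */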
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;

	preempt_disable();

	local_flush_tlb();
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}

/*
 * Detect a THP large page, or a HUGETLB page when THP is disabled,
 * anywhere in the range being flushed.
 */
static inline unsigned long has_large_page(struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = ALIGN(start, HPAGE_SIZE);
	for (; addr < end; addr += HPAGE_SIZE) {
		pgd = pgd_offset(mm, addr);
		if (likely(!pgd_none(*pgd))) {
			pud = pud_offset(pgd, addr);
			if (likely(!pud_none(*pud))) {
				pmd = pmd_offset(pud, addr);
				if (likely(!pmd_none(*pmd)))
					if (pmd_large(*pmd))
						return addr;
			}
		}
	}
	return 0;
}
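
/*
 * Flush a range of one mm. Small ranges are flushed page by page with
 * 'invlpg'; ranges that are large relative to the TLB size (see
 * tlb_flushall_shift) or that contain huge pages fall back to a full
 * TLB flush.
 */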
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long vmflag)
{
	unsigned long addr;
	unsigned act_entries, tlb_entries = 0;

	preempt_disable();
	if (current->active_mm != mm)
		goto flush_all;

	if (!current->mm) {
		leave_mm(smp_processor_id());
		goto flush_all;
	}

	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
					|| vmflag == VM_HUGETLB) {
		local_flush_tlb();
		goto flush_all;
	}

	/* In modern CPUs, the last level tlb is used for both data and instructions */
	if (vmflag & VM_EXEC)
		tlb_entries = tlb_lli_4k[ENTRIES];
	else
		tlb_entries = tlb_lld_4k[ENTRIES];
	/* Assume all of the TLB entries are occupied by this task */
	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;

	/* tlb_flushall_shift is the balance point, details in the commit log */
	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		local_flush_tlb();
	else {
		if (has_large_page(mm, start, end)) {
			local_flush_tlb();
			goto flush_all;
		}
		/* flush the range one page at a time with 'invlpg' */
		for (addr = start; addr < end; addr += PAGE_SIZE)
			__flush_tlb_single(addr);

		if (cpumask_any_but(mm_cpumask(mm),
				smp_processor_id()) < nr_cpu_ids)
			flush_tlb_others(mm_cpumask(mm), mm, start, end);
		preempt_enable();
		return;
	}

flush_all:
	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
	preempt_enable();
}
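
/* Flush a single user address from the TLB on every CPU that uses this mm. */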
void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
{
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(start);
		else
			leave_mm(smp_processor_id());
	}

	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
		flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);

	preempt_enable();
}
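
/* Called on each CPU by flush_tlb_all(): flush everything, drop a lazy mm. */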
static void do_flush_tlb_all(void *info)
{
	__flush_tlb_all();
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
		leave_mm(smp_processor_id());
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}

#ifdef CONFIG_DEBUG_TLBFLUSH
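/*
 * Expose tlb_flushall_shift through debugfs so the range-flush heuristic
 * above can be tuned at run time.
 */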
static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	char buf[32];
	unsigned int len;

	len = sprintf(buf, "%hd\n", tlb_flushall_shift);
	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}

static ssize_t tlbflush_write_file(struct file *file,
		 const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	ssize_t len;
	s8 shift;

	len = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, len))
		return -EFAULT;

	buf[len] = '\0';
	if (kstrtos8(buf, 0, &shift))
		return -EINVAL;

	if (shift > 64)
		return -EINVAL;

	tlb_flushall_shift = shift;
	return count;
}

static const struct file_operations fops_tlbflush = {
	.read = tlbflush_read_file,
	.write = tlbflush_write_file,
	.llseek = default_llseek,
};

static int __cpuinit create_tlb_flushall_shift(void)
{
	if (cpu_has_invlpg) {
		debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR,
			arch_debugfs_dir, NULL, &fops_tlbflush);
	}
	return 0;
}
late_initcall(create_tlb_flushall_shift);
#endif /* CONFIG_DEBUG_TLBFLUSH */