x86/cpufeature: Carve out X86_FEATURE_*
[deliverable/linux.git] / arch / x86 / include / asm / tlbflush.h
CommitLineData
1965aae3
PA
1#ifndef _ASM_X86_TLBFLUSH_H
2#define _ASM_X86_TLBFLUSH_H
d291cf83
TG
3
4#include <linux/mm.h>
5#include <linux/sched.h>
6
7#include <asm/processor.h>
cd4d09ec 8#include <asm/cpufeature.h>
f05e798a 9#include <asm/special_insns.h>
d291cf83
TG
10
11#ifdef CONFIG_PARAVIRT
12#include <asm/paravirt.h>
13#else
14#define __flush_tlb() __native_flush_tlb()
15#define __flush_tlb_global() __native_flush_tlb_global()
16#define __flush_tlb_single(addr) __native_flush_tlb_single(addr)
17#endif
18
1e02ce4c
AL
19struct tlb_state {
20#ifdef CONFIG_SMP
21 struct mm_struct *active_mm;
22 int state;
23#endif
24
25 /*
26 * Access to this CR4 shadow and to H/W CR4 is protected by
27 * disabling interrupts when modifying either one.
28 */
29 unsigned long cr4;
30};
31DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
32
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
	/* Seed the per-CPU shadow from the live hardware CR4 value. */
	this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
}
38
375074cc
AL
/*
 * Set in this cpu's CR4.
 *
 * The shadow is updated together with the hardware register, and the
 * (serializing) hardware write is skipped entirely when the requested
 * bits are already set.  Callers must hold off interrupts around the
 * read-modify-write (see the comment in struct tlb_state).
 */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 | mask) != cr4) {
		cr4 |= mask;
		/* Keep the shadow in sync before touching the hardware. */
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
51
/*
 * Clear in this cpu's CR4.
 *
 * Mirror image of cr4_set_bits(): shadow and hardware are updated
 * together, and the hardware write is skipped when no requested bit
 * is currently set.  Same interrupt-disabled requirement applies.
 */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	if ((cr4 & ~mask) != cr4) {
		cr4 &= ~mask;
		/* Keep the shadow in sync before touching the hardware. */
		this_cpu_write(cpu_tlbstate.cr4, cr4);
		__write_cr4(cr4);
	}
}
64
/* Read the CR4 shadow (no hardware register access). */
static inline unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
70
/*
 * Save some of cr4 feature set we're using (e.g. Pentium 4MB
 * enable and PPro Global page enable), so that any CPU's that boot
 * up after us can get the correct flags. This should only be used
 * during boot on the boot cpu.
 */
extern unsigned long mmu_cr4_features;
extern u32 *trampoline_cr4_features;	/* CR4 image used for secondary CPU bring-up, when set */

static inline void cr4_set_bits_and_update_boot(unsigned long mask)
{
	/* Record the bits for CPUs that come up later... */
	mmu_cr4_features |= mask;
	if (trampoline_cr4_features)
		*trampoline_cr4_features = mmu_cr4_features;
	/* ...and apply them to this CPU right away. */
	cr4_set_bits(mask);
}
87
d291cf83
TG
/*
 * Flush this CPU's TLB by reloading CR3.  Note: a CR3 reload does not
 * invalidate global pages; __flush_tlb_all() handles that case.
 */
static inline void __native_flush_tlb(void)
{
	native_write_cr3(native_read_cr3());
}
92
086fc8f8
FY
/*
 * Flush everything, including global pages, by toggling CR4.PGE.
 * Must run with interrupts disabled: the CR4 read-modify-write is not
 * safe against an interrupt handler that also modifies CR4.
 */
static inline void __native_flush_tlb_global_irq_disabled(void)
{
	unsigned long cr4;

	cr4 = this_cpu_read(cpu_tlbstate.cr4);
	/* clear PGE */
	native_write_cr4(cr4 & ~X86_CR4_PGE);
	/* write old PGE again and flush TLBs */
	native_write_cr4(cr4);
}
103
d291cf83
TG
/*
 * Flush everything, including global pages, from any context.
 * Wraps __native_flush_tlb_global_irq_disabled() in an IRQ-off region.
 */
static inline void __native_flush_tlb_global(void)
{
	unsigned long flags;

	/*
	 * Read-modify-write to CR4 - protect it from preemption and
	 * from interrupts. (Use the raw variant because this code can
	 * be called from deep inside debugging code.)
	 */
	raw_local_irq_save(flags);

	__native_flush_tlb_global_irq_disabled();

	raw_local_irq_restore(flags);
}
119
/* Invalidate the TLB entry for the single page containing @addr. */
static inline void __native_flush_tlb_single(unsigned long addr)
{
	/* "memory" clobber keeps the invlpg ordered against page-table stores. */
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
124
/*
 * Flush everything on this CPU: use the global (PGE-toggle) flush when
 * the CPU supports global pages, otherwise a CR3 reload is enough.
 */
static inline void __flush_tlb_all(void)
{
	if (cpu_has_pge)
		__flush_tlb_global();
	else
		__flush_tlb();
}
132
/* Flush one page and account the event in the vm statistics. */
static inline void __flush_tlb_one(unsigned long addr)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
}
138
3e7f3db0 139#define TLB_FLUSH_ALL -1UL
d291cf83
TG
140
141/*
142 * TLB flushing:
143 *
144 * - flush_tlb() flushes the current mm struct TLBs
145 * - flush_tlb_all() flushes all processes TLBs
146 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
147 * - flush_tlb_page(vma, vmaddr) flushes one page
148 * - flush_tlb_range(vma, start, end) flushes a range of pages
149 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
e7b52ffd 150 * - flush_tlb_others(cpumask, mm, start, end) flushes TLBs on other cpus
d291cf83
TG
151 *
152 * ..but the i386 has somewhat limited tlb flushing capabilities,
153 * and page-granular flushes are available only on i486 and up.
d291cf83
TG
154 */
155
156#ifndef CONFIG_SMP
157
6df46865
DH
/* "_up" is for UniProcessor.
 *
 * This is a helper for other header functions. *Not* intended to be called
 * directly. All global TLB flushes need to either call this, or to bump the
 * vm statistics themselves.
 */
static inline void __flush_tlb_up(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
}
169
/* UP: flush everything (globals included) and account the event. */
static inline void flush_tlb_all(void)
{
	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
}
175
/* UP: flush the current mm - on UP that means the whole TLB. */
static inline void flush_tlb(void)
{
	__flush_tlb_up();
}
180
/* UP: flush this (the only) CPU's TLB. */
static inline void local_flush_tlb(void)
{
	__flush_tlb_up();
}
d291cf83
TG
185
/*
 * UP: only the currently active mm can have entries in the TLB, so a
 * flush for any other mm is a no-op.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}
191
/* UP: flush one page, but only if it belongs to the active mm. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}
198
/*
 * UP: no range granularity here - flushing any range of the active mm
 * flushes the whole TLB.  @start/@end are intentionally unused.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_up();
}
205
/*
 * UP: @start, @end and @vmflag are ignored - the whole TLB is flushed
 * whenever @mm is the currently active mm.
 */
static inline void flush_tlb_mm_range(struct mm_struct *mm,
	   unsigned long start, unsigned long end, unsigned long vmflag)
{
	if (mm == current->active_mm)
		__flush_tlb_up();
}
212
/* UP: there are no other CPUs, so a cross-CPU flush is a no-op. */
static inline void native_flush_tlb_others(const struct cpumask *cpumask,
					   struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
{
}
219
913da64b
AN
/* UP: no lazy-TLB state is tracked, so there is nothing to reset. */
static inline void reset_lazy_tlbstate(void)
{
}
223
effee4b9
AS
/* UP: kernel-range flushes fall back to a full TLB flush. */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	flush_tlb_all();
}
229
d291cf83
TG
230#else /* SMP */
231
232#include <asm/smp.h>
233
234#define local_flush_tlb() __flush_tlb()
235
611ae8e3
AS
236#define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
237
238#define flush_tlb_range(vma, start, end) \
239 flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
240
d291cf83
TG
241extern void flush_tlb_all(void);
242extern void flush_tlb_current_task(void);
d291cf83 243extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
611ae8e3
AS
244extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
245 unsigned long end, unsigned long vmflag);
effee4b9 246extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
d291cf83
TG
247
248#define flush_tlb() flush_tlb_current_task()
249
4595f962 250void native_flush_tlb_others(const struct cpumask *cpumask,
e7b52ffd
AS
251 struct mm_struct *mm,
252 unsigned long start, unsigned long end);
d291cf83
TG
253
254#define TLBSTATE_OK 1
255#define TLBSTATE_LAZY 2
256
913da64b
AN
/*
 * SMP: drop this CPU's lazy-TLB tracking - clear the state and point
 * active_mm back at init_mm.
 */
static inline void reset_lazy_tlbstate(void)
{
	this_cpu_write(cpu_tlbstate.state, 0);
	this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
}
d291cf83
TG
262
263#endif /* SMP */
264
72b252ae
MG
/*
 * Flush the local TLB and bump the irq_tlb_count statistic.
 * Not inlined due to inc_irq_stat not being defined yet.
 */
#define flush_tlb_local() { \
	inc_irq_stat(irq_tlb_count); \
	local_flush_tlb(); \
}
270
d291cf83 271#ifndef CONFIG_PARAVIRT
e7b52ffd
AS
272#define flush_tlb_others(mask, mm, start, end) \
273 native_flush_tlb_others(mask, mm, start, end)
96a388de 274#endif
d291cf83 275
1965aae3 276#endif /* _ASM_X86_TLBFLUSH_H */
This page took 0.593469 seconds and 5 git commands to generate.