#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */

#ifdef CONFIG_PERF_EVENTS
extern struct static_key rdpmc_always_available;

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_key_false(&rdpmc_always_available) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif
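
/*
 * Background note: CR4.PCE gates the RDPMC instruction outside ring 0.  With
 * the bit clear, a user-space RDPMC raises #GP; with it set, a self-profiling
 * task can read its counters without a syscall.  A minimal user-space sketch
 * (illustrative only, not part of this header; the helper name is made up):
 *
 *	static inline unsigned long long user_rdpmc(unsigned int idx)
 *	{
 *		unsigned int lo, hi;
 *
 *		asm volatile("rdpmc" : "=a" (lo), "=d" (hi) : "c" (idx));
 *		return ((unsigned long long)hi << 32) | lo;
 *	}
 *
 * load_mm_cr4() makes this permission follow the mm across context switches:
 * it is granted either globally (rdpmc_always_available) or per-mm via
 * context.perf_rdpmc_allowed.
 */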

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct *entries;
	int size;
};

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context(struct mm_struct *mm) {}
#endif

static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* lockless_dereference synchronizes with smp_store_release */
	ldt = lockless_dereference(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt))
		set_ldt(ldt->entries, ldt->size);
	else
		clear_LDT();
#else
	clear_LDT();
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}
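
/*
 * For reference, the writer side in arch/x86/kernel/ldt.c (install_ldt())
 * pairs with the lockless_dereference() above roughly like this
 * (illustrative sketch, not a verbatim copy):
 *
 *	smp_store_release(&mm->context.ldt, new_ldt);
 *	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
 *
 * The release store publishes the fully-initialized ldt_struct before the
 * pointer becomes visible, and the IPI guarantees every CPU running this mm
 * reloads the LDT before any of its user code resumes.
 */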

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
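
/*
 * Lazy TLB, in short: when a kernel thread is scheduled in, the CPU keeps
 * running on the previous task's page tables rather than switching.
 * Marking the CPU TLBSTATE_LAZY lets a later flush IPI be answered by
 * leaving the mm (leave_mm()) instead of flushing; that is why the lazy
 * re-entry branch of switch_mm() below has to reload CR3 and rejoin
 * mm_cpumask() before trusting any cached translations.
 */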

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (likely(prev != next)) {
#ifdef CONFIG_SMP
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables */
		load_cr3(next->pgd);
		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

		/* Stop flush ipis for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));

		/* Load per-mm CR4 state */
		load_mm_cr4(next);

#ifdef CONFIG_MODIFY_LDT_SYSCALL
		/*
		 * Load the LDT, if the LDT is different.
		 *
		 * It's possible that prev->context.ldt doesn't match
		 * the LDT register.  This can happen if leave_mm(prev)
		 * was called and then modify_ldt changed
		 * prev->context.ldt but suppressed an IPI to this CPU.
		 * In this case, prev->context.ldt != NULL, because we
		 * never set context.ldt to NULL while the mm still
		 * exists.  That means that next->context.ldt !=
		 * prev->context.ldt, because mms never share an LDT.
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_mm_ldt(next);
#endif
	}
#ifdef CONFIG_SMP
	else {
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

		if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
			/*
			 * On established mms, the mm_cpumask is only changed
			 * from irq context, from ptep_clear_flush() while in
			 * lazy tlb mode, and here.  Irqs are blocked during
			 * schedule, protecting us from simultaneous changes.
			 */
			cpumask_set_cpu(cpu, mm_cpumask(next));
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery.  We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
			load_mm_cr4(next);
			load_mm_ldt(next);
		}
	}
#endif
}
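
/*
 * switch_mm() is called from the scheduler's context-switch path (and via
 * activate_mm() below on exec) with interrupts disabled; the "Irqs are
 * blocked during schedule" argument above for the mm_cpumask update depends
 * on that calling convention.
 */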

#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0)

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
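
/*
 * activate_mm() is used on the exec path when the new image's mm is
 * installed; deactivate_mm() loads null selectors into FS and GS (on
 * 32-bit, just GS) so that segment state from the old image does not
 * survive into the new program.
 */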

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
}

#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif
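
/*
 * is_64bit_mm() distinguishes a native 64-bit mm from a 32-bit (ia32
 * compat) one.  The MPX code is the notable consumer: bounds-directory and
 * bounds-table entries are twice as large for 64-bit tasks, so the unmap
 * and fault paths need to know which format the mm uses.
 */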

static inline void arch_bprm_mm_init(struct mm_struct *mm,
				     struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */