#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

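/*
 * With CONFIG_PARAVIRT, paravirt_activate_mm() is provided by
 * <asm/paravirt.h> so a hypervisor can hook address-space activation;
 * the empty stub below keeps activate_mm() working on native builds.
 */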
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
}
#endif /* !CONFIG_PARAVIRT */

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

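/*
 * Called by the scheduler when a kernel thread temporarily adopts this mm
 * (lazy TLB mode).  Marking the CPU TLBSTATE_LAZY lets the flush IPI path
 * call leave_mm() and drop this CPU from the mm's cpumask instead of
 * flushing repeatedly: the kernel thread will not touch user mappings, so
 * stale user TLB entries are harmless until the next real switch_mm().
 */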
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
        if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

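/*
 * switch_mm() is the heart of the context switch: it points CR3 at the
 * next mm's page tables, keeps mm_cpumask() in sync so TLB flush IPIs
 * reach only CPUs that may hold stale entries, and reloads the LDT when
 * the two mms use different ones.
 */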
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
        unsigned int cpu = smp_processor_id();

        if (likely(prev != next)) {
#ifdef CONFIG_SMP
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                this_cpu_write(cpu_tlbstate.active_mm, next);
#endif
                cpumask_set_cpu(cpu, mm_cpumask(next));

                /* Re-load page tables */
                load_cr3(next->pgd);
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);

                /* Stop flush IPIs for the previous mm */
                cpumask_clear_cpu(cpu, mm_cpumask(prev));

                /*
                 * Load the LDT, if the LDT is different.
                 *
                 * It's possible that leave_mm(prev) has been called.  If so,
                 * prev->context.ldt could be out of sync with the LDT
                 * descriptor or the LDT register.  This can only happen if
                 * prev->context.ldt is non-NULL, since we never free an LDT.
                 * But LDTs can't be shared across mms, so prev->context.ldt
                 * won't be equal to next->context.ldt.
                 */
                if (unlikely(prev->context.ldt != next->context.ldt))
                        load_LDT_nolock(&next->context);
        }
#ifdef CONFIG_SMP
        else {
                this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
                BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);

                if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
                        /*
                         * On established mms, the mm_cpumask is only changed
                         * from IRQ context, from ptep_clear_flush() while in
                         * lazy TLB mode, and here.  IRQs are blocked during
                         * schedule(), protecting us from simultaneous changes.
                         */
                        cpumask_set_cpu(cpu, mm_cpumask(next));
                        /*
                         * We were in lazy TLB mode and leave_mm() disabled
                         * TLB flush IPI delivery.  We must reload CR3 to make
                         * sure we don't use freed page tables.
                         */
                        load_cr3(next->pgd);
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
                        load_LDT_nolock(&next->context);
                }
        }
#endif
}

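/*
 * activate_mm() is used on the exec path (exec_mmap()), where there is no
 * previous user state worth preserving: it notifies the paravirt layer and
 * then reuses switch_mm() to install the new mm.
 */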
#define activate_mm(prev, next)                 \
do {                                            \
        paravirt_activate_mm((prev), (next));   \
        switch_mm((prev), (next), NULL);        \
} while (0)

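/*
 * deactivate_mm() runs when a task gives up its mm (exit, or exec just
 * before the new image is set up).  Clearing the user segment registers
 * here ensures no stale selector can keep referencing the old mm's LDT;
 * the 32-bit and 64-bit variants differ only in how gs/fs are cleared.
 */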
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        lazy_load_gs(0);                        \
} while (0)
#else
#define deactivate_mm(tsk, mm)                  \
do {                                            \
        load_gs_index(0);                       \
        loadsegment(fs, 0);                     \
} while (0)
#endif

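/*
 * arch_dup_mmap() and arch_exit_mmap() forward fork-time duplication and
 * final teardown of an mm to the paravirt layer, so hypervisors such as
 * Xen can pin and unpin the page tables backing it.
 */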
static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        paravirt_arch_dup_mmap(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        paravirt_arch_exit_mmap(mm);
}

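/*
 * Called while exec builds the new mm; on x86 this only resets the MPX
 * bounds-directory state for the new image.
 */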
static inline void arch_bprm_mm_init(struct mm_struct *mm,
                                     struct vm_area_struct *vma)
{
        mpx_mm_init(mm);
}

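/*
 * Called from the munmap() path so MPX can release the bounds tables
 * covering the range being unmapped.
 */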
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                              unsigned long start, unsigned long end)
{
        mpx_notify_unmap(mm, vma, start, end);
}

#endif /* _ASM_X86_MMU_CONTEXT_H */