1 #ifndef _ASM_X86_MMU_CONTEXT_H
2 #define _ASM_X86_MMU_CONTEXT_H
5 #include <linux/atomic.h>
6 #include <linux/mm_types.h>
8 #include <trace/events/tlb.h>
10 #include <asm/pgalloc.h>
11 #include <asm/tlbflush.h>
12 #include <asm/paravirt.h>
13 #ifndef CONFIG_PARAVIRT
14 #include <asm-generic/mm_hooks.h>
16 static inline void paravirt_activate_mm(struct mm_struct
*prev
,
17 struct mm_struct
*next
)
20 #endif /* !CONFIG_PARAVIRT */
/*
 * Used for LDT copy/destruction.  Defined out of line (ldt.c);
 * init_new_context() returns 0 on success, negative errno on failure.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
/*
 * Called when the scheduler switches to a kernel thread: mark this CPU's
 * TLB state lazy so that cross-CPU flush IPIs for @mm can be skipped
 * until we switch back to a real user mm.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Only meaningful on SMP; cpu_tlbstate tracking exists only there. */
	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
37 static inline void switch_mm(struct mm_struct
*prev
, struct mm_struct
*next
,
38 struct task_struct
*tsk
)
40 unsigned cpu
= smp_processor_id();
42 if (likely(prev
!= next
)) {
44 this_cpu_write(cpu_tlbstate
.state
, TLBSTATE_OK
);
45 this_cpu_write(cpu_tlbstate
.active_mm
, next
);
47 cpumask_set_cpu(cpu
, mm_cpumask(next
));
49 /* Re-load page tables */
51 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH
, TLB_FLUSH_ALL
);
53 /* Stop flush ipis for the previous mm */
54 cpumask_clear_cpu(cpu
, mm_cpumask(prev
));
56 /* Load the LDT, if the LDT is different: */
57 if (unlikely(prev
->context
.ldt
!= next
->context
.ldt
))
58 load_LDT_nolock(&next
->context
);
62 this_cpu_write(cpu_tlbstate
.state
, TLBSTATE_OK
);
63 BUG_ON(this_cpu_read(cpu_tlbstate
.active_mm
) != next
);
65 if (!cpumask_test_cpu(cpu
, mm_cpumask(next
))) {
67 * On established mms, the mm_cpumask is only changed
68 * from irq context, from ptep_clear_flush() while in
69 * lazy tlb mode, and here. Irqs are blocked during
70 * schedule, protecting us from simultaneous changes.
72 cpumask_set_cpu(cpu
, mm_cpumask(next
));
74 * We were in lazy tlb mode and leave_mm disabled
75 * tlb flush IPI delivery. We must reload CR3
76 * to make sure to use no freed page tables.
79 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH
, TLB_FLUSH_ALL
);
80 load_LDT_nolock(&next
->context
);
/* Activate @next on this CPU: paravirt hook, then the real mm switch. */
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);
/*
 * Tear down the user segment registers when an mm is deactivated.
 * NOTE(review): the extraction kept only the two "#define deactivate_mm"
 * lines (original lines 93 and 98) and "loadsegment(fs, 0)"; the
 * 32-bit/64-bit conditional and the gs-clearing statements are restored
 * to match the upstream kernel source — confirm against the tree.
 */
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif
105 #endif /* _ASM_X86_MMU_CONTEXT_H */