Commit | Line | Data |
---|---|---|
1965aae3 PA |
1 | #ifndef _ASM_X86_MMU_CONTEXT_32_H |
2 | #define _ASM_X86_MMU_CONTEXT_32_H | |
1da177e4 | 3 | |
1da177e4 LT |
/*
 * enter_lazy_tlb - mark this cpu as lazily holding its current mm's TLB.
 *
 * On SMP, a cpu whose per-cpu TLB state is TLBSTATE_OK drops to
 * TLBSTATE_LAZY; any other state is left untouched.  On UP this is a no-op.
 * Neither @mm nor @tsk is examined here.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#ifdef CONFIG_SMP
	/* Only an actively-loaded (TLBSTATE_OK) cpu can go lazy. */
	if (x86_read_percpu(cpu_tlbstate.state) != TLBSTATE_OK)
		return;
	x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}
11 | ||
/*
 * switch_mm - switch this cpu's address space from @prev to @next.
 *
 * Ordering is deliberate: the cpu is cleared from prev->cpu_vm_mask
 * *before* cr3 is reloaded so that, per the existing comment, TLB flush
 * IPIs for the previous mm stop being delivered to this cpu.  On SMP the
 * per-cpu tlbstate is stamped TLBSTATE_OK / active_mm = next before the
 * cpu is added to next->cpu_vm_mask.  The LDT is reloaded only when it
 * actually differs between the two contexts.
 *
 * The SMP-only else branch handles prev == next: we may have been in lazy
 * TLB mode, in which case (per the comment below) leave_mm disabled flush
 * IPI delivery and cr3/LDT must be reloaded here.
 * NOTE(review): @tsk is unused in this 32-bit variant — presumably kept
 * for interface parity with other arches; confirm against callers.
 */
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		/* Publish the new active mm before joining its cpu mask. */
		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
		x86_write_percpu(cpu_tlbstate.active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
		/* Same mm: we must still leave lazy TLB mode. */
		x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
		/* Sanity: lazy mode never changes the recorded active_mm. */
		BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}
51 | ||
/*
 * deactivate_mm - called while an address space is being torn down.
 *
 * Clears %gs so no stale selector referring to the dying mm's TLS/LDT
 * entries remains loaded.  NOTE(review): the exact motivation isn't
 * visible from this header — confirm against the core-mm callers.
 *
 * Fix: the expansion must NOT end in a semicolon.  The caller writes
 * `deactivate_mm(tsk, mm);` itself, and a trailing semicolon here both
 * produces an empty statement and breaks use inside an un-braced
 * `if (...) deactivate_mm(tsk, mm); else ...`.
 */
#define deactivate_mm(tsk, mm)			\
	asm("movl %0,%%gs": :"r" (0))
1da177e4 | 54 | |
1965aae3 | 55 | #endif /* _ASM_X86_MMU_CONTEXT_32_H */ |