1 | #ifndef _ALPHA_TLBFLUSH_H |
2 | #define _ALPHA_TLBFLUSH_H | |
3 | ||
1da177e4 LT |
4 | #include <linux/mm.h> |
5 | #include <asm/compiler.h> | |
6 | ||
7 | #ifndef __EXTERN_INLINE | |
8 | #define __EXTERN_INLINE extern inline | |
9 | #define __MMU_EXTERN_INLINE | |
10 | #endif | |
11 | ||
12 | extern void __load_new_mm_context(struct mm_struct *); | |
13 | ||
14 | ||
15 | /* Use a few helper functions to hide the ugly broken ASN | |
16 | numbers on early Alphas (ev4 and ev45). */ | |
17 | ||
18 | __EXTERN_INLINE void | |
19 | ev4_flush_tlb_current(struct mm_struct *mm) | |
20 | { | |
21 | __load_new_mm_context(mm); | |
22 | tbiap(); | |
23 | } | |
24 | ||
25 | __EXTERN_INLINE void | |
26 | ev5_flush_tlb_current(struct mm_struct *mm) | |
27 | { | |
28 | __load_new_mm_context(mm); | |
29 | } | |
30 | ||
31 | /* Flush just one page in the current TLB set. We need to be very | |
32 | careful about the icache here, there is no way to invalidate a | |
33 | specific icache page. */ | |
34 | ||
35 | __EXTERN_INLINE void | |
36 | ev4_flush_tlb_current_page(struct mm_struct * mm, | |
37 | struct vm_area_struct *vma, | |
38 | unsigned long addr) | |
39 | { | |
40 | int tbi_flag = 2; | |
41 | if (vma->vm_flags & VM_EXEC) { | |
42 | __load_new_mm_context(mm); | |
43 | tbi_flag = 3; | |
44 | } | |
45 | tbi(tbi_flag, addr); | |
46 | } | |
47 | ||
48 | __EXTERN_INLINE void | |
49 | ev5_flush_tlb_current_page(struct mm_struct * mm, | |
50 | struct vm_area_struct *vma, | |
51 | unsigned long addr) | |
52 | { | |
53 | if (vma->vm_flags & VM_EXEC) | |
54 | __load_new_mm_context(mm); | |
55 | else | |
56 | tbi(2, addr); | |
57 | } | |
58 | ||
59 | ||
/* Select the CPU-specific implementations: dispatched at runtime via
   the machine vector on generic kernels, bound at compile time when
   the CPU family is fixed by the config.  */
#ifdef CONFIG_ALPHA_GENERIC
# define flush_tlb_current		alpha_mv.mv_flush_tlb_current
# define flush_tlb_current_page		alpha_mv.mv_flush_tlb_current_page
#else
# ifdef CONFIG_ALPHA_EV4
#  define flush_tlb_current		ev4_flush_tlb_current
#  define flush_tlb_current_page	ev4_flush_tlb_current_page
# else
#  define flush_tlb_current		ev5_flush_tlb_current
#  define flush_tlb_current_page	ev5_flush_tlb_current_page
# endif
#endif

/* Undo the __EXTERN_INLINE default we installed at the top.  */
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

78 | /* Flush current user mapping. */ | |
79 | static inline void | |
80 | flush_tlb(void) | |
81 | { | |
82 | flush_tlb_current(current->active_mm); | |
83 | } | |
84 | ||
85 | /* Flush someone else's user mapping. */ | |
86 | static inline void | |
87 | flush_tlb_other(struct mm_struct *mm) | |
88 | { | |
89 | unsigned long *mmc = &mm->context[smp_processor_id()]; | |
90 | /* Check it's not zero first to avoid cacheline ping pong | |
91 | when possible. */ | |
92 | if (*mmc) *mmc = 0; | |
93 | } | |
94 | ||
1da177e4 LT |
95 | #ifndef CONFIG_SMP |
96 | /* Flush everything (kernel mapping may also have changed | |
97 | due to vmalloc/vfree). */ | |
98 | static inline void flush_tlb_all(void) | |
99 | { | |
100 | tbia(); | |
101 | } | |
102 | ||
103 | /* Flush a specified user mapping. */ | |
104 | static inline void | |
105 | flush_tlb_mm(struct mm_struct *mm) | |
106 | { | |
107 | if (mm == current->active_mm) | |
108 | flush_tlb_current(mm); | |
109 | else | |
110 | flush_tlb_other(mm); | |
111 | } | |
112 | ||
113 | /* Page-granular tlb flush. */ | |
114 | static inline void | |
115 | flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) | |
116 | { | |
117 | struct mm_struct *mm = vma->vm_mm; | |
118 | ||
119 | if (mm == current->active_mm) | |
120 | flush_tlb_current_page(mm, vma, addr); | |
121 | else | |
122 | flush_tlb_other(mm); | |
123 | } | |
124 | ||
125 | /* Flush a specified range of user mapping. On the Alpha we flush | |
126 | the whole user tlb. */ | |
127 | static inline void | |
128 | flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |
129 | unsigned long end) | |
130 | { | |
131 | flush_tlb_mm(vma->vm_mm); | |
132 | } | |
133 | ||
134 | #else /* CONFIG_SMP */ | |
135 | ||
136 | extern void flush_tlb_all(void); | |
137 | extern void flush_tlb_mm(struct mm_struct *); | |
138 | extern void flush_tlb_page(struct vm_area_struct *, unsigned long); | |
139 | extern void flush_tlb_range(struct vm_area_struct *, unsigned long, | |
140 | unsigned long); | |
141 | ||
142 | #endif /* CONFIG_SMP */ | |
143 | ||
144 | #define flush_tlb_kernel_range(start, end) flush_tlb_all() | |
145 | ||
146 | #endif /* _ALPHA_TLBFLUSH_H */ |