#ifndef _ASM_POWERPC_TLBFLUSH_H
#define _ASM_POWERPC_TLBFLUSH_H
/*
 * TLB flushing:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
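/*
 * Illustrative usage (hypothetical caller, not part of this header):
 * generic mm code invalidates translations after changing PTEs, e.g.
 *
 *	ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * or, for a span of pages:
 *
 *	flush_tlb_range(vma, start, end);
 */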
#ifdef __KERNEL__

struct mm_struct;
struct vm_area_struct;

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
/*
 * TLB flushing for software loaded TLB chips
 *
 * TODO: (CONFIG_FSL_BOOKE) determine if flush_tlb_range &
 * flush_tlb_kernel_range are best implemented as tlbia vs
 * specific tlbie's
 */
31 | ||
32 | extern void _tlbie(unsigned long address); | |
33 | ||
34 | #if defined(CONFIG_40x) || defined(CONFIG_8xx) | |
35 | #define _tlbia() asm volatile ("tlbia; sync" : : : "memory") | |
36 | #else /* CONFIG_44x || CONFIG_FSL_BOOKE */ | |
37 | extern void _tlbia(void); | |
38 | #endif | |
1970282f | 39 | |
62102307 DG |
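/*
 * A single page can be invalidated with one tlbie, but these parts
 * have no cheap way to flush by context or address range from here,
 * so the mm- and range-level operations below fall back to
 * invalidating the whole TLB with _tlbia().
 */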
static inline void flush_tlb_mm(struct mm_struct *mm)
{
        _tlbia();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        _tlbie(vmaddr);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
                                         unsigned long vmaddr)
{
        _tlbie(vmaddr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        _tlbia();
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        _tlbia();
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

#else
/*
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
#include <asm/page.h>

#define PPC64_TLB_BATCH_NR 192

struct ppc64_tlb_batch {
        int                     active;
        unsigned long           index;
        struct mm_struct        *mm;
        real_pte_t              pte[PPC64_TLB_BATCH_NR];
        unsigned long           vaddr[PPC64_TLB_BATCH_NR];
        unsigned int            psize;
        int                     ssize;
};
DECLARE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, unsigned long pte, int huge);

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE

static inline void arch_enter_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        batch->active = 1;
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);

        if (batch->index)
                __flush_tlb_pending(batch);
        batch->active = 0;
}

#define arch_flush_lazy_mmu_mode()      do {} while (0)


extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
                            int ssize, int local);
extern void flush_hash_range(unsigned long number, int local);

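/*
 * On hash-MMU 64-bit CPUs the hardware hash table shadows the Linux
 * page tables, and invalidations are driven from hpte_need_flush()
 * as the Linux PTEs are changed, so the generic flush operations
 * below have nothing left to do.
 */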
134 | |
135 | static inline void flush_tlb_mm(struct mm_struct *mm) | |
136 | { | |
1970282f SR |
137 | } |
138 | ||
139 | static inline void flush_tlb_page(struct vm_area_struct *vma, | |
62102307 | 140 | unsigned long vmaddr) |
1970282f | 141 | { |
1970282f SR |
142 | } |
143 | ||
144 | static inline void flush_tlb_page_nohash(struct vm_area_struct *vma, | |
145 | unsigned long vmaddr) | |
146 | { | |
1970282f SR |
147 | } |
148 | ||
149 | static inline void flush_tlb_range(struct vm_area_struct *vma, | |
62102307 | 150 | unsigned long start, unsigned long end) |
1970282f | 151 | { |
1970282f SR |
152 | } |
153 | ||
154 | static inline void flush_tlb_kernel_range(unsigned long start, | |
62102307 | 155 | unsigned long end) |
1970282f | 156 | { |
1970282f SR |
157 | } |
158 | ||
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                                     unsigned long end);


#endif

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
}

#endif /*__KERNEL__ */
#endif /* _ASM_POWERPC_TLBFLUSH_H */