#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *      (1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *          (this is a no-op on ia64).
 *      (2) Clear the relevant portions of the page-table.
 *      (3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs.
 *      (4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *      tlb <- tlb_gather_mmu(mm, full_mm_flush);       // start unmap for address space MM
 *      {
 *        for each vma that needs a shootdown do {
 *          tlb_start_vma(tlb, vma);
 *          for each page-table-entry PTE that needs to be removed do {
 *            tlb_remove_tlb_entry(tlb, pte, address);
 *            if (pte refers to a normal page) {
 *              tlb_remove_page(tlb, page);
 *            }
 *          }
 *          tlb_end_vma(tlb, vma);
 *        }
 *      }
 *      tlb_finish_mmu(tlb, start, end);        // finish unmap for address space MM
 */
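/*
 * To make the correspondence concrete: in this file, step (2) is driven by the
 * caller clearing PTEs and announcing each one via tlb_remove_tlb_entry(),
 * step (3) is performed by ia64_tlb_flush_mmu(), and step (4) is the
 * free_page_and_swap_cache() calls, issued either immediately (fast mode) or
 * after the flush.  Step (1) needs no code here because it is a no-op on ia64.
 */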
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

#ifdef CONFIG_SMP
# define FREE_PTE_NR            2048
# define tlb_fast_mode(tlb)     ((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR            0
# define tlb_fast_mode(tlb)     (1)
#endif
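/*
 * In fast mode there is no batching at all: tlb_remove_page() frees each page
 * immediately.  That is why FREE_PTE_NR can be 0 on UP kernels, where the
 * pages[] array in struct mmu_gather becomes a zero-length member that
 * occupies no space.
 */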

struct mmu_gather {
        struct mm_struct        *mm;
        unsigned int            nr;             /* == ~0U => fast mode */
        unsigned char           fullmm;         /* non-zero means full mm flush */
        unsigned char           need_flush;     /* really unmapped some PTEs? */
        unsigned long           start_addr;
        unsigned long           end_addr;
        struct page             *pages[FREE_PTE_NR];
};

/* Users of the generic TLB shootdown code must declare this storage space. */
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
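/*
 * This header only declares the per-CPU storage; the matching
 * DEFINE_PER_CPU() for mmu_gathers is expected to live in the arch's mm setup
 * code (arch/ia64/mm/init.c in kernels of this vintage).
 */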
69 | ||
70 | /* | |
71 | * Flush the TLB for address range START to END and, if not in fast mode, release the | |
72 | * freed pages that where gathered up to this point. | |
73 | */ | |
74 | static inline void | |
75 | ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) | |
76 | { | |
77 | unsigned int nr; | |
78 | ||
79 | if (!tlb->need_flush) | |
80 | return; | |
81 | tlb->need_flush = 0; | |
82 | ||
83 | if (tlb->fullmm) { | |
84 | /* | |
85 | * Tearing down the entire address space. This happens both as a result | |
86 | * of exit() and execve(). The latter case necessitates the call to | |
87 | * flush_tlb_mm() here. | |
88 | */ | |
89 | flush_tlb_mm(tlb->mm); | |
90 | } else if (unlikely (end - start >= 1024*1024*1024*1024UL | |
91 | || REGION_NUMBER(start) != REGION_NUMBER(end - 1))) | |
92 | { | |
93 | /* | |
94 | * If we flush more than a tera-byte or across regions, we're probably | |
95 | * better off just flushing the entire TLB(s). This should be very rare | |
96 | * and is not worth optimizing for. | |
97 | */ | |
98 | flush_tlb_all(); | |
99 | } else { | |
100 | /* | |
101 | * XXX fix me: flush_tlb_range() should take an mm pointer instead of a | |
102 | * vma pointer. | |
103 | */ | |
104 | struct vm_area_struct vma; | |
105 | ||
106 | vma.vm_mm = tlb->mm; | |
107 | /* flush the address range from the tlb: */ | |
108 | flush_tlb_range(&vma, start, end); | |
109 | /* now flush the virt. page-table area mapping the address range: */ | |
110 | flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end)); | |
111 | } | |
112 | ||
113 | /* lastly, release the freed pages */ | |
114 | nr = tlb->nr; | |
115 | if (!tlb_fast_mode(tlb)) { | |
116 | unsigned long i; | |
117 | tlb->nr = 0; | |
118 | tlb->start_addr = ~0UL; | |
119 | for (i = 0; i < nr; ++i) | |
120 | free_page_and_swap_cache(tlb->pages[i]); | |
121 | } | |
122 | } | |
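/*
 * A note on the second flush_tlb_range() above: ia64_thash() yields the
 * address of a virtual address's entry in the virtually-mapped linear page
 * table, so that call evicts any TLB entries covering the page table itself,
 * not just the data pages in [start, end).
 */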

/*
 * Return a pointer to an initialized struct mmu_gather.
 */
static inline struct mmu_gather *
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
        struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

        tlb->mm = mm;
        /*
         * Use fast mode if only 1 CPU is online.
         *
         * It would be tempting to turn on fast-mode for full_mm_flush as well.  But this
         * doesn't work because of speculative accesses and software prefetching: the page
         * table of "mm" may be (and usually is) the currently active page table and even
         * though the kernel won't do any user-space accesses during the TLB shootdown, a
         * compiler might use speculation or lfetch.fault on what happens to be a valid
         * user-space address.  This in turn could trigger a TLB miss fault (or a VHPT
         * walk) and re-insert a TLB entry we just removed.  Slow mode avoids such
         * problems.  (We could make fast-mode work by switching the current task to a
         * different "mm" during the shootdown.) --davidm 08/02/2002
         */
        tlb->nr = (num_online_cpus() == 1) ? ~0U : 0;
        tlb->fullmm = full_mm_flush;
        tlb->start_addr = ~0UL;
        return tlb;
}
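/*
 * Note that get_cpu_var() above disables preemption, which is what makes the
 * per-CPU mmu_gathers slot safe to use; preemption stays disabled until the
 * matching put_cpu_var() in tlb_finish_mmu() below.
 */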

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
tlb_finish_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
        /*
         * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
         * tlb->end_addr.
         */
        ia64_tlb_flush_mmu(tlb, start, end);

        /* keep the page table cache within bounds */
        check_pgt_cache();

        put_cpu_var(mmu_gathers);
}
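/*
 * This is also why tlb_finish_mmu() takes explicit start/end arguments: if the
 * final batch freed no pages, tlb->start_addr still holds its ~0UL sentinel,
 * so the caller-supplied range is the only reliable one for the final flush.
 */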

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline void
tlb_remove_page (struct mmu_gather *tlb, struct page *page)
{
        tlb->need_flush = 1;

        if (tlb_fast_mode(tlb)) {
                free_page_and_swap_cache(page);
                return;
        }
        tlb->pages[tlb->nr++] = page;
        if (tlb->nr >= FREE_PTE_NR)
                ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
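/*
 * The FREE_PTE_NR check above is what bounds the batch: once 2048 pages have
 * been gathered, the accumulated range is flushed and the pages released
 * early, so the pages[] array can never overflow even for huge unmaps.
 */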

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
        if (tlb->start_addr == ~0UL)
                tlb->start_addr = address;
        tlb->end_addr = address + PAGE_SIZE;
}
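/*
 * Unmapping proceeds through addresses in ascending order, so recording the
 * first address once and overwriting end_addr on every call leaves
 * [start_addr, end_addr) covering exactly the PTEs removed since the last
 * flush; ia64_tlb_flush_mmu() then flushes precisely that range.
 */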

#define tlb_migrate_finish(mm)  platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)                 do { } while (0)
#define tlb_end_vma(tlb, vma)                   do { } while (0)

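/*
 * tlb_start_vma()/tlb_end_vma() are no-ops here: step (1) of the procedure
 * described at the top of this file (the virtual-cache flush) is unnecessary
 * on ia64, so there is no per-VMA work to do.  tlb_migrate_finish() defers to
 * the machine vector (see <asm/machvec.h>) so individual platforms can hook
 * task migration if they need to.
 */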
#define tlb_remove_tlb_entry(tlb, ptep, addr)           \
do {                                                    \
        tlb->need_flush = 1;                            \
        __tlb_remove_tlb_entry(tlb, ptep, addr);        \
} while (0)

#define pte_free_tlb(tlb, ptep)                         \
do {                                                    \
        tlb->need_flush = 1;                            \
        __pte_free_tlb(tlb, ptep);                      \
} while (0)

#define pmd_free_tlb(tlb, pmdp)                         \
do {                                                    \
        tlb->need_flush = 1;                            \
        __pmd_free_tlb(tlb, pmdp);                      \
} while (0)

#define pud_free_tlb(tlb, pudp)                         \
do {                                                    \
        tlb->need_flush = 1;                            \
        __pud_free_tlb(tlb, pudp);                      \
} while (0)
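/*
 * All four wrappers above set need_flush before delegating to the __-prefixed
 * helpers; without this, ia64_tlb_flush_mmu() would return early and skip the
 * TLB flush when a shootdown freed page-table pages but no normal pages.
 */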

#endif /* _ASM_IA64_TLB_H */