/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_HAVE_RCU_TABLE_FREE
/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by some
 * other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 *
 */
struct mmu_table_batch {
	struct rcu_head		rcu;
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#endif
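
/*
 * Illustrative sketch (not part of this header): roughly how the batching
 * described above is driven from the mm side.  Details are simplified; see
 * the real tlb_remove_table() and tlb_table_flush() for the exact logic.
 *
 *	void tlb_remove_table(struct mmu_gather *tlb, void *table)
 *	{
 *		struct mmu_table_batch **batch = &tlb->batch;
 *
 *		if (*batch == NULL) {
 *			*batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 *			if (*batch == NULL) {
 *				// allocation failed: free this single table
 *				// the slow way to guarantee progress
 *				tlb_remove_table_one(table);
 *				return;
 *			}
 *			(*batch)->nr = 0;
 *		}
 *		(*batch)->tables[(*batch)->nr++] = table;
 *		if ((*batch)->nr == MAX_TABLE_BATCH)
 *			tlb_table_flush(tlb);	// queues an RCU-sched callback
 *	}
 */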

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

/* struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif
	unsigned long		start;
	unsigned long		end;
	/* we are in the middle of an operation to clear
	 * a full mm and can make some optimizations */
	unsigned int		fullmm : 1,
	/* we have performed an operation which
	 * requires a complete flush of the tlb */
				need_flush_all : 1;

	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];
	unsigned int		batch_count;
};

#define HAVE_GENERIC_MMU_GATHER

void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
			unsigned long end);
int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (!__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}
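
/*
 * Illustrative sketch (not part of this header): the usual calling pattern,
 * roughly as the unmap paths in mm/memory.c use this API.  Everything other
 * than the tlb_* calls below is hypothetical pseudo-code.
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm, start, end);
 *	for_each_pte_in_range(start, end) {	// hypothetical iterator
 *		pte = ptep_get_and_clear(mm, addr, ptep);
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);
 *		tlb_remove_page(&tlb, page);	// may flush if the batch is full
 *	}
 *	tlb_finish_mmu(&tlb, start, end);	// final TLB flush, frees batches
 */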

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + PAGE_SIZE);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
}
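
/*
 * Illustrative sketch (not part of this header): how the range tracking above
 * behaves on a non-fullmm gather.  The start/end sentinels make min()/max()
 * work, and a zero tlb->end means nothing was unmapped.
 *
 *	__tlb_reset_range(tlb);		// !fullmm: start = TASK_SIZE, end = 0
 *	__tlb_adjust_range(tlb, addr);	// start = addr, end = addr + PAGE_SIZE
 *	...
 *	if (tlb->end)			// non-zero end: something was unmapped,
 *		tlb_flush(tlb);		// so the flush cannot be skipped
 */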

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
#define tlb_start_vma(tlb, vma) do { } while (0)
#endif

#define __tlb_end_vma(tlb, vma)					\
	do {							\
		if (!tlb->fullmm && tlb->end) {			\
			tlb_flush(tlb);				\
			__tlb_reset_range(tlb);			\
		}						\
	} while (0)

#ifndef tlb_end_vma
#define tlb_end_vma	__tlb_end_vma
#endif

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate.  This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
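
/*
 * Illustrative sketch (not part of this header): an architecture whose
 * asm/tlb.h needs per-PTE information can supply its own
 * __tlb_remove_tlb_entry() before including this file; the #ifndef above then
 * keeps that definition.  The helper name below is purely hypothetical.
 *
 *	#define __tlb_remove_tlb_entry(tlb, ptep, address)	\
 *		arch_note_unmapped_pte(tlb, address)
 */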

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)		\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);	\
	} while (0)

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#ifndef __ARCH_HAS_4LEVEL_HACK
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address);		\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
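
/*
 * Illustrative sketch (not part of this header): on an architecture that
 * selects CONFIG_HAVE_RCU_TABLE_FREE, the arch __pte_free_tlb() hook would
 * typically hand the page-table page to tlb_remove_table() so it is freed
 * through the semi-RCU batching above.  arch_pte_to_table() is a hypothetical
 * stand-in for the arch-specific conversion.
 *
 *	#define __pte_free_tlb(tlb, ptep, address)		\
 *		tlb_remove_table(tlb, arch_pte_to_table(ptep))
 */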

#define tlb_migrate_finish(mm) do {} while (0)

#endif /* _ASM_GENERIC__TLB_H */