/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
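
/* Hash a virtual address into a direct-mapped TSB of "nentries" slots
 * (always a power of two): shift out the page offset, then mask.  With
 * the default 8K base pages (hash_shift == PAGE_SHIFT == 13), vaddr
 * 0x2000 lands in slot 1 of the base TSB.
 */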
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
        vaddr >>= hash_shift;
        return vaddr & (nentries - 1);
}
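
/* A TSB tag is the virtual address with its low 22 bits dropped; the
 * same encoding is used when __flush_tsb_one() computes the tag it
 * hands to tsb_flush() below.
 */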
static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
        return (tag == (vaddr >> 22));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                unsigned long hash = tsb_hash(v, PAGE_SHIFT,
                                              KERNEL_TSB_NENTRIES);
                struct tsb *ent = &swapper_tsb[hash];

                if (tag_compare(ent->tag, v)) {
                        ent->tag = (1UL << TSB_TAG_INVALID_BIT);
                        membar_storeload_storestore();
                }
        }
}
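
/* Walk the pending unmap addresses in the mmu_gather batch and knock
 * out any matching entries in one user TSB.  "tsb" is the table base,
 * already converted to a physical address by the caller on chips that
 * access the TSB physically (cheetah_plus and hypervisor); tsb_flush()
 * is invoked with the slot address and expected tag for each vaddr.
 */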
static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
        unsigned long i;

        for (i = 0; i < mp->tlb_nr; i++) {
                unsigned long v = mp->vaddrs[i];
                unsigned long tag, ent, hash;

                v &= ~0x1UL;

                hash = tsb_hash(v, hash_shift, nentries);
                ent = tsb + (hash * sizeof(struct tsb));
                tag = (v >> 22UL);

                tsb_flush(ent, tag);
        }
}
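
/* Flush the base page size TSB, and the huge page TSB if one exists,
 * for every address in this mmu_gather batch.  mm->context.lock keeps
 * this serialized against tsb_grow() swapping in a new table.
 */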
void flush_tsb_user(struct mmu_gather *mp)
{
        struct mm_struct *mm = mp->mm;
        unsigned long nentries, base, flags;

        spin_lock_irqsave(&mm->context.lock, flags);

        base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
        nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
        if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                base = __pa(base);
        __flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
        if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
                base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
                nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
                if (tlb_type == cheetah_plus || tlb_type == hypervisor)
                        base = __pa(base);
                __flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
        }
#endif
        spin_unlock_irqrestore(&mm->context.lock, flags);
}
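
/* Map the kernel's configured base and huge page sizes onto the
 * hypervisor's page size index and mask encodings; these feed the
 * hv_tsb_descr fields filled in by setup_tsb_params() below.
 */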
#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif
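
/* Compute the TSB register value (size field plus base address), the
 * mapping address/PTE used on chips that must map the TSB through a
 * locked TLB entry, and the hypervisor TSB descriptor for the given
 * TSB (base or huge) of this address space.
 */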
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
        unsigned long tsb_reg, base, tsb_paddr;
        unsigned long page_sz, tte;

        mm->context.tsb_block[tsb_idx].tsb_nentries =
                tsb_bytes / sizeof(struct tsb);

        base = TSBMAP_BASE;
        tte = pgprot_val(PAGE_KERNEL_LOCKED);
        tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
        BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

        /* Use the smallest page size that can map the whole TSB
         * in one TLB entry.
         */
        switch (tsb_bytes) {
        case 8192 << 0:
                tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
                base += (tsb_paddr & 8192);
#endif
                page_sz = 8192;
                break;

        case 8192 << 1:
                tsb_reg = 0x1UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 2:
                tsb_reg = 0x2UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 3:
                tsb_reg = 0x3UL;
                page_sz = 64 * 1024;
                break;

        case 8192 << 4:
                tsb_reg = 0x4UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 5:
                tsb_reg = 0x5UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 6:
                tsb_reg = 0x6UL;
                page_sz = 512 * 1024;
                break;

        case 8192 << 7:
                tsb_reg = 0x7UL;
                page_sz = 4 * 1024 * 1024;
                break;

        default:
                BUG();
        };
        tte |= pte_sz_bits(page_sz);

        if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                /* Physical mapping, no locked TLB entry for TSB.  */
                tsb_reg |= tsb_paddr;

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
        } else {
                tsb_reg |= base;
                tsb_reg |= (tsb_paddr & (page_sz - 1UL));
                tte |= (tsb_paddr & ~(page_sz - 1UL));

                mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
                mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
                mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
        }

        /* Setup the Hypervisor TSB descriptor.  */
        if (tlb_type == hypervisor) {
                struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_idx = HV_PGSZ_IDX_BASE;
                        break;
#ifdef CONFIG_HUGETLB_PAGE
                case MM_TSB_HUGE:
                        hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
                        break;
#endif
                default:
                        BUG();
                };
                hp->assoc = 1;
                hp->num_ttes = tsb_bytes / 16;
                hp->ctx_idx = 0;
                switch (tsb_idx) {
                case MM_TSB_BASE:
                        hp->pgsz_mask = HV_PGSZ_MASK_BASE;
                        break;
#ifdef CONFIG_HUGETLB_PAGE
                case MM_TSB_HUGE:
                        hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
                        break;
#endif
                default:
                        BUG();
                };
                hp->tsb_base = tsb_paddr;
                hp->resv = 0;
        }
}
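
/* One slab cache per supported TSB size (8KB through 1MB).  Each cache
 * aligns its objects to their size, which provides the size-aligned,
 * physically contiguous allocations that the TSB requires.
 */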
static kmem_cache_t *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
        "tsb_8KB",
        "tsb_16KB",
        "tsb_32KB",
        "tsb_64KB",
        "tsb_128KB",
        "tsb_256KB",
        "tsb_512KB",
        "tsb_1MB",
};

void __init tsb_cache_init(void)
{
        unsigned long i;

        for (i = 0; i < 8; i++) {
                unsigned long size = 8192 << i;
                const char *name = tsb_cache_names[i];

                tsb_caches[i] = kmem_cache_create(name,
                                                  size, size,
                                                  SLAB_HWCACHE_ALIGN |
                                                  SLAB_MUST_HWCACHE_ALIGN,
                                                  NULL, NULL);
                if (!tsb_caches[i]) {
                        prom_printf("Could not create %s cache\n", name);
                        prom_halt();
                }
        }
}

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
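
/* Concretely: a struct tsb entry is 16 bytes (setup_tsb_params() sizes
 * the hypervisor descriptor as tsb_bytes / 16), so the smallest 8KB TSB
 * holds 512 translations and gets a tsb_rss_limit of 384.
 */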
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
        unsigned long max_tsb_size = 1 * 1024 * 1024;
        unsigned long new_size, old_size, flags;
        struct tsb *old_tsb, *new_tsb;
        unsigned long new_cache_index, old_cache_index;
        unsigned long new_rss_limit;
        gfp_t gfp_flags;

        if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
                max_tsb_size = (PAGE_SIZE << MAX_ORDER);
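
        /* Pick the smallest TSB size whose 3/4-full capacity still
         * exceeds the current RSS, capped at max_tsb_size.
         */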
        new_cache_index = 0;
        for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
                unsigned long n_entries = new_size / sizeof(struct tsb);

                n_entries = (n_entries * 3) / 4;
                if (n_entries > rss)
                        break;

                new_cache_index++;
        }

        if (new_size == max_tsb_size)
                new_rss_limit = ~0UL;
        else
                new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
        gfp_flags = GFP_KERNEL;
        if (new_size > (PAGE_SIZE * 2))
                gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

        new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
        if (unlikely(!new_tsb)) {
                /* Not being able to fork due to a high-order TSB
                 * allocation failure is very bad behavior.  Just back
                 * down to a 0-order allocation and force no TSB
                 * growing for this address space.
                 */
                if (mm->context.tsb_block[tsb_index].tsb == NULL &&
                    new_cache_index > 0) {
                        new_cache_index = 0;
                        new_size = 8192;
                        new_rss_limit = ~0UL;
                        goto retry_tsb_alloc;
                }

                /* If we failed on a TSB grow, we are under serious
                 * memory pressure so don't try to grow any more.
                 */
                if (mm->context.tsb_block[tsb_index].tsb != NULL)
                        mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
                return;
        }

        /* Mark all tags as invalid.  */
        tsb_init(new_tsb, new_size);

        /* Ok, we are about to commit the changes.  If we are
         * growing an existing TSB the locking is very tricky,
         * so WATCH OUT!
         *
         * We have to hold mm->context.lock while committing to the
         * new TSB; this synchronizes us with processors in
         * flush_tsb_user() and switch_mm() for this address space.
         *
         * But even with that lock held, processors run asynchronously
         * accessing the old TSB via TLB miss handling.  This is OK
         * because those actions are just propagating state from the
         * Linux page tables into the TSB; page table mappings are not
         * being changed.  If a real fault occurs, the processor will
         * synchronize with us when it hits flush_tsb_user(); this is
         * also true for the case where vmscan is modifying the page
         * tables.  The only thing we need to be careful with is to
         * skip any locked TSB entries during copy_tsb().
         *
         * When we finish committing to the new TSB, we have to drop
         * the lock and ask all other cpus running this address space
         * to run tsb_context_switch() to see the new TSB table.
         */
        spin_lock_irqsave(&mm->context.lock, flags);
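
        /* The low 3 bits of tsb_reg_val hold the size index programmed
         * by setup_tsb_params(), which is also the tsb_caches[] slot
         * the old table came from.
         */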
        old_tsb = mm->context.tsb_block[tsb_index].tsb;
        old_cache_index =
                (mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
        old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
                    sizeof(struct tsb));

        /* Handle multiple threads trying to grow the TSB at the same time.
         * One will get in here first, and bump the size and the RSS limit.
         * The others will get in here next and hit this check.
         */
        if (unlikely(old_tsb &&
                     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
                spin_unlock_irqrestore(&mm->context.lock, flags);

                kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
                return;
        }

        mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

        if (old_tsb) {
                extern void copy_tsb(unsigned long old_tsb_base,
                                     unsigned long old_tsb_size,
                                     unsigned long new_tsb_base,
                                     unsigned long new_tsb_size);
                unsigned long old_tsb_base = (unsigned long) old_tsb;
                unsigned long new_tsb_base = (unsigned long) new_tsb;

                if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
                        old_tsb_base = __pa(old_tsb_base);
                        new_tsb_base = __pa(new_tsb_base);
                }
                copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
        }

        mm->context.tsb_block[tsb_index].tsb = new_tsb;
        setup_tsb_params(mm, tsb_index, new_size);

        spin_unlock_irqrestore(&mm->context.lock, flags);

        /* If old_tsb is NULL, we're being invoked for the first time
         * from init_new_context().
         */
        if (old_tsb) {
                /* Reload it on the local cpu.  */
                tsb_context_switch(mm);

                /* Now force other processors to do the same.  */
                smp_tsb_sync(mm);

                /* Now it is safe to free the old tsb.  */
                kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
        }
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
        unsigned long huge_pte_count;
#endif
        unsigned int i;

        spin_lock_init(&mm->context.lock);

        mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
        /* We reset it to zero because the fork() page copying
         * will re-increment the counters as the parent PTEs are
         * copied into the child address space.
         */
        huge_pte_count = mm->context.huge_pte_count;
        mm->context.huge_pte_count = 0;
#endif

        /* copy_mm() copies over the parent's mm_struct before calling
         * us, so we need to zero out the TSB pointer or else tsb_grow()
         * will be confused and think there is an older TSB to free up.
         */
        for (i = 0; i < MM_NUM_TSBS; i++)
                mm->context.tsb_block[i].tsb = NULL;

        /* If this is a fork, inherit the parent's TSB size.  We would
         * grow it to that size on the first page fault anyway.
         */
        tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
        if (unlikely(huge_pte_count))
                tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

        if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
                return -ENOMEM;

        return 0;
}
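
/* Free one TSB back to the slab cache it was allocated from, using the
 * size index stored in the low bits of tsb_reg_val.
 */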
static void tsb_destroy_one(struct tsb_config *tp)
{
        unsigned long cache_index;

        if (!tp->tsb)
                return;
        cache_index = tp->tsb_reg_val & 0x7UL;
        kmem_cache_free(tsb_caches[cache_index], tp->tsb);
        tp->tsb = NULL;
        tp->tsb_reg_val = 0UL;
}
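
/* Tear down the address space: release all of its TSBs and, if a
 * hardware context number was ever assigned, return it to
 * mmu_context_bmap under ctx_alloc_lock.
 */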
void destroy_context(struct mm_struct *mm)
{
        unsigned long flags, i;

        for (i = 0; i < MM_NUM_TSBS; i++)
                tsb_destroy_one(&mm->context.tsb_block[i]);

        spin_lock_irqsave(&ctx_alloc_lock, flags);

        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
                mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
        }

        spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}