[SPARC64]: Register kernel TSB with hypervisor.
[deliverable/linux.git] / arch/sparc64/mm/tsb.c
/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}
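/* Illustrative example: with the 8K base page (PAGE_SHIFT == 13) and a
 * 512-entry TSB, vaddr 0x402000 hashes to (0x402000 >> 13) & 511 == 1.
 */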

static inline int tag_compare(unsigned long tag, unsigned long vaddr, unsigned long context)
{
	return (tag == ((vaddr >> 22) | (context << 48)));
}
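/* The tag compared above packs virtual address bits 63:22 into tag bits
 * 41:0 and the hardware context into bits 63:48, so the two fields never
 * collide.  copy_tsb() below relies on this layout to rebuild a virtual
 * address from a tag.
 */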

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long nentries = mm->context.tsb_nentries;
	unsigned long ctx, base;
	int i;

	if (unlikely(!CTX_VALID(mm->context)))
		return;

	ctx = CTX_HWBITS(mm->context);

	if (tlb_type == cheetah_plus)
		base = __pa(tsb);
	else
		base = (unsigned long) tsb;
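	/* On cheetah_plus the TSB entries are reached through
	 * physical-address ASIs (as copy_tsb() below does), so tsb_flush()
	 * is handed a physical base there; other chips use the kernel
	 * virtual mapping of the TSB.
	 */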

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, nentries);
		ent = base + (hash * sizeof(struct tsb));
		tag = (v >> 22UL) | (ctx << 48UL);

		tsb_flush(ent, tag);
	}
}

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
	       _PAGE_CV | _PAGE_P | _PAGE_W);
	tsb_paddr = __pa(mm->context.tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
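	/* The alignment check above holds because tsb_grow() allocates the
	 * TSB with alloc_pages(), and buddy-allocator blocks are naturally
	 * aligned to their (power-of-two) size.
	 */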

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		tte |= _PAGE_SZ8K;
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		tte |= _PAGE_SZ4MB;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
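	/* For instance, a 32K TSB (8192 << 2) does not fit under a single
	 * 8K mapping, so it is covered by one 64K entry; the 1MB maximum
	 * TSB needs a 4MB entry.
	 */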

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = 0;
		mm->context.tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_reg_val = tsb_reg;
		mm->context.tsb_map_vaddr = base;
		mm->context.tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr;

		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_idx = HV_PGSZ_IDX_8K;
			break;

		case 64 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_64K;
			break;

		case 512 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_idx = HV_PGSZ_IDX_4MB;
			break;
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (PAGE_SIZE) {
		case 8192:
		default:
			hp->pgsz_mask = HV_PGSZ_MASK_8K;
			break;

		case 64 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_64K;
			break;

		case 512 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_512K;
			break;

		case 4 * 1024 * 1024:
			hp->pgsz_mask = HV_PGSZ_MASK_4MB;
			break;
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
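		/* Note: each struct tsb is a 16-byte tag/PTE pair, so
		 * num_ttes above is simply tsb_bytes / 16, and the
		 * direct-mapped TSB is reported to the hypervisor as
		 * 1-way associative.
		 */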
	}
}

/* The page tables are locked against modifications while this
 * runs.
 *
 * XXX do some prefetching...
 */
static void copy_tsb(struct tsb *old_tsb, unsigned long old_size,
		     struct tsb *new_tsb, unsigned long new_size)
{
	unsigned long old_nentries = old_size / sizeof(struct tsb);
	unsigned long new_nentries = new_size / sizeof(struct tsb);
	unsigned long i;

	for (i = 0; i < old_nentries; i++) {
		register unsigned long tag asm("o4");
		register unsigned long pte asm("o5");
		unsigned long v, hash;

		if (tlb_type == hypervisor) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS_4V));
		} else if (tlb_type == cheetah_plus) {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (__pa(&old_tsb[i])),
				  "i" (ASI_QUAD_LDD_PHYS));
		} else {
			__asm__ __volatile__(
				"ldda [%2] %3, %0"
				: "=r" (tag), "=r" (pte)
				: "r" (&old_tsb[i]),
				  "i" (ASI_NUCLEUS_QUAD_LDD));
		}

		if (!tag || (tag & (1UL << TSB_TAG_LOCK_BIT)))
			continue;

		/* We only put base page size PTEs into the TSB,
		 * but that might change in the future.  This code
		 * would need to be changed if we start putting larger
		 * page size PTEs into there.
		 */
		WARN_ON((pte & _PAGE_ALL_SZ_BITS) != _PAGE_SZBITS);

		/* The tag holds bits 22 to 63 of the virtual address
		 * and the context.  Clear out the context, and shift
		 * up to make a virtual address.
		 */
		v = (tag & ((1UL << 42UL) - 1UL)) << 22UL;

		/* The implied bits of the tag (bits 13 to 21) are
		 * determined by the TSB entry index, so fill that in.
		 */
		v |= (i & (512UL - 1UL)) << 13UL;
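		/* Worked example: bits 12:0 of v are the in-page offset and
		 * remain zero, bits 21:13 are the nine implied bits recovered
		 * from the low bits of the entry index, and bits 63:22 came
		 * from the tag above.
		 */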

		hash = tsb_hash(v, new_nentries);
		if (tlb_type == cheetah_plus ||
		    tlb_type == hypervisor) {
			__asm__ __volatile__(
				"stxa %0, [%1] %2\n\t"
				"stxa %3, [%4] %2"
				: /* no outputs */
				: "r" (tag),
				  "r" (__pa(&new_tsb[hash].tag)),
				  "i" (ASI_PHYS_USE_EC),
				  "r" (pte),
				  "r" (__pa(&new_tsb[hash].pte)));
		} else {
			new_tsb[hash].tag = tag;
			new_tsb[hash].pte = pte;
		}
	}
}

/* When the RSS of an address space exceeds mm->context.tsb_rss_limit,
 * update_mmu_cache() invokes this routine to try and grow the TSB.
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * mm->context.tsb_rss_limit so the grow checks in update_mmu_cache()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
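/* Illustrative numbers: the initial 8K TSB holds 512 16-byte entries, so
 * its rss_limit is 384 pages; the loop below then picks the smallest
 * power-of-two size whose 3/4 capacity still exceeds the current RSS, up
 * to a 1MB (65536-entry) TSB or PAGE_SIZE << MAX_ORDER, whichever is
 * smaller.
 */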
void tsb_grow(struct mm_struct *mm, unsigned long rss, gfp_t gfp_flags)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long size, old_size;
	struct page *page;
	struct tsb *old_tsb;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	for (size = PAGE_SIZE; size < max_tsb_size; size <<= 1UL) {
		unsigned long n_entries = size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;
	}

	page = alloc_pages(gfp_flags | __GFP_ZERO, get_order(size));
	if (unlikely(!page))
		return;

	if (size == max_tsb_size)
		mm->context.tsb_rss_limit = ~0UL;
	else
		mm->context.tsb_rss_limit =
			((size / sizeof(struct tsb)) * 3) / 4;

	old_tsb = mm->context.tsb;
	old_size = mm->context.tsb_nentries * sizeof(struct tsb);

	if (old_tsb)
		copy_tsb(old_tsb, old_size, page_address(page), size);

	mm->context.tsb = page_address(page);
	setup_tsb_params(mm, size);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Now force all other processors to reload the new
		 * TSB state.
		 */
		smp_tsb_sync(mm);

		/* Finally reload it on the local cpu.  No further
		 * references will remain to the old TSB and we can
		 * thus free it up.
		 */
		tsb_context_switch(mm);

		free_pages((unsigned long) old_tsb, get_order(old_size));
	}
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context.sparc64_ctx_val = 0UL;

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	mm->context.tsb = NULL;
	tsb_grow(mm, 0, GFP_KERNEL);

	if (unlikely(!mm->context.tsb))
		return -ENOMEM;

	return 0;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long size = mm->context.tsb_nentries * sizeof(struct tsb);

	free_pages((unsigned long) mm->context.tsb, get_order(size));

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}