/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <linux/sort.h>
#include <linux/percpu.h>

#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>
#include <asm/tsb.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>
#include <asm/sstate.h>
#include <asm/mdesc.h>

#define MAX_PHYS_ADDRESS	(1UL << 42UL)
#define KPTE_BITMAP_CHUNK_SZ	(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)

unsigned long kern_linear_pte_xor[2] __read_mostly;

/* A bitmap, one bit for every 256MB of physical memory.  If the bit
 * is clear, we should use a 4MB page (via kern_linear_pte_xor[0]) else
 * if set we should use a 256MB page (via kern_linear_pte_xor[1]).
 */
unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];

#ifndef CONFIG_DEBUG_PAGEALLOC
/* A special kernel TSB for 4MB and 256MB linear mappings.
 * Space is allocated for this right after the trap table
 * in arch/sparc64/kernel/head.S
 */
extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
#endif

#define MAX_BANKS	32

static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
static int pavail_ents __initdata;
static int pavail_rescan_ents __initdata;

static int cmp_p64(const void *a, const void *b)
{
	const struct linux_prom64_registers *x = a, *y = b;

	if (x->phys_addr > y->phys_addr)
		return 1;
	if (x->phys_addr < y->phys_addr)
		return -1;
	return 0;
}

static void __init read_obp_memory(const char *property,
				   struct linux_prom64_registers *regs,
				   int *num_ents)
{
	int node = prom_finddevice("/memory");
	int prop_size = prom_getproplen(node, property);
	int ents, ret, i;

	ents = prop_size / sizeof(struct linux_prom64_registers);
	if (ents > MAX_BANKS) {
		prom_printf("The machine has more %s property entries than "
			    "this kernel can support (%d).\n",
			    property, MAX_BANKS);
		prom_halt();
	}

	ret = prom_getproperty(node, property, (char *) regs, prop_size);
	if (ret == -1) {
		prom_printf("Couldn't get %s property from /memory.\n",
			    property);
		prom_halt();
	}

	/* Sanitize what we got from the firmware, by page aligning
	 * everything.
	 */
	for (i = 0; i < ents; i++) {
		unsigned long base, size;

		base = regs[i].phys_addr;
		size = regs[i].reg_size;

		size &= PAGE_MASK;
		if (base & ~PAGE_MASK) {
			unsigned long new_base = PAGE_ALIGN(base);

			size -= new_base - base;
			if ((long) size < 0L)
				size = 0UL;
			base = new_base;
		}
		if (size == 0UL) {
			/* If it is empty, simply get rid of it.
			 * This simplifies the logic of the other
			 * functions that process these arrays.
			 */
			memmove(&regs[i], &regs[i + 1],
				(ents - i - 1) * sizeof(regs[0]));
			i--;
			ents--;
			continue;
		}
		regs[i].phys_addr = base;
		regs[i].reg_size = size;
	}

	*num_ents = ents;

	sort(regs, ents, sizeof(struct linux_prom64_registers),
	     cmp_p64, NULL);
}

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Kernel physical address base and size in bytes.  */
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;

unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;

unsigned long sparc64_kern_pri_context __read_mostly;
unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
unsigned long sparc64_kern_sec_context __read_mostly;

int bigkernel = 0;

#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif

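/* Flush the local cpu's copy of a page out of the D-cache (or, when
 * D-cache aliasing is impossible, out of the Spitfire I-cache).
 * sun4v cpus have coherent caches, so this must never run there,
 * hence the BUG_ON() below.
 */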
inline void flush_dcache_page_impl(struct page *page)
{
	BUG_ON(tlb_type == hypervisor);
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	32UL
#define PG_dcache_cpu_mask	\
	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)

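/* Atomically set the dirty bit in page->flags and record which cpu
 * owns the dirty D-cache lines, retrying the casx until no other
 * updater races with us.
 */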
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}

static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}

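/* Insert a (tag, pte) pair into a TSB.  cheetah_plus and sun4v
 * access their TSBs via physical addresses, so convert first for
 * those chips.
 */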
static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
{
	unsigned long tsb_addr = (unsigned long) ent;

	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		tsb_addr = __pa(tsb_addr);

	__tsb_insert(tsb_addr, tag, pte);
}

unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
unsigned long _PAGE_SZBITS __read_mostly;

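/* Called after a user PTE is installed: resolve any lazily deferred
 * D-cache flush recorded for the page, then preload the translation
 * into the mm's TSB so the next access avoids full miss processing.
 */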
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct mm_struct *mm;
	struct tsb *tsb;
	unsigned long tag, flags;
	unsigned long tsb_index, tsb_hash_shift;

	if (tlb_type != hypervisor) {
		unsigned long pfn = pte_pfn(pte);
		unsigned long pg_flags;
		struct page *page;

		if (pfn_valid(pfn) &&
		    (page = pfn_to_page(pfn), page_mapping(page)) &&
		    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
				   PG_dcache_cpu_mask);
			int this_cpu = get_cpu();

			/* This is just to optimize away some function calls
			 * in the SMP case.
			 */
			if (cpu == this_cpu)
				flush_dcache_page_impl(page);
			else
				smp_flush_dcache_page_impl(page, cpu);

			clear_dcache_dirty_cpu(page, cpu);

			put_cpu();
		}
	}

	mm = vma->vm_mm;

	tsb_index = MM_TSB_BASE;
	tsb_hash_shift = PAGE_SHIFT;

	spin_lock_irqsave(&mm->context.lock, flags);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL) {
		if ((tlb_type == hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4V) == _PAGE_SZHUGE_4V) ||
		    (tlb_type != hypervisor &&
		     (pte_val(pte) & _PAGE_SZALL_4U) == _PAGE_SZHUGE_4U)) {
			tsb_index = MM_TSB_HUGE;
			tsb_hash_shift = HPAGE_SHIFT;
		}
	}
#endif

	tsb = mm->context.tsb_block[tsb_index].tsb;
	tsb += ((address >> tsb_hash_shift) &
		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
	tag = (address >> 22UL);
	tsb_insert(tsb, tag, pte_val(pte));

	spin_unlock_irqrestore(&mm->context.lock, flags);
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are 99% certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}

void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		/* This code only runs on Spitfire cpus so this is
		 * why we can assume _PAGE_PADDR_4U.
		 */
		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
			unsigned long paddr, mask = _PAGE_PADDR_4U;

			if (kaddr >= PAGE_OFFSET)
				paddr = kaddr & mask;
			else {
				pgd_t *pgdp = pgd_offset_k(kaddr);
				pud_t *pudp = pud_offset(pgdp, kaddr);
				pmd_t *pmdp = pmd_offset(pudp, kaddr);
				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);

				paddr = pte_val(*ptep) & mask;
			}
			__flush_icache_page(paddr);
		}
	}
}

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long i, flags;

		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}

	printk(KERN_INFO "%lu pages of RAM\n", total);
	printk(KERN_INFO "%lu reserved pages\n", reserved);
	printk(KERN_INFO "%lu pages shared\n", shared);
	printk(KERN_INFO "%lu pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n",
	       global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
	       global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n",
	       global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
	       global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
	       global_page_state(NR_PAGETABLE));
}

void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else if (tlb_type == hypervisor)
		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
struct linux_prom_translation prom_trans[512] __read_mostly;
unsigned int prom_trans_ents __read_mostly;

/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in ktlb.S.
 */
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}

static int cmp_ptrans(const void *a, const void *b)
{
	const struct linux_prom_translation *x = a, *y = b;

	if (x->virt > y->virt)
		return 1;
	if (x->virt < y->virt)
		return -1;
	return 0;
}

/* Read OBP translations property into 'prom_trans[]'. */
static void __init read_obp_translations(void)
{
	int n, node, ents, first, last, i;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %d is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}

	n = n / sizeof(struct linux_prom_translation);

	ents = n;

	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
	     cmp_ptrans, NULL);

	/* Now kick out all the non-OBP entries. */
	for (i = 0; i < ents; i++) {
		if (in_obp_range(prom_trans[i].virt))
			break;
	}
	first = i;
	for (; i < ents; i++) {
		if (!in_obp_range(prom_trans[i].virt))
			break;
	}
	last = i;

	for (i = 0; i < (last - first); i++) {
		struct linux_prom_translation *src = &prom_trans[i + first];
		struct linux_prom_translation *dest = &prom_trans[i];

		*dest = *src;
	}
	for (; i < ents; i++) {
		struct linux_prom_translation *dest = &prom_trans[i];
		dest->virt = dest->size = dest->data = 0x0UL;
	}

	prom_trans_ents = last - first;

	if (tlb_type == spitfire) {
		/* Clear diag TTE bits. */
		for (i = 0; i < prom_trans_ents; i++)
			prom_trans[i].data &= ~0x0003fe0000000000UL;
	}
}

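/* Ask the sun4v hypervisor to install a permanent mapping of 'vaddr'
 * with TTE 'pte' into the given MMU (HV_MMU_DMMU or HV_MMU_IMMU),
 * halting via the PROM if the call fails.
 */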
static void __init hypervisor_tlb_lock(unsigned long vaddr,
				       unsigned long pte,
				       unsigned long mmu)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	register unsigned long arg2 asm("%o2");
	register unsigned long arg3 asm("%o3");

	func = HV_FAST_MMU_MAP_PERM_ADDR;
	arg0 = vaddr;
	arg1 = 0;
	arg2 = pte;
	arg3 = mmu;
	__asm__ __volatile__("ta	0x80"
			     : "=&r" (func), "=&r" (arg0),
			       "=&r" (arg1), "=&r" (arg2),
			       "=&r" (arg3)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "3" (arg2), "4" (arg3));
	if (arg0 != 0) {
		prom_printf("hypervisor_tlb_lock[%lx:%lx:%lx:%lx]: "
			    "errors with %lx\n", vaddr, 0UL, pte, mmu, arg0);
		prom_halt();
	}
}

static unsigned long kern_large_tte(unsigned long paddr);

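/* Lock the kernel image into the TLB: one 4MB mapping, or two when
 * 'bigkernel' is set, installed via hypervisor calls on sun4v and
 * via OBP into the highest locked TLB entries on sun4u.
 */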
static void __init remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = kern_large_tte(phys_page);

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via Hypervisor or OBP. */
	if (tlb_type == hypervisor) {
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		if (bigkernel) {
			tte_vaddr += 0x400000;
			tte_data += 0x400000;
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
		}
	} else {
		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
		if (bigkernel) {
			tlb_ent -= 1;
			prom_dtlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
			prom_itlb_load(tlb_ent,
				       tte_data + 0x400000,
				       tte_vaddr + 0x400000);
		}
		sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
	}
	if (tlb_type == cheetah_plus) {
		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
					    CTX_CHEETAH_PLUS_NUC);
		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
	}
}

static void __init inherit_prom_mappings(void)
{
	read_obp_translations();

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();
	prom_printf("done.\n");
}

void prom_world(int enter)
{
	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	__asm__ __volatile__("flushw");
}

#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */

/* get_new_mmu_context() uses "cache + 1".  */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);

/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 *
 * Always invoked with interrupts disabled.
 */
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;
	unsigned long flags;
	int new_version;

	spin_lock_irqsave(&ctx_alloc_lock, flags);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	new_version = 0;
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_version = 1;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock_irqrestore(&ctx_alloc_lock, flags);

	if (unlikely(new_version))
		smp_new_mmu_context_version();
}

/* Find a free area for the bootmem map, avoiding the kernel image
 * and the initial ramdisk.
 */
static unsigned long __init choose_bootmap_pfn(unsigned long start_pfn,
					       unsigned long end_pfn)
{
	unsigned long avoid_start, avoid_end, bootmap_size;
	int i;

	bootmap_size = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_size <<= PAGE_SHIFT;

	avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	avoid_start = initrd_start;
	avoid_end = PAGE_ALIGN(initrd_end);
#endif

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("choose_bootmap_pfn: kern[%lx:%lx] avoid[%lx:%lx]\n",
		    kern_base, PAGE_ALIGN(kern_base + kern_size),
		    avoid_start, avoid_end);
#endif
	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end;

		start = pavail[i].phys_addr;
		end = start + pavail[i].reg_size;

		while (start < end) {
			if (start >= kern_base &&
			    start < PAGE_ALIGN(kern_base + kern_size)) {
				start = PAGE_ALIGN(kern_base + kern_size);
				continue;
			}
			if (start >= avoid_start && start < avoid_end) {
				start = avoid_end;
				continue;
			}

			if ((end - start) < bootmap_size)
				break;

			if (start < kern_base &&
			    (start + bootmap_size) > kern_base) {
				start = PAGE_ALIGN(kern_base + kern_size);
				continue;
			}

			if (start < avoid_start &&
			    (start + bootmap_size) > avoid_start) {
				start = avoid_end;
				continue;
			}

			/* OK, it doesn't overlap anything, use it.  */
#ifdef CONFIG_DEBUG_BOOTMEM
			prom_printf("choose_bootmap_pfn: Using %lx [%lx]\n",
				    start >> PAGE_SHIFT, start);
#endif
			return start >> PAGE_SHIFT;
		}
	}

	prom_printf("Cannot find free area for bootmap, aborting.\n");
	prom_halt();
}

static void __init trim_pavail(unsigned long *cur_size_p,
			       unsigned long *end_of_phys_p)
{
	unsigned long to_trim = *cur_size_p - cmdline_memory_size;
	unsigned long avoid_start, avoid_end;
	int i;

	to_trim = PAGE_ALIGN(to_trim);

	avoid_start = avoid_end = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	avoid_start = initrd_start;
	avoid_end = PAGE_ALIGN(initrd_end);
#endif

	/* Trim some pavail[] entries in order to satisfy the
	 * requested "mem=xxx" kernel command line specification.
	 *
	 * We must not trim off the kernel image area nor the
	 * initial ramdisk range (if any).  Also, we must not trim
	 * any pavail[] entry down to zero in order to preserve
	 * the invariant that all pavail[] entries have a non-zero
	 * size which is assumed by all of the code in here.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long start, end, kern_end;
		unsigned long trim_low, trim_high, n;

		kern_end = PAGE_ALIGN(kern_base + kern_size);

		trim_low = start = pavail[i].phys_addr;
		trim_high = end = start + pavail[i].reg_size;

		if (kern_base >= start &&
		    kern_base < end) {
			trim_low = kern_base;
			if (kern_end >= end)
				continue;
		}
		if (kern_end >= start &&
		    kern_end < end) {
			trim_high = kern_end;
		}
		if (avoid_start &&
		    avoid_start >= start &&
		    avoid_start < end) {
			if (trim_low > avoid_start)
				trim_low = avoid_start;
			if (avoid_end >= end)
				continue;
		}
		if (avoid_end &&
		    avoid_end >= start &&
		    avoid_end < end) {
			if (trim_high < avoid_end)
				trim_high = avoid_end;
		}

		if (trim_high <= trim_low)
			continue;

		if (trim_low == start && trim_high == end) {
			/* Whole chunk is available for trimming.
			 * Trim all except one page, in order to keep
			 * entry non-empty.
			 */
			n = (end - start) - PAGE_SIZE;
			if (n > to_trim)
				n = to_trim;

			if (n) {
				pavail[i].phys_addr += n;
				pavail[i].reg_size -= n;
				to_trim -= n;
			}
		} else {
			n = (trim_low - start);
			if (n > to_trim)
				n = to_trim;

			if (n) {
				pavail[i].phys_addr += n;
				pavail[i].reg_size -= n;
				to_trim -= n;
			}
			if (to_trim) {
				n = end - trim_high;
				if (n > to_trim)
					n = to_trim;
				if (n) {
					pavail[i].reg_size -= n;
					to_trim -= n;
				}
			}
		}

		if (!to_trim)
			break;
	}

	/* Recalculate.  */
	*cur_size_p = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		*end_of_phys_p = pavail[i].phys_addr +
			pavail[i].reg_size;
		*cur_size_p += pavail[i].reg_size;
	}
}

/* About pages_avail, this is the value we will use to calculate
 * the zholes_size[] argument given to free_area_init_node().  The
 * page allocator uses this to calculate nr_kernel_pages,
 * nr_all_pages and zone->present_pages.  On NUMA it is used
 * to calculate zone->min_unmapped_pages and zone->min_slab_pages.
 *
 * So this number should really be set to what the page allocator
 * actually ends up with.  This means:
 * 1) It should include bootmem map pages, we'll release those.
 * 2) It should not include the kernel image, except for the
 *    __init sections which we will also release.
 * 3) It should include the initrd image, since we'll release
 *    that too.
 */
static unsigned long __init bootmem_init(unsigned long *pages_avail,
					 unsigned long phys_base)
{
	unsigned long bootmap_size, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan pavail, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; i < pavail_ents; i++) {
		end_of_phys_memory = pavail[i].phys_addr +
			pavail[i].reg_size;
		bytes_avail += pavail[i].reg_size;
	}

	/* Determine the location of the initial ramdisk before trying
	 * to honor the "mem=xxx" command line argument.  We must know
	 * where the kernel image and the ramdisk image are so that we
	 * do not trim those two areas from the physical memory map.
	 */

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
			initrd_end = 0;
		}
	}
#endif

	if (cmdline_memory_size &&
	    bytes_avail > cmdline_memory_size)
		trim_pavail(&bytes_avail,
			    &end_of_phys_memory);

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = (phys_base >> PAGE_SHIFT);

	bootmap_pfn = choose_bootmap_pfn(min_low_pfn, end_pfn);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn,
					 min_low_pfn, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
			    i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
		free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Add back in the initmem pages. */
	size = ((unsigned long)(__init_end) & PAGE_MASK) -
		PAGE_ALIGN((unsigned long)__init_begin);
	*pages_avail += size >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);

	for (i = 0; i < pavail_ents; i++) {
		unsigned long start_pfn, end_pfn;

		start_pfn = pavail[i].phys_addr >> PAGE_SHIFT;
		end_pfn = (start_pfn + (pavail[i].reg_size >> PAGE_SHIFT));
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("memory_present(0, %lx, %lx)\n",
			    start_pfn, end_pfn);
#endif
		memory_present(0, start_pfn, end_pfn);
	}

	sparse_init();

	return end_pfn;
}

static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
static int pall_ents __initdata;

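/* With DEBUG_PAGEALLOC the linear kernel mapping is backed by small
 * pages so that individual pages can be mapped and unmapped; this
 * builds the page tables covering physical range [pstart, pend).
 */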
#ifdef CONFIG_DEBUG_PAGEALLOC
static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
{
	unsigned long vstart = PAGE_OFFSET + pstart;
	unsigned long vend = PAGE_OFFSET + pend;
	unsigned long alloc_bytes = 0UL;

	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
			    vstart, vend);
		prom_halt();
	}

	while (vstart < vend) {
		unsigned long this_end, paddr = __pa(vstart);
		pgd_t *pgd = pgd_offset_k(vstart);
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pud = pud_offset(pgd, vstart);
		if (pud_none(*pud)) {
			pmd_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pud_populate(&init_mm, pud, new);
		}

		pmd = pmd_offset(pud, vstart);
		if (!pmd_present(*pmd)) {
			pte_t *new;

			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
			alloc_bytes += PAGE_SIZE;
			pmd_populate_kernel(&init_mm, pmd, new);
		}

		pte = pte_offset_kernel(pmd, vstart);
		this_end = (vstart + PMD_SIZE) & PMD_MASK;
		if (this_end > vend)
			this_end = vend;

		while (vstart < this_end) {
			pte_val(*pte) = (paddr | pgprot_val(prot));

			vstart += PAGE_SIZE;
			paddr += PAGE_SIZE;
			pte++;
		}
	}

	return alloc_bytes;
}

extern unsigned int kvmap_linear_patch[1];
#endif /* CONFIG_DEBUG_PAGEALLOC */

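/* Set a bit in kpte_linear_bitmap for every fully aligned 256MB
 * chunk of [start, end), telling the TLB miss handler to use a
 * 256MB page there instead of 4MB pages.
 */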
static void __init mark_kpte_bitmap(unsigned long start, unsigned long end)
{
	const unsigned long shift_256MB = 28;
	const unsigned long mask_256MB = ((1UL << shift_256MB) - 1UL);
	const unsigned long size_256MB = (1UL << shift_256MB);

	while (start < end) {
		long remains;

		remains = end - start;
		if (remains < size_256MB)
			break;

		if (start & mask_256MB) {
			start = (start + size_256MB) & ~mask_256MB;
			continue;
		}

		while (remains >= size_256MB) {
			unsigned long index = start >> shift_256MB;

			__set_bit(index, kpte_linear_bitmap);

			start += size_256MB;
			remains -= size_256MB;
		}
	}
}

static void __init kernel_physical_mapping_init(void)
{
	unsigned long i;
#ifdef CONFIG_DEBUG_PAGEALLOC
	unsigned long mem_alloced = 0UL;
#endif

	read_obp_memory("reg", &pall[0], &pall_ents);

	for (i = 0; i < pall_ents; i++) {
		unsigned long phys_start, phys_end;

		phys_start = pall[i].phys_addr;
		phys_end = phys_start + pall[i].reg_size;

		mark_kpte_bitmap(phys_start, phys_end);

#ifdef CONFIG_DEBUG_PAGEALLOC
		mem_alloced += kernel_map_range(phys_start, phys_end,
						PAGE_KERNEL);
#endif
	}

#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("Allocated %ld bytes for kernel page tables.\n",
	       mem_alloced);

	kvmap_linear_patch[0] = 0x01000000; /* nop */
	flushi(&kvmap_linear_patch[0]);

	__flush_tlb_all();
#endif
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);

	kernel_map_range(phys_start, phys_end,
			 (enable ? PAGE_KERNEL : __pgprot(0)));

	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
			       PAGE_OFFSET + phys_end);

	/* we should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu.
	 */
	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
				 PAGE_OFFSET + phys_end);
}
#endif

unsigned long __init find_ecache_flush_span(unsigned long size)
{
	int i;

	for (i = 0; i < pavail_ents; i++) {
		if (pavail[i].reg_size >= size)
			return pavail[i].phys_addr;
	}

	return ~0UL;
}

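/* Rewrite the TSB access instructions in the trap handlers to their
 * physical-address variants, for chips (cheetah_plus, sun4v) whose
 * TSBs are accessed by physical address.
 */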
static void __init tsb_phys_patch(void)
{
	struct tsb_ldquad_phys_patch_entry *pquad;
	struct tsb_phys_patch_entry *p;

	pquad = &__tsb_ldquad_phys_patch;
	while (pquad < &__tsb_ldquad_phys_patch_end) {
		unsigned long addr = pquad->addr;

		if (tlb_type == hypervisor)
			*(unsigned int *) addr = pquad->sun4v_insn;
		else
			*(unsigned int *) addr = pquad->sun4u_insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		pquad++;
	}

	p = &__tsb_phys_patch;
	while (p < &__tsb_phys_patch_end) {
		unsigned long addr = p->addr;

		*(unsigned int *) addr = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0"
				     : /* no outputs */
				     : "r" (addr));

		p++;
	}
}

/* Don't mark as init, we give this to the Hypervisor.  */
#ifndef CONFIG_DEBUG_PAGEALLOC
#define NUM_KTSB_DESCR	2
#else
#define NUM_KTSB_DESCR	1
#endif
static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

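/* Fill in the TSB descriptors handed to the hypervisor by
 * sun4v_ktsb_register(): one for PAGE_SIZE kernel mappings and,
 * unless DEBUG_PAGEALLOC, a second shared by the 4MB and 256MB
 * linear mappings.
 */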
static void __init sun4v_ktsb_init(void)
{
	unsigned long ktsb_pa;

	/* First KTSB for PAGE_SIZE mappings.  */
	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);

	switch (PAGE_SIZE) {
	case 8 * 1024:
	default:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
		break;

	case 64 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
		break;

	case 512 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
		break;

	case 4 * 1024 * 1024:
		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
		break;
	}

	ktsb_descr[0].assoc = 1;
	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
	ktsb_descr[0].ctx_idx = 0;
	ktsb_descr[0].tsb_base = ktsb_pa;
	ktsb_descr[0].resv = 0;

#ifndef CONFIG_DEBUG_PAGEALLOC
	/* Second KTSB for 4MB/256MB mappings.  */
	ktsb_pa = (kern_base +
		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));

	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
				   HV_PGSZ_MASK_256MB);
	ktsb_descr[1].assoc = 1;
	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
	ktsb_descr[1].ctx_idx = 0;
	ktsb_descr[1].tsb_base = ktsb_pa;
	ktsb_descr[1].resv = 0;
#endif
}

void __cpuinit sun4v_ktsb_register(void)
{
	register unsigned long func asm("%o5");
	register unsigned long arg0 asm("%o0");
	register unsigned long arg1 asm("%o1");
	unsigned long pa;

	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);

	func = HV_FAST_MMU_TSB_CTX0;
	arg0 = NUM_KTSB_DESCR;
	arg1 = pa;
	__asm__ __volatile__("ta	%6"
			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
			     : "0" (func), "1" (arg0), "2" (arg1),
			       "i" (HV_FAST_TRAP));
}

/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);
extern void sun4v_patch_tlb_handlers(void);

extern void cpu_probe(void);
extern void central_probe(void);

static unsigned long last_valid_pfn;
pgd_t swapper_pg_dir[2048];

static void sun4u_pgprot_init(void);
static void sun4v_pgprot_init(void);

void __init paging_init(void)
{
	unsigned long end_pfn, pages_avail, shift, phys_base;
	unsigned long real_end, i;

	/* These build time checks make sure that the dcache_dirty_cpu()
	 * page->flags usage will work.
	 *
	 * When a page gets marked as dcache-dirty, we store the
	 * cpu number starting at bit 32 in the page->flags.  Also,
	 * functions like clear_dcache_dirty_cpu use the cpu mask
	 * in 13-bit signed-immediate instruction fields.
	 */
	BUILD_BUG_ON(FLAGS_RESERVED != 32);
	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
		     ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
	BUILD_BUG_ON(NR_CPUS > 4096);

	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;

	sstate_booting();

	/* Invalidate both kernel TSBs.  */
	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
#ifndef CONFIG_DEBUG_PAGEALLOC
	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
#endif

	if (tlb_type == hypervisor)
		sun4v_pgprot_init();
	else
		sun4u_pgprot_init();

	if (tlb_type == cheetah_plus ||
	    tlb_type == hypervisor)
		tsb_phys_patch();

	if (tlb_type == hypervisor) {
		sun4v_patch_tlb_handlers();
		sun4v_ktsb_init();
	}

	/* Find available physical memory... */
	read_obp_memory("available", &pavail[0], &pavail_ents);

	phys_base = 0xffffffffffffffffUL;
	for (i = 0; i < pavail_ents; i++)
		phys_base = min(phys_base, pavail[i].phys_addr);

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_low_pmd_dir + (shift / sizeof(pgd_t)));

	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.  */
	setup_tba();

	__flush_tlb_all();

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail, phys_base);

	max_mapnr = last_valid_pfn;

	kernel_physical_mapping_init();

	real_setup_per_cpu_areas();

	prom_build_devicetree();

	if (tlb_type == hypervisor)
		sun4v_mdesc_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		zones_size[ZONE_NORMAL] = end_pfn;
		zholes_size[ZONE_NORMAL] = end_pfn - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
				    zholes_size);
	}

	prom_printf("Booting Linux...\n");

	central_probe();
	cpu_probe();
}

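/* The firmware's "available" memory list can shrink between the boot
 * scan and now.  Rescan it, record still-present 4MB chunks in
 * sparc64_valid_addr_bitmap, and reserve any pages that have since
 * disappeared.
 */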
static void __init taint_real_pages(void)
{
	int i;

	read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);

	/* Find changes discovered in the physmem available rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; i < pavail_ents; i++) {
		unsigned long old_start, old_end;

		old_start = pavail[i].phys_addr;
		old_end = old_start +
			pavail[i].reg_size;
		while (old_start < old_end) {
			int n;

			for (n = 0; n < pavail_rescan_ents; n++) {
				unsigned long new_start, new_end;

				new_start = pavail_rescan[n].phys_addr;
				new_end = new_start +
					pavail_rescan[n].reg_size;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit(old_start >> 22,
						sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}

int __init page_in_phys_avail(unsigned long paddr)
{
	int i;

	paddr &= PAGE_MASK;

	for (i = 0; i < pavail_rescan_ents; i++) {
		unsigned long start, end;

		start = pavail_rescan[i].phys_addr;
		end = start + pavail_rescan[i].reg_size;

		if (paddr >= start && paddr < end)
			return 1;
	}
	if (paddr >= kern_base && paddr < (kern_base + kern_size))
		return 1;
#ifdef CONFIG_BLK_DEV_INITRD
	if (paddr >= __pa(initrd_start) &&
	    paddr < __pa(PAGE_ALIGN(initrd_end)))
		return 1;
#endif

	return 0;
}

void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif

	/* We subtract one to account for the mem_map_zero page
	 * allocated below.
	 */
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %luk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}

void free_initmem(void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		init_page_count(p);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif

#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)

pgprot_t PAGE_KERNEL __read_mostly;
EXPORT_SYMBOL(PAGE_KERNEL);

pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
pgprot_t PAGE_COPY __read_mostly;

pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);

pgprot_t PAGE_EXEC __read_mostly;
unsigned long pg_iobits __read_mostly;

unsigned long _PAGE_IE __read_mostly;
EXPORT_SYMBOL(_PAGE_IE);

unsigned long _PAGE_E __read_mostly;
EXPORT_SYMBOL(_PAGE_E);

unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);

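/* Fill in PAGE_COPY, PAGE_SHARED and the protection_map[] entries,
 * which are indexed by the VM_{READ,WRITE,EXEC,SHARED} bit
 * combination, from the cpu-specific protection bits.
 */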
static void prot_init_common(unsigned long page_none,
			     unsigned long page_shared,
			     unsigned long page_copy,
			     unsigned long page_readonly,
			     unsigned long page_exec_bit)
{
	PAGE_COPY = __pgprot(page_copy);
	PAGE_SHARED = __pgprot(page_shared);

	protection_map[0x0] = __pgprot(page_none);
	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
	protection_map[0x4] = __pgprot(page_readonly);
	protection_map[0x5] = __pgprot(page_readonly);
	protection_map[0x6] = __pgprot(page_copy);
	protection_map[0x7] = __pgprot(page_copy);
	protection_map[0x8] = __pgprot(page_none);
	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
	protection_map[0xc] = __pgprot(page_readonly);
	protection_map[0xd] = __pgprot(page_readonly);
	protection_map[0xe] = __pgprot(page_shared);
	protection_map[0xf] = __pgprot(page_shared);
}

static void __init sun4u_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				_PAGE_CACHE_4U | _PAGE_P_4U |
				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
				_PAGE_EXEC_4U);
	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
				       _PAGE_CACHE_4U | _PAGE_P_4U |
				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
				       _PAGE_EXEC_4U | _PAGE_L_4U);
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);

	_PAGE_IE = _PAGE_IE_4U;
	_PAGE_E = _PAGE_E_4U;
	_PAGE_CACHE = _PAGE_CACHE_4U;

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
		     __ACCESS_BITS_4U | _PAGE_E_4U);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4U) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
				   _PAGE_P_4U | _PAGE_W_4U);

	/* XXX Should use 256MB on Panther. XXX */
	kern_linear_pte_xor[1] = kern_linear_pte_xor[0];

	_PAGE_SZBITS = _PAGE_SZBITS_4U;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);

	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);

	page_exec_bit = _PAGE_EXEC_4U;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

static void __init sun4v_pgprot_init(void)
{
	unsigned long page_none, page_shared, page_copy, page_readonly;
	unsigned long page_exec_bit;

	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
				_PAGE_CACHE_4V | _PAGE_P_4V |
				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
				_PAGE_EXEC_4V);
	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);

	_PAGE_IE = _PAGE_IE_4V;
	_PAGE_E = _PAGE_E_4V;
	_PAGE_CACHE = _PAGE_CACHE_4V;

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

#ifdef CONFIG_DEBUG_PAGEALLOC
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZBITS_4V) ^
		0xfffff80000000000;
#else
	kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
		0xfffff80000000000;
#endif
	kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
				   _PAGE_P_4V | _PAGE_W_4V);

	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
		     __ACCESS_BITS_4V | _PAGE_E_4V);

	_PAGE_SZBITS = _PAGE_SZBITS_4V;
	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);

	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);

	page_exec_bit = _PAGE_EXEC_4V;

	prot_init_common(page_none, page_shared, page_copy, page_readonly,
			 page_exec_bit);
}

unsigned long pte_sz_bits(unsigned long sz)
{
	if (tlb_type == hypervisor) {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4V;
		case 64 * 1024:
			return _PAGE_SZ64K_4V;
		case 512 * 1024:
			return _PAGE_SZ512K_4V;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4V;
		}
	} else {
		switch (sz) {
		case 8 * 1024:
		default:
			return _PAGE_SZ8K_4U;
		case 64 * 1024:
			return _PAGE_SZ64K_4U;
		case 512 * 1024:
			return _PAGE_SZ512K_4U;
		case 4 * 1024 * 1024:
			return _PAGE_SZ4MB_4U;
		}
	}
}

pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
{
	pte_t pte;

	pte_val(pte) = page | pgprot_val(pgprot_noncached(prot));
	pte_val(pte) |= (((unsigned long)space) << 32);
	pte_val(pte) |= pte_sz_bits(page_size);

	return pte;
}

static unsigned long kern_large_tte(unsigned long paddr)
{
	unsigned long val;

	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
	if (tlb_type == hypervisor)
		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
		       _PAGE_EXEC_4V | _PAGE_W_4V);

	return val | paddr;
}

/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}

#ifdef CONFIG_MEMORY_HOTPLUG

void online_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalram_pages++;
	num_physpages++;
}

int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}

#endif /* CONFIG_MEMORY_HOTPLUG */