/* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $
 * arch/sparc64/mm/init.c
 *
 * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
#include <linux/cache.h>
#include <asm/head.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/iommu.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/spitfire.h>
#include <asm/sections.h>

extern void device_scan(void);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];

unsigned long *sparc64_valid_addr_bitmap __read_mostly;

/* Ugly, but necessary... -DaveM */
unsigned long phys_base __read_mostly;
unsigned long kern_base __read_mostly;
unsigned long kern_size __read_mostly;
unsigned long pfn_base __read_mostly;

int bigkernel = 0;
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
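
/* Sizing note: mmu_context_bmap keeps one bit per context number, so
 * with 1 << CTX_NR_BITS possible numbers and 64 bits per unsigned long
 * it needs 1 << (CTX_NR_BITS - 6) slots, which is exactly the
 * CTX_BMAP_SLOTS value above.
 */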
/* References to special section boundaries */
extern char _start[], _end[];

/* Initial ramdisk setup */
extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;

struct page *mem_map_zero __read_mostly;
/* XXX Tune this... */
#define PGT_CACHE_LOW	25
#define PGT_CACHE_HIGH	50
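
/* check_pgt_cache() below trims the page table quicklists back down
 * to PGT_CACHE_LOW once they have grown past PGT_CACHE_HIGH; the two
 * watermarks give hysteresis so we do not free a page on every call.
 */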
void check_pgt_cache(void)
{
	preempt_disable();
	if (pgtable_cache_size > PGT_CACHE_HIGH) {
		do {
			if (pgd_quicklist)
				free_pgd_slow(get_pgd_fast());
			if (pte_quicklist[0])
				free_pte_slow(pte_alloc_one_fast(NULL, 0));
			if (pte_quicklist[1])
				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
		} while (pgtable_cache_size > PGT_CACHE_LOW);
	}
	preempt_enable();
}
#ifdef CONFIG_DEBUG_DCFLUSH
atomic_t dcpage_flushes = ATOMIC_INIT(0);
#ifdef CONFIG_SMP
atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
#endif
#endif
__inline__ void flush_dcache_page_impl(struct page *page)
{
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
#define PG_dcache_dirty		PG_arch_1
#define PG_dcache_cpu_shift	24
#define PG_dcache_cpu_mask	(256 - 1)

#if NR_CPUS > 256
#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
#endif

#define dcache_dirty_cpu(page) \
	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
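
/* Illustrative layout of the D-cache state kept in page->flags: bit
 * PG_dcache_dirty says some cpu's D-cache may hold dirty lines for
 * this page, and that cpu's number lives in the eight bits starting
 * at PG_dcache_cpu_shift (PG_dcache_cpu_mask is 255).  A page dirtied
 * by cpu 5, for example, carries
 * ((5UL << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty)) in flags.
 */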
static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
{
	unsigned long mask = this_cpu;
	unsigned long non_cpu_bits;

	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);

	__asm__ __volatile__("1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "and	%%g7, %1, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop"
			     : /* no outputs */
			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
			     : "g1", "g7");
}
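
/* Both flag updates here follow the same lock-free pattern: read
 * page->flags into %g7, compute the new value in %g1, and casx until
 * the word did not change underneath us.  clear_dcache_dirty_cpu()
 * below additionally bails out early (branch to 2:) when 'cpu' no
 * longer matches the owner recorded in the flags.
 */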
static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
{
	unsigned long mask = (1UL << PG_dcache_dirty);

	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
			     "1:\n\t"
			     "ldx	[%2], %%g7\n\t"
			     "srlx	%%g7, %4, %%g1\n\t"
			     "and	%%g1, %3, %%g1\n\t"
			     "cmp	%%g1, %0\n\t"
			     "bne,pn	%%icc, 2f\n\t"
			     " andn	%%g7, %1, %%g1\n\t"
			     "casx	[%2], %%g7, %%g1\n\t"
			     "cmp	%%g7, %%g1\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "bne,pn	%%xcc, 1b\n\t"
			     " nop\n"
			     "2:"
			     : /* no outputs */
			     : "r" (cpu), "r" (mask), "r" (&page->flags),
			       "i" (PG_dcache_cpu_mask),
			       "i" (PG_dcache_cpu_shift)
			     : "g1", "g7");
}
extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn;
	unsigned long pg_flags;

	pfn = pte_pfn(pte);
	if (pfn_valid(pfn) &&
	    (page = pfn_to_page(pfn), page_mapping(page)) &&
	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
			   PG_dcache_cpu_mask);
		int this_cpu = get_cpu();

		/* This is just to optimize away some function calls
		 * in the SMP case.
		 */
		if (cpu == this_cpu)
			flush_dcache_page_impl(page);
		else
			smp_flush_dcache_page_impl(page, cpu);

		clear_dcache_dirty_cpu(page, cpu);

		put_cpu();
	}

	if (get_thread_fault_code())
		__update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
				   address, pte, get_thread_fault_code());
}
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;
	int this_cpu;

	/* Do not bother with the expensive D-cache flush if it
	 * is merely the zero page.  The 'bigcore' testcase in GDB
	 * causes this case to run millions of times.
	 */
	if (page == ZERO_PAGE(0))
		return;

	this_cpu = get_cpu();

	mapping = page_mapping(page);
	if (mapping && !mapping_mapped(mapping)) {
		int dirty = test_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			int dirty_cpu = dcache_dirty_cpu(page);

			if (dirty_cpu == this_cpu)
				goto out;
			smp_flush_dcache_page_impl(page, dirty_cpu);
		}
		set_dcache_dirty(page, this_cpu);
	} else {
		/* We could delay the flush for the !page_mapping
		 * case too.  But that case is for exec env/arg
		 * pages and those are %99 certainly going to get
		 * faulted into the tlb (and thus flushed) anyways.
		 */
		flush_dcache_page_impl(page);
	}

out:
	put_cpu();
}
void __kprobes flush_icache_range(unsigned long start, unsigned long end)
{
	/* Cheetah has coherent I-cache. */
	if (tlb_type == spitfire) {
		unsigned long kaddr;

		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE)
			__flush_icache_page(__get_phys(kaddr));
	}
}
unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long) ((page - mem_map) + pfn_base);
}

struct page *pfn_to_page(unsigned long pfn)
{
	return (mem_map + (pfn - pfn_base));
}
void show_mem(void)
{
	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n",
	       nr_swap_pages << (PAGE_SHIFT-10));
	printk("%ld pages of RAM\n", num_physpages);
	printk("%d free pages\n", nr_free_pages());
	printk("%d pages in page table cache\n", pgtable_cache_size);
}
void mmu_info(struct seq_file *m)
{
	if (tlb_type == cheetah)
		seq_printf(m, "MMU Type\t: Cheetah\n");
	else if (tlb_type == cheetah_plus)
		seq_printf(m, "MMU Type\t: Cheetah+\n");
	else if (tlb_type == spitfire)
		seq_printf(m, "MMU Type\t: Spitfire\n");
	else
		seq_printf(m, "MMU Type\t: ???\n");

#ifdef CONFIG_DEBUG_DCFLUSH
	seq_printf(m, "DCPageFlushes\t: %d\n",
		   atomic_read(&dcpage_flushes));
#ifdef CONFIG_SMP
	seq_printf(m, "DCPageFlushesXC\t: %d\n",
		   atomic_read(&dcpage_flushes_xcall));
#endif /* CONFIG_SMP */
#endif /* CONFIG_DEBUG_DCFLUSH */
}
struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};
static struct linux_prom_translation prom_trans[512] __initdata;
extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
extern void register_prom_callbacks(void);
/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;

/* Exported for kernel TLB miss handling in ktlb.S */
unsigned long prom_pmd_phys __read_mostly;
unsigned int swapper_pgd_zero __read_mostly;
/* Allocate power-of-2 aligned chunks from the end of the
 * kernel image.  Return physical address.
 */
static inline unsigned long early_alloc_phys(unsigned long size)
{
	unsigned long base;

	BUILD_BUG_ON(size & (size - 1));

	kern_size = (kern_size + (size - 1)) & ~(size - 1);
	base = kern_base + kern_size;
	kern_size += size;

	return base;
}
static inline unsigned long load_phys32(unsigned long pa)
{
	unsigned long val;

	__asm__ __volatile__("lduwa	[%1] %2, %0"
			     : "=&r" (val)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

	return val;
}

static inline unsigned long load_phys64(unsigned long pa)
{
	unsigned long val;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=&r" (val)
			     : "r" (pa), "i" (ASI_PHYS_USE_EC));

	return val;
}

static inline void store_phys32(unsigned long pa, unsigned long val)
{
	__asm__ __volatile__("stwa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
}

static inline void store_phys64(unsigned long pa, unsigned long val)
{
	__asm__ __volatile__("stxa	%0, [%1] %2"
			     : /* no outputs */
			     : "r" (val), "r" (pa), "i" (ASI_PHYS_USE_EC));
}
#define BASE_PAGE_SIZE 8192
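
/* Geometry of the two-level OBP page table walked below: a pmd slot
 * is a 32-bit word holding (pte table physical address >> 11); each
 * pte table is one 8K page of 1024 eight-byte ptes and therefore maps
 * 8MB, which is why the pmd index is (va >> 23) and the pte index
 * ((va >> 13) & 0x3ff).
 */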
/*
 * Translate PROM's mapping we capture at boot time into physical address.
 * The second parameter is only set from prom_callback() invocations.
 */
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
	unsigned long pmd_phys = (prom_pmd_phys +
				  ((promva >> 23) & 0x7ff) * sizeof(pmd_t));
	unsigned long pte_phys;
	pmd_t pmd_ent;
	pte_t pte_ent;
	unsigned long base;

	pmd_val(pmd_ent) = load_phys32(pmd_phys);
	if (pmd_none(pmd_ent)) {
		if (error)
			*error = 1;
		return 0;
	}

	pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
	pte_phys += ((promva >> 13) & 0x3ff) * sizeof(pte_t);
	pte_val(pte_ent) = load_phys64(pte_phys);
	if (!pte_present(pte_ent)) {
		if (error)
			*error = 1;
		return 0;
	}
	if (error) {
		*error = 0;
		return pte_val(pte_ent);
	}
	base = pte_val(pte_ent) & _PAGE_PADDR;
	return (base + (promva & (BASE_PAGE_SIZE - 1)));
}
/* The obp translations are saved based on 8k pagesize, since obp can
 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
 * HI_OBP_ADDRESS range are handled in entry.S and do not use the vpte
 * scheme (also, see rant in inherit_locked_prom_mappings()).
 */
static void build_obp_range(unsigned long start, unsigned long end, unsigned long data)
{
	unsigned long vaddr;

	for (vaddr = start; vaddr < end; vaddr += BASE_PAGE_SIZE) {
		unsigned long val, pte_phys, pmd_phys;
		pmd_t pmd_ent;
		int i;

		pmd_phys = (prom_pmd_phys +
			    (((vaddr >> 23) & 0x7ff) * sizeof(pmd_t)));
		pmd_val(pmd_ent) = load_phys32(pmd_phys);
		if (pmd_none(pmd_ent)) {
			pte_phys = early_alloc_phys(BASE_PAGE_SIZE);

			for (i = 0; i < BASE_PAGE_SIZE / sizeof(pte_t); i++)
				store_phys64(pte_phys + i * sizeof(pte_t), 0);

			pmd_val(pmd_ent) = pte_phys >> 11UL;
			store_phys32(pmd_phys, pmd_val(pmd_ent));
		}

		pte_phys = (unsigned long)pmd_val(pmd_ent) << 11UL;
		pte_phys += (((vaddr >> 13) & 0x3ff) * sizeof(pte_t));

		val = data;

		/* Clear diag TTE bits. */
		if (tlb_type == spitfire)
			val &= ~0x0003fe0000000000UL;

		store_phys64(pte_phys, val | _PAGE_MODIFIED);

		data += BASE_PAGE_SIZE;
	}
}
static inline int in_obp_range(unsigned long vaddr)
{
	return (vaddr >= LOW_OBP_ADDRESS &&
		vaddr < HI_OBP_ADDRESS);
}
#define OBP_PMD_SIZE 2048
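
/* Sizing note: OBP_PMD_SIZE bytes of 4-byte slots gives 512 pmd
 * entries; at 8MB of virtual space per entry this comfortably covers
 * the LOW_OBP_ADDRESS -> HI_OBP_ADDRESS window handled here.
 */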
static void build_obp_pgtable(int prom_trans_ents)
{
	int i;

	prom_pmd_phys = early_alloc_phys(OBP_PMD_SIZE);
	for (i = 0; i < OBP_PMD_SIZE; i += 4)
		store_phys32(prom_pmd_phys + i, 0);

	for (i = 0; i < prom_trans_ents; i++) {
		unsigned long start, end;

		if (!in_obp_range(prom_trans[i].virt))
			continue;

		start = prom_trans[i].virt;
		end = start + prom_trans[i].size;
		if (end > HI_OBP_ADDRESS)
			end = HI_OBP_ADDRESS;

		build_obp_range(start, end, prom_trans[i].data);
	}
}
/* Read OBP translations property into 'prom_trans[]'.
 * Return the number of entries.
 */
static int read_obp_translations(void)
{
	int n, node;

	node = prom_finddevice("/virtual-memory");
	n = prom_getproplen(node, "translations");
	if (unlikely(n == 0 || n == -1)) {
		prom_printf("prom_mappings: Couldn't get size.\n");
		prom_halt();
	}
	if (unlikely(n > sizeof(prom_trans))) {
		prom_printf("prom_mappings: Size %Zd is too big.\n", n);
		prom_halt();
	}

	if ((n = prom_getproperty(node, "translations",
				  (char *)&prom_trans[0],
				  sizeof(prom_trans))) == -1) {
		prom_printf("prom_mappings: Couldn't get property.\n");
		prom_halt();
	}
	n = n / sizeof(struct linux_prom_translation);
	return n;
}
static void remap_kernel(void)
{
	unsigned long phys_page, tte_vaddr, tte_data;
	int tlb_ent = sparc64_highest_locked_tlbent();

	tte_vaddr = (unsigned long) KERNBASE;
	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
				 _PAGE_CP | _PAGE_CV | _PAGE_P |
				 _PAGE_L | _PAGE_W));

	kern_locked_tte_data = tte_data;

	/* Now lock us into the TLBs via OBP. */
	prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
	prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
	if (bigkernel) {
		prom_dtlb_load(tlb_ent - 1,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
		prom_itlb_load(tlb_ent - 1,
			       tte_data + 0x400000,
			       tte_vaddr + 0x400000);
	}
}
static void inherit_prom_mappings(void)
{
	int n;

	n = read_obp_translations();
	build_obp_pgtable(n);

	/* Now fixup OBP's idea about where we really are mapped. */
	prom_printf("Remapping the kernel... ");
	remap_kernel();

	prom_printf("done.\n");

	register_prom_callbacks();
}
/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
 * upwards as reserved for use by the firmware (I wonder if this
 * will be the same on Cheetah...).  We use this virtual address
 * range for the VPTE table mappings of the nucleus so we need
 * to zap them when we enter the PROM.  -DaveM
 */
static void __flush_nucleus_vptes(void)
{
	unsigned long prom_reserved_base = 0xfffffffc00000000UL;
	int i;

	/* Only DTLB must be checked for VPTE entries. */
	if (tlb_type == spitfire) {
		for (i = 0; i < 63; i++) {
			unsigned long tag;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			tag = spitfire_get_dtlb_tag(i);
			if (((tag & ~(PAGE_MASK)) == 0) &&
			    ((tag & (PAGE_MASK)) >= prom_reserved_base)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		for (i = 0; i < 512; i++) {
			unsigned long tag = cheetah_get_dtlb_tag(i, 2);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 2);
			}

			if (tlb_type != cheetah_plus)
				continue;

			tag = cheetah_get_dtlb_tag(i, 3);

			if ((tag & ~PAGE_MASK) == 0 &&
			    (tag & PAGE_MASK) >= prom_reserved_base) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_dtlb_data(i, 0x0UL, 3);
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
}
static int prom_ditlb_set;
struct prom_tlb_entry {
	int		tlb_ent;
	unsigned long	tlb_tag;
	unsigned long	tlb_data;
};
struct prom_tlb_entry prom_itlb[16], prom_dtlb[16];
void prom_world(int enter)
{
	unsigned long pstate;
	int i;

	if (!enter)
		set_fs((mm_segment_t) { get_thread_current_ds() });

	if (!prom_ditlb_set)
		return;

	/* Make sure the following runs atomically. */
	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	if (enter) {
		/* Kick out nucleus VPTEs. */
		__flush_nucleus_vptes();

		/* Install PROM world. */
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
					: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
					    "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
							       prom_dtlb[i].tlb_data);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %0, [%1] %2\n\t"
						     "membar #Sync"
						     : : "r" (prom_itlb[i].tlb_tag),
						         "r" (TLB_TAG_ACCESS),
						         "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
				else if (tlb_type == cheetah || tlb_type == cheetah_plus)
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
							       prom_itlb[i].tlb_data);
			}
		}
	} else {
		for (i = 0; i < 16; i++) {
			if (prom_dtlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
					: : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				if (tlb_type == spitfire)
					spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL);
			}
			if (prom_itlb[i].tlb_ent != -1) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS),
						         "i" (ASI_IMMU));
				if (tlb_type == spitfire)
					spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL);
				else
					cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL);
			}
		}
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
void inherit_locked_prom_mappings(int save_p)
{
	int i;
	int dtlb_seen = 0;
	int itlb_seen = 0;

	/* Fucking losing PROM has more mappings in the TLB, but
	 * it (conveniently) fails to mention any of these in the
	 * translations property.  The only ones that matter are
	 * the locked PROM tlb entries, so we impose the following
	 * irrecovable rule on the PROM, it is allowed 8 locked
	 * entries in the ITLB and 8 in the DTLB.
	 *
	 * Supposedly the upper 16GB of the address space is
	 * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED
	 * SOMEWHERE!!!!!!!!!!!!!!!!!  Furthermore the entire interface
	 * used between the client program and the firmware on sun5
	 * systems to coordinate mmu mappings is also COMPLETELY
	 * UNDOCUMENTED!!!!!!  Thanks S(t)un!
	 */
	if (save_p) {
		for (i = 0; i < 16; i++) {
			prom_itlb[i].tlb_ent = -1;
			prom_dtlb[i].tlb_ent = -1;
		}
	}
	if (tlb_type == spitfire) {
		int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no cheetah+
			 *       page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* no outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_dtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* no outputs */
						     : "r" (0),
						       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_dtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* no outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			data = spitfire_get_itlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				/* Spitfire Errata #32 workaround */
				/* NOTE: Always runs on spitfire, so no
				 *       cheetah+ page size encodings.
				 */
				__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
						     "flush	%%g6"
						     : /* no outputs */
						     : "r" (0),
						       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

				tag = spitfire_get_itlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_ldtlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_ldtlb_tag(i);
				if (save_p) {
					prom_dtlb[dtlb_seen].tlb_ent = i;
					prom_dtlb[dtlb_seen].tlb_tag = tag;
					prom_dtlb[dtlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				cheetah_put_ldtlb_data(i, 0x0UL);

				dtlb_seen++;
				if (dtlb_seen > 15)
					break;
			}
		}

		for (i = 0; i < high; i++) {
			unsigned long data;

			data = cheetah_get_litlb_data(i);
			if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) {
				unsigned long tag;

				tag = cheetah_get_litlb_tag(i);
				if (save_p) {
					prom_itlb[itlb_seen].tlb_ent = i;
					prom_itlb[itlb_seen].tlb_tag = tag;
					prom_itlb[itlb_seen].tlb_data = data;
				}
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				cheetah_put_litlb_data(i, 0x0UL);

				itlb_seen++;
				if (itlb_seen > 15)
					break;
			}
		}
	} else {
		/* Implement me :-) */
		BUG();
	}
	if (save_p)
		prom_ditlb_set = 1;
}
/* Give PROM back his world, done during reboots... */
void prom_reload_locked(void)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (prom_dtlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
				: : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS),
				    "i" (ASI_DMMU));
			if (tlb_type == spitfire)
				spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
			else if (tlb_type == cheetah || tlb_type == cheetah_plus)
				cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent,
						       prom_dtlb[i].tlb_data);
		}

		if (prom_itlb[i].tlb_ent != -1) {
			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
					     "membar #Sync"
					     : : "r" (prom_itlb[i].tlb_tag),
					         "r" (TLB_TAG_ACCESS),
					         "i" (ASI_IMMU));
			if (tlb_type == spitfire)
				spitfire_put_itlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
			else
				cheetah_put_litlb_data(prom_itlb[i].tlb_ent,
						       prom_itlb[i].tlb_data);
		}
	}
}
#ifdef DCACHE_ALIASING_POSSIBLE
void __flush_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long va;

	if (tlb_type == spitfire) {
		int n = 0;

		for (va = start; va < end; va += 32) {
			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
			if (++n >= 512)
				break;
		}
	} else {
		start = __pa(start);
		end = __pa(end);
		for (va = start; va < end; va += 32)
			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
					     "membar #Sync"
					     : /* no outputs */
					     : "r" (va),
					       "i" (ASI_DCACHE_INVALIDATE));
	}
}
#endif /* DCACHE_ALIASING_POSSIBLE */
/* If not locked, zap it. */
void __flush_tlb_all(void)
{
	unsigned long pstate;
	int i;

	__asm__ __volatile__("flushw\n\t"
			     "rdpr	%%pstate, %0\n\t"
			     "wrpr	%0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));
	if (tlb_type == spitfire) {
		for (i = 0; i < 64; i++) {
			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
				spitfire_put_dtlb_data(i, 0x0UL);
			}

			/* Spitfire Errata #32 workaround */
			/* NOTE: Always runs on spitfire, so no
			 *       cheetah+ page size encodings.
			 */
			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
					     "flush	%%g6"
					     : /* No outputs */
					     : "r" (0),
					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));

			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
						     "membar #Sync"
						     : /* no outputs */
						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
				spitfire_put_itlb_data(i, 0x0UL);
			}
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		cheetah_flush_dtlb_all();
		cheetah_flush_itlb_all();
	}
	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
			     : : "r" (pstate));
}
/* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
 *
 * We must be careful about boundary cases so that we never
 * let the user have CTX 0 (nucleus) or we ever use a CTX
 * version of zero (and thus NO_CONTEXT would not be caught
 * by version mis-match tests in mmu_context.h).
 */
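
/* A context value is (version bits | context number): the low
 * CTX_NR_MASK bits index mmu_context_bmap, everything above is the
 * version.  On rollover the version field is bumped and the bitmap is
 * reset with bits 0 and 1 kept set ("mmu_context_bmap[0] = 3" below),
 * so the nucleus context 0 is never handed out.
 */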
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long ctx, new_ctx;
	unsigned long orig_pgsz_bits;

	spin_lock(&ctx_alloc_lock);
	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
	if (new_ctx >= (1 << CTX_NR_BITS)) {
		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
		if (new_ctx >= ctx) {
			int i;
			new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
				CTX_FIRST_VERSION;
			if (new_ctx == 1)
				new_ctx = CTX_FIRST_VERSION;

			/* Don't call memset, for 16 entries that's just
			 * plain silly...
			 */
			mmu_context_bmap[0] = 3;
			mmu_context_bmap[1] = 0;
			mmu_context_bmap[2] = 0;
			mmu_context_bmap[3] = 0;
			for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
				mmu_context_bmap[i + 0] = 0;
				mmu_context_bmap[i + 1] = 0;
				mmu_context_bmap[i + 2] = 0;
				mmu_context_bmap[i + 3] = 0;
			}
			new_ctx++;
			goto out;
		}
	}
	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
out:
	tlb_context_cache = new_ctx;
	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
	spin_unlock(&ctx_alloc_lock);
}
#ifndef CONFIG_SMP
struct pgtable_cache_struct pgt_quicklists;
#endif
/* OK, we have to color these pages. The page tables are accessed
 * by non-Dcache enabled mapping in the VPTE area by the dtlb_backend.S
 * code, as well as by PAGE_OFFSET range direct-mapped addresses by
 * other parts of the kernel. By coloring, we make sure that the tlbmiss
 * fast handlers do not get data from old/garbage dcache lines that
 * correspond to an old/stale virtual address (user/kernel) that
 * previously mapped the pagetable page while accessing vpte range
 * addresses. The idea is that if the vpte color and PAGE_OFFSET range
 * color is the same, then when the kernel initializes the pagetable
 * using the later address range, accesses with the first address
 * range will see the newly initialized data rather than the garbage.
 */
#ifdef DCACHE_ALIASING_POSSIBLE
#define DC_ALIAS_SHIFT	1
#else
#define DC_ALIAS_SHIFT	0
#endif
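
/* With DC_ALIAS_SHIFT == 1 the allocation below is order-1: two
 * physically contiguous pages, one of each D-cache color.  The page
 * matching VPTE_COLOR(address) becomes the pte page, and its sibling
 * is threaded onto the opposite-color quicklist.
 */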
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	struct page *page;
	unsigned long color;

	{
		pte_t *ptep = pte_alloc_one_fast(mm, address);

		if (ptep)
			return ptep;
	}

	color = VPTE_COLOR(address);
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
	if (page) {
		unsigned long *to_free;
		unsigned long paddr;
		pte_t *pte;

#ifdef DCACHE_ALIASING_POSSIBLE
		set_page_count(page, 1);
		ClearPageCompound(page);

		set_page_count((page + 1), 1);
		ClearPageCompound(page + 1);
#endif
		paddr = (unsigned long) page_address(page);
		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));

		if (!color) {
			pte = (pte_t *) paddr;
			to_free = (unsigned long *) (paddr + PAGE_SIZE);
		} else {
			pte = (pte_t *) (paddr + PAGE_SIZE);
			to_free = (unsigned long *) paddr;
		}

#ifdef DCACHE_ALIASING_POSSIBLE
		/* Now free the other one up, adjust cache size. */
		preempt_disable();
		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
		pte_quicklist[color ^ 0x1] = to_free;
		pgtable_cache_size++;
		preempt_enable();
#endif

		return pte;
	}
	return NULL;
}
void sparc_ultra_dump_itlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of itlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot),
				slot+1,
				spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1),
				slot+2,
				spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of itlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot),
				slot+1,
				cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1));
		}
		printk ("Contents of itlb2:\n");
		for (slot = 0; slot < 128; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot),
				slot+1,
				cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1));
		}
	}
}
void sparc_ultra_dump_dtlb(void)
{
	int slot;

	if (tlb_type == spitfire) {
		printk ("Contents of dtlb: ");
		for (slot = 0; slot < 14; slot++) printk ("    ");
		printk ("%2x:%016lx,%016lx\n", 0,
			spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0));
		for (slot = 1; slot < 64; slot+=3) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot),
				slot+1,
				spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1),
				slot+2,
				spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2));
		}
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
		printk ("Contents of dtlb0:\n");
		for (slot = 0; slot < 16; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot),
				slot+1,
				cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1));
		}
		printk ("Contents of dtlb2:\n");
		for (slot = 0; slot < 512; slot+=2) {
			printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
				slot,
				cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2),
				slot+1,
				cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2));
		}
		if (tlb_type == cheetah_plus) {
			printk ("Contents of dtlb3:\n");
			for (slot = 0; slot < 512; slot+=2) {
				printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n",
					slot,
					cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3),
					slot+1,
					cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3));
			}
		}
	}
}
extern unsigned long cmdline_memory_size;
unsigned long __init bootmem_init(unsigned long *pages_avail)
{
	unsigned long bootmap_size, start_pfn, end_pfn;
	unsigned long end_of_phys_memory = 0UL;
	unsigned long bootmap_pfn, bytes_avail, size;
	int i;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("bootmem_init: Scan sp_banks, ");
#endif

	bytes_avail = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		end_of_phys_memory = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		bytes_avail += sp_banks[i].num_bytes;
		if (cmdline_memory_size) {
			if (bytes_avail > cmdline_memory_size) {
				unsigned long slack = bytes_avail - cmdline_memory_size;

				bytes_avail -= slack;
				end_of_phys_memory -= slack;

				sp_banks[i].num_bytes -= slack;
				if (sp_banks[i].num_bytes == 0) {
					sp_banks[i].base_addr = 0xdeadbeef;
				} else {
					sp_banks[i+1].num_bytes = 0;
					sp_banks[i+1].base_addr = 0xdeadbeef;
				}
				break;
			}
		}
	}

	*pages_avail = bytes_avail >> PAGE_SHIFT;

	/* Start with page aligned address of last symbol in kernel
	 * image.  The kernel is hard mapped below PAGE_OFFSET in a
	 * 4MB locked TLB translation.
	 */
	start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT;

	bootmap_pfn = start_pfn;

	end_pfn = end_of_phys_memory >> PAGE_SHIFT;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Now have to check initial ramdisk, so that bootmap does not overwrite it */
	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
		unsigned long ramdisk_image = sparc_ramdisk_image ?
			sparc_ramdisk_image : sparc_ramdisk_image64;
		if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
			ramdisk_image -= KERNBASE;
		initrd_start = ramdisk_image + phys_base;
		initrd_end = initrd_start + sparc_ramdisk_size;
		if (initrd_end > end_of_phys_memory) {
			printk(KERN_CRIT "initrd extends beyond end of memory "
			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
			       initrd_end, end_of_phys_memory);
			initrd_start = 0;
		}
		if (initrd_start) {
			if (initrd_start >= (start_pfn << PAGE_SHIFT) &&
			    initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE)
				bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT;
		}
	}
#endif
	/* Initialize the boot-time allocator. */
	max_pfn = max_low_pfn = end_pfn;
	min_low_pfn = pfn_base;

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n",
		    min_low_pfn, bootmap_pfn, max_low_pfn);
#endif
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);

	/* Now register the available physical memory with the
	 * allocator.
	 */
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
			    i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
#endif
		free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		size = initrd_end - initrd_start;

		/* Reserve the initrd image area. */
#ifdef CONFIG_DEBUG_BOOTMEM
		prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
			    initrd_start, initrd_end);
#endif
		reserve_bootmem(initrd_start, size);
		*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

		initrd_start += PAGE_OFFSET;
		initrd_end += PAGE_OFFSET;
	}
#endif
	/* Reserve the kernel text/data/bss. */
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size);
#endif
	reserve_bootmem(kern_base, kern_size);
	*pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT;

	/* Reserve the bootmem map.  We do not account for it
	 * in pages_avail because we will release that memory
	 * in free_all_bootmem.
	 */
	size = bootmap_size;
#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n",
		    (bootmap_pfn << PAGE_SHIFT), size);
#endif
	reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size);
	*pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;

	return end_pfn;
}
/* paging_init() sets up the page tables */

extern void cheetah_ecache_flush_init(void);

static unsigned long last_valid_pfn;
void __init paging_init(void)
{
	extern pmd_t swapper_pmd_dir[1024];
	unsigned long end_pfn, pages_avail, shift;
	unsigned long real_end;

	set_bit(0, mmu_context_bmap);

	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);

	real_end = (unsigned long)_end;
	if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
		bigkernel = 1;
	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
		prom_printf("paging_init: Kernel > 8MB, too large.\n");
		prom_halt();
	}

	/* Set kernel pgd to upper alias so physical page computations
	 * work.
	 */
	init_mm.pgd += ((shift) / (sizeof(pgd_t)));

	memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));

	/* Now can init the kernel/bad page tables. */
	pud_set(pud_offset(&swapper_pg_dir[0], 0),
		swapper_pmd_dir + (shift / sizeof(pgd_t)));

	swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);

	/* Inherit non-locked OBP mappings. */
	inherit_prom_mappings();

	/* Ok, we can use our TLB miss and window trap handlers safely.
	 * We need to do a quick peek here to see if we are on StarFire
	 * or not, so setup_tba can setup the IRQ globals correctly (it
	 * needs to get the hard smp processor id correctly).
	 */
	{
		extern void setup_tba(int);
		setup_tba(this_is_starfire);
	}

	inherit_locked_prom_mappings(1);

	__flush_tlb_all();

	/* Setup bootmem... */
	pages_avail = 0;
	last_valid_pfn = end_pfn = bootmem_init(&pages_avail);

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = end_pfn - pfn_base;
		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		free_area_init_node(0, &contig_page_data, zones_size,
				    phys_base >> PAGE_SHIFT, zholes_size);
	}

	device_scan();
}
/* Ok, it seems that the prom can allocate some more memory chunks
 * as a side effect of some prom calls we perform during the
 * boot sequence.  My most likely theory is that it is from the
 * prom_set_traptable() call, and OBP is allocating a scratchpad
 * for saving client program register state etc.
 */
static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
{
	int swapi = 0;
	int i, mitr;
	unsigned long tmpaddr, tmpsize;
	unsigned long lowest;

	for (i = 0; thislist[i].theres_more != 0; i++) {
		lowest = thislist[i].start_adr;
		for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
			if (thislist[mitr].start_adr < lowest) {
				lowest = thislist[mitr].start_adr;
				swapi = mitr;
			}
		if (lowest == thislist[i].start_adr)
			continue;
		tmpaddr = thislist[swapi].start_adr;
		tmpsize = thislist[swapi].num_bytes;
		for (mitr = swapi; mitr > i; mitr--) {
			thislist[mitr].start_adr = thislist[mitr-1].start_adr;
			thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
		}
		thislist[i].start_adr = tmpaddr;
		thislist[i].num_bytes = tmpsize;
	}
}
void __init rescan_sp_banks(void)
{
	struct linux_prom64_registers memlist[64];
	struct linux_mlist_p1275 avail[64], *mlist;
	unsigned long bytes, base_paddr;
	int num_regs, node = prom_finddevice("/memory");
	int i;

	num_regs = prom_getproperty(node, "available",
				    (char *) memlist, sizeof(memlist));
	num_regs = (num_regs / sizeof(struct linux_prom64_registers));
	for (i = 0; i < num_regs; i++) {
		avail[i].start_adr = memlist[i].phys_addr;
		avail[i].num_bytes = memlist[i].reg_size;
		avail[i].theres_more = &avail[i + 1];
	}
	avail[i - 1].theres_more = NULL;
	sort_memlist(avail);

	mlist = &avail[0];
	i = 0;
	bytes = mlist->num_bytes;
	base_paddr = mlist->start_adr;

	sp_banks[0].base_addr = base_paddr;
	sp_banks[0].num_bytes = bytes;

	while (mlist->theres_more != NULL){
		i++;
		mlist = mlist->theres_more;
		bytes = mlist->num_bytes;
		if (i >= SPARC_PHYS_BANKS-1) {
			printk ("The machine has more banks than "
				"this kernel can support\n"
				"Increase the SPARC_PHYS_BANKS "
				"setting (currently %d)\n",
				SPARC_PHYS_BANKS);
			i = SPARC_PHYS_BANKS-1;
			break;
		}

		sp_banks[i].base_addr = mlist->start_adr;
		sp_banks[i].num_bytes = mlist->num_bytes;
	}

	i++;
	sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
	sp_banks[i].num_bytes = 0;

	for (i = 0; sp_banks[i].num_bytes != 0; i++)
		sp_banks[i].num_bytes &= PAGE_MASK;
}
static void __init taint_real_pages(void)
{
	struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
	int i;

	for (i = 0; i < SPARC_PHYS_BANKS; i++) {
		saved_sp_banks[i].base_addr =
			sp_banks[i].base_addr;
		saved_sp_banks[i].num_bytes =
			sp_banks[i].num_bytes;
	}

	rescan_sp_banks();

	/* Find changes discovered in the sp_bank rescan and
	 * reserve the lost portions in the bootmem maps.
	 */
	for (i = 0; saved_sp_banks[i].num_bytes; i++) {
		unsigned long old_start, old_end;

		old_start = saved_sp_banks[i].base_addr;
		old_end = old_start +
			saved_sp_banks[i].num_bytes;
		while (old_start < old_end) {
			int n;

			for (n = 0; sp_banks[n].num_bytes; n++) {
				unsigned long new_start, new_end;

				new_start = sp_banks[n].base_addr;
				new_end = new_start + sp_banks[n].num_bytes;

				if (new_start <= old_start &&
				    new_end >= (old_start + PAGE_SIZE)) {
					set_bit (old_start >> 22,
						 sparc64_valid_addr_bitmap);
					goto do_next_page;
				}
			}
			reserve_bootmem(old_start, PAGE_SIZE);

		do_next_page:
			old_start += PAGE_SIZE;
		}
	}
}
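
/* sparc64_valid_addr_bitmap is kept at 4MB granularity: the
 * "old_start >> 22" indexing above and the "__pa(addr) >> 22" in
 * mem_init() below mean one bit per 4MB physical chunk.
 */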
void __init mem_init(void)
{
	unsigned long codepages, datapages, initpages;
	unsigned long addr, last;
	int i;

	i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
	i += 1;
	sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
	if (sparc64_valid_addr_bitmap == NULL) {
		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
		prom_halt();
	}
	memset(sparc64_valid_addr_bitmap, 0, i << 3);

	addr = PAGE_OFFSET + kern_base;
	last = PAGE_ALIGN(kern_size) + addr;
	while (addr < last) {
		set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap);
		addr += PAGE_SIZE;
	}

	taint_real_pages();

	max_mapnr = last_valid_pfn - pfn_base;
	high_memory = __va(last_valid_pfn << PAGE_SHIFT);

#ifdef CONFIG_DEBUG_BOOTMEM
	prom_printf("mem_init: Calling free_all_bootmem().\n");
#endif
	totalram_pages = num_physpages = free_all_bootmem() - 1;

	/*
	 * Set up the zero page, mark it reserved, so that page count
	 * is not manipulated when freeing the page from user ptes.
	 */
	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
	if (mem_map_zero == NULL) {
		prom_printf("paging_init: Cannot alloc zero page.\n");
		prom_halt();
	}
	SetPageReserved(mem_map_zero);

	codepages = (((unsigned long) _etext) - ((unsigned long) _start));
	codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
	datapages = (((unsigned long) _edata) - ((unsigned long) _etext));
	datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT;
	initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
	initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;

	printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       codepages << (PAGE_SHIFT-10),
	       datapages << (PAGE_SHIFT-10),
	       initpages << (PAGE_SHIFT-10),
	       PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT));

	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_ecache_flush_init();
}
void free_initmem (void)
{
	unsigned long addr, initend;

	/*
	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
	 */
	addr = PAGE_ALIGN((unsigned long)(__init_begin));
	initend = (unsigned long)(__init_end) & PAGE_MASK;
	for (; addr < initend; addr += PAGE_SIZE) {
		unsigned long page;
		struct page *p;

		page = (addr +
			((unsigned long) __va(kern_base)) -
			((unsigned long) KERNBASE));
		memset((void *)addr, 0xcc, PAGE_SIZE);
		p = virt_to_page(page);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start < end)
		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	for (; start < end; start += PAGE_SIZE) {
		struct page *p = virt_to_page(start);

		ClearPageReserved(p);
		set_page_count(p, 1);
		__free_page(p);
		num_physpages++;
		totalram_pages++;
	}
}
#endif