Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: init.c,v 1.209 2002/02/09 19:49:31 davem Exp $ |
2 | * arch/sparc64/mm/init.c | |
3 | * | |
4 | * Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu) | |
5 | * Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz) | |
6 | */ | |
7 | ||
8 | #include <linux/config.h> | |
9 | #include <linux/kernel.h> | |
10 | #include <linux/sched.h> | |
11 | #include <linux/string.h> | |
12 | #include <linux/init.h> | |
13 | #include <linux/bootmem.h> | |
14 | #include <linux/mm.h> | |
15 | #include <linux/hugetlb.h> | |
16 | #include <linux/slab.h> | |
17 | #include <linux/initrd.h> | |
18 | #include <linux/swap.h> | |
19 | #include <linux/pagemap.h> | |
20 | #include <linux/fs.h> | |
21 | #include <linux/seq_file.h> | |
05e14cb3 | 22 | #include <linux/kprobes.h> |
1ac4f5eb | 23 | #include <linux/cache.h> |
13edad7a | 24 | #include <linux/sort.h> |
1da177e4 LT |
25 | |
26 | #include <asm/head.h> | |
27 | #include <asm/system.h> | |
28 | #include <asm/page.h> | |
29 | #include <asm/pgalloc.h> | |
30 | #include <asm/pgtable.h> | |
31 | #include <asm/oplib.h> | |
32 | #include <asm/iommu.h> | |
33 | #include <asm/io.h> | |
34 | #include <asm/uaccess.h> | |
35 | #include <asm/mmu_context.h> | |
36 | #include <asm/tlbflush.h> | |
37 | #include <asm/dma.h> | |
38 | #include <asm/starfire.h> | |
39 | #include <asm/tlb.h> | |
40 | #include <asm/spitfire.h> | |
41 | #include <asm/sections.h> | |
42 | ||
43 | extern void device_scan(void); | |
44 | ||
13edad7a DM |
45 | #define MAX_BANKS 32 |
46 | ||
47 | static struct linux_prom64_registers pavail[MAX_BANKS] __initdata; | |
48 | static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata; | |
49 | static int pavail_ents __initdata; | |
50 | static int pavail_rescan_ents __initdata; | |
51 | ||
52 | static int cmp_p64(const void *a, const void *b) | |
53 | { | |
54 | const struct linux_prom64_registers *x = a, *y = b; | |
55 | ||
56 | if (x->phys_addr > y->phys_addr) | |
57 | return 1; | |
58 | if (x->phys_addr < y->phys_addr) | |
59 | return -1; | |
60 | return 0; | |
61 | } | |
62 | ||
63 | static void __init read_obp_memory(const char *property, | |
64 | struct linux_prom64_registers *regs, | |
65 | int *num_ents) | |
66 | { | |
67 | int node = prom_finddevice("/memory"); | |
68 | int prop_size = prom_getproplen(node, property); | |
69 | int ents, ret, i; | |
70 | ||
71 | ents = prop_size / sizeof(struct linux_prom64_registers); | |
72 | if (ents > MAX_BANKS) { | |
73 | prom_printf("The machine has more %s property entries than " | |
74 | "this kernel can support (%d).\n", | |
75 | property, MAX_BANKS); | |
76 | prom_halt(); | |
77 | } | |
78 | ||
79 | ret = prom_getproperty(node, property, (char *) regs, prop_size); | |
80 | if (ret == -1) { | |
81 | prom_printf("Couldn't get %s property from /memory.\n"); | |
82 | prom_halt(); | |
83 | } | |
84 | ||
85 | *num_ents = ents; | |
10147570 | 86 | |
13edad7a DM |
87 | /* Sanitize what we got from the firmware, by page aligning |
88 | * everything. | |
89 | */ | |
90 | for (i = 0; i < ents; i++) { | |
91 | unsigned long base, size; | |
92 | ||
93 | base = regs[i].phys_addr; | |
94 | size = regs[i].reg_size; | |
10147570 | 95 | |
13edad7a DM |
96 | size &= PAGE_MASK; |
97 | if (base & ~PAGE_MASK) { | |
98 | unsigned long new_base = PAGE_ALIGN(base); | |
99 | ||
100 | size -= new_base - base; | |
101 | if ((long) size < 0L) | |
102 | size = 0UL; | |
103 | base = new_base; | |
104 | } | |
105 | regs[i].phys_addr = base; | |
106 | regs[i].reg_size = size; | |
107 | } | |
c9c10830 | 108 | sort(regs, ents, sizeof(struct linux_prom64_registers), |
13edad7a DM |
109 | cmp_p64, NULL); |
110 | } | |
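read_obp_memory() above page-aligns whatever the firmware reports before sorting it. Below is a minimal user-space sketch of that sanitization arithmetic only, assuming an illustrative 8K base page and a made-up bank descriptor; the kernel's PAGE_MASK/PAGE_ALIGN macros behave the same way.

```c
#include <stdio.h>

#define PAGE_SIZE     8192UL                     /* illustrative sparc64 base page */
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	/* Hypothetical bank that starts 0x1000 bytes into a page. */
	unsigned long base = 0x40001000UL, size = 0x10000UL;

	size &= PAGE_MASK;                       /* drop any partial trailing page */
	if (base & ~PAGE_MASK) {                 /* round the start up to a page */
		unsigned long new_base = PAGE_ALIGN(base);

		size -= new_base - base;
		if ((long) size < 0L)
			size = 0UL;
		base = new_base;
	}
	printf("base=%#lx size=%#lx\n", base, size);   /* base=0x40002000 size=0xf000 */
	return 0;
}
```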
1da177e4 | 111 | |
2bdb3cb2 | 112 | unsigned long *sparc64_valid_addr_bitmap __read_mostly; |
1da177e4 LT |
113 | |
114 | /* Ugly, but necessary... -DaveM */ | |
1ac4f5eb DM |
115 | unsigned long phys_base __read_mostly; |
116 | unsigned long kern_base __read_mostly; | |
117 | unsigned long kern_size __read_mostly; | |
118 | unsigned long pfn_base __read_mostly; | |
1da177e4 | 119 | |
1da177e4 LT |
120 | /* get_new_mmu_context() uses "cache + 1". */ |
121 | DEFINE_SPINLOCK(ctx_alloc_lock); | |
122 | unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; | |
123 | #define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6)) | |
124 | unsigned long mmu_context_bmap[CTX_BMAP_SLOTS]; | |
125 | ||
126 | /* References to special section boundaries */ | |
127 | extern char _start[], _end[]; | |
128 | ||
129 | /* Initial ramdisk setup */ | |
130 | extern unsigned long sparc_ramdisk_image64; | |
131 | extern unsigned int sparc_ramdisk_image; | |
132 | extern unsigned int sparc_ramdisk_size; | |
133 | ||
1ac4f5eb | 134 | struct page *mem_map_zero __read_mostly; |
1da177e4 | 135 | |
0835ae0f DM |
136 | unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly; |
137 | ||
138 | unsigned long sparc64_kern_pri_context __read_mostly; | |
139 | unsigned long sparc64_kern_pri_nuc_bits __read_mostly; | |
140 | unsigned long sparc64_kern_sec_context __read_mostly; | |
141 | ||
1da177e4 LT |
142 | int bigkernel = 0; |
143 | ||
3c936465 | 144 | kmem_cache_t *pgtable_cache __read_mostly; |
1da177e4 | 145 | |
3c936465 DM |
146 | static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags) |
147 | { | |
148 | clear_page(addr); | |
149 | } | |
05e28f9d | 150 | |
3c936465 | 151 | void pgtable_cache_init(void) |
1da177e4 | 152 | { |
3c936465 DM |
153 | pgtable_cache = kmem_cache_create("pgtable_cache", |
154 | PAGE_SIZE, PAGE_SIZE, | |
155 | SLAB_HWCACHE_ALIGN | | |
156 | SLAB_MUST_HWCACHE_ALIGN, | |
157 | zero_ctor, | |
158 | NULL); | |
159 | if (!pgtable_cache) { | |
160 | prom_printf("pgtable_cache_init(): Could not create!\n"); | |
161 | prom_halt(); | |
1da177e4 | 162 | } |
1da177e4 LT |
163 | } |
164 | ||
165 | #ifdef CONFIG_DEBUG_DCFLUSH | |
166 | atomic_t dcpage_flushes = ATOMIC_INIT(0); | |
167 | #ifdef CONFIG_SMP | |
168 | atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0); | |
169 | #endif | |
170 | #endif | |
171 | ||
172 | __inline__ void flush_dcache_page_impl(struct page *page) | |
173 | { | |
174 | #ifdef CONFIG_DEBUG_DCFLUSH | |
175 | atomic_inc(&dcpage_flushes); | |
176 | #endif | |
177 | ||
178 | #ifdef DCACHE_ALIASING_POSSIBLE | |
179 | __flush_dcache_page(page_address(page), | |
180 | ((tlb_type == spitfire) && | |
181 | page_mapping(page) != NULL)); | |
182 | #else | |
183 | if (page_mapping(page) != NULL && | |
184 | tlb_type == spitfire) | |
185 | __flush_icache_page(__pa(page_address(page))); | |
186 | #endif | |
187 | } | |
188 | ||
189 | #define PG_dcache_dirty PG_arch_1 | |
48b0e548 DM |
190 | #define PG_dcache_cpu_shift 24 |
191 | #define PG_dcache_cpu_mask (256 - 1) | |
192 | ||
193 | #if NR_CPUS > 256 | |
194 | #error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus | |
195 | #endif | |
1da177e4 LT |
196 | |
197 | #define dcache_dirty_cpu(page) \ | |
48b0e548 | 198 | (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask) |
1da177e4 LT |
199 | |
200 | static __inline__ void set_dcache_dirty(struct page *page, int this_cpu) | |
201 | { | |
202 | unsigned long mask = this_cpu; | |
48b0e548 DM |
203 | unsigned long non_cpu_bits; |
204 | ||
205 | non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift); | |
206 | mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty); | |
207 | ||
1da177e4 LT |
208 | __asm__ __volatile__("1:\n\t" |
209 | "ldx [%2], %%g7\n\t" | |
210 | "and %%g7, %1, %%g1\n\t" | |
211 | "or %%g1, %0, %%g1\n\t" | |
212 | "casx [%2], %%g7, %%g1\n\t" | |
213 | "cmp %%g7, %%g1\n\t" | |
b445e26c | 214 | "membar #StoreLoad | #StoreStore\n\t" |
1da177e4 | 215 | "bne,pn %%xcc, 1b\n\t" |
b445e26c | 216 | " nop" |
1da177e4 LT |
217 | : /* no outputs */ |
218 | : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags) | |
219 | : "g1", "g7"); | |
220 | } | |
221 | ||
222 | static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu) | |
223 | { | |
224 | unsigned long mask = (1UL << PG_dcache_dirty); | |
225 | ||
226 | __asm__ __volatile__("! test_and_clear_dcache_dirty\n" | |
227 | "1:\n\t" | |
228 | "ldx [%2], %%g7\n\t" | |
48b0e548 | 229 | "srlx %%g7, %4, %%g1\n\t" |
1da177e4 LT |
230 | "and %%g1, %3, %%g1\n\t" |
231 | "cmp %%g1, %0\n\t" | |
232 | "bne,pn %%icc, 2f\n\t" | |
233 | " andn %%g7, %1, %%g1\n\t" | |
234 | "casx [%2], %%g7, %%g1\n\t" | |
235 | "cmp %%g7, %%g1\n\t" | |
b445e26c | 236 | "membar #StoreLoad | #StoreStore\n\t" |
1da177e4 | 237 | "bne,pn %%xcc, 1b\n\t" |
b445e26c | 238 | " nop\n" |
1da177e4 LT |
239 | "2:" |
240 | : /* no outputs */ | |
241 | : "r" (cpu), "r" (mask), "r" (&page->flags), | |
48b0e548 DM |
242 | "i" (PG_dcache_cpu_mask), |
243 | "i" (PG_dcache_cpu_shift) | |
1da177e4 LT |
244 | : "g1", "g7"); |
245 | } | |
246 | ||
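set_dcache_dirty() and clear_dcache_dirty_cpu() above pack two things into page->flags: an 8-bit owning-CPU field at PG_dcache_cpu_shift and a dirty bit at PG_arch_1, updated atomically with casx loops. A plain C model of just the packing, with an illustrative bit position standing in for PG_arch_1 and none of the atomicity:

```c
#include <stdio.h>

#define PG_dcache_dirty      7UL        /* stands in for PG_arch_1; real bit differs */
#define PG_dcache_cpu_shift  24UL
#define PG_dcache_cpu_mask   (256UL - 1UL)

static unsigned long set_dirty(unsigned long flags, unsigned long cpu)
{
	flags &= ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);   /* clear old CPU field */
	return flags | (cpu << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
}

static unsigned long dirty_cpu(unsigned long flags)
{
	return (flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask;
}

int main(void)
{
	unsigned long flags = set_dirty(0UL, 5UL);

	/* prints dirty=1 cpu=5 */
	printf("dirty=%lu cpu=%lu\n",
	       (flags >> PG_dcache_dirty) & 1UL, dirty_cpu(flags));
	return 0;
}
```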
1da177e4 LT |
247 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) |
248 | { | |
bd40791e | 249 | struct mm_struct *mm; |
1da177e4 LT |
250 | struct page *page; |
251 | unsigned long pfn; | |
252 | unsigned long pg_flags; | |
bd40791e | 253 | unsigned long mm_rss; |
1da177e4 LT |
254 | |
255 | pfn = pte_pfn(pte); | |
256 | if (pfn_valid(pfn) && | |
257 | (page = pfn_to_page(pfn), page_mapping(page)) && | |
258 | ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) { | |
48b0e548 DM |
259 | int cpu = ((pg_flags >> PG_dcache_cpu_shift) & |
260 | PG_dcache_cpu_mask); | |
1da177e4 LT |
261 | int this_cpu = get_cpu(); |
262 | ||
263 | /* This is just to optimize away some function calls | |
264 | * in the SMP case. | |
265 | */ | |
266 | if (cpu == this_cpu) | |
267 | flush_dcache_page_impl(page); | |
268 | else | |
269 | smp_flush_dcache_page_impl(page, cpu); | |
270 | ||
271 | clear_dcache_dirty_cpu(page, cpu); | |
272 | ||
273 | put_cpu(); | |
274 | } | |
bd40791e DM |
275 | |
276 | mm = vma->vm_mm; | |
277 | mm_rss = get_mm_rss(mm); | |
278 | if (mm_rss >= mm->context.tsb_rss_limit) | |
279 | tsb_grow(mm, mm_rss, GFP_ATOMIC); | |
b70c0fa1 DM |
280 | |
281 | if ((pte_val(pte) & _PAGE_ALL_SZ_BITS) == _PAGE_SZBITS) { | |
282 | struct tsb *tsb; | |
283 | unsigned long tag; | |
284 | ||
285 | tsb = &mm->context.tsb[(address >> PAGE_SHIFT) & | |
286 | (mm->context.tsb_nentries - 1UL)]; | |
287 | tag = (address >> 22UL) | CTX_HWBITS(mm->context) << 48UL; | |
288 | tsb_insert(tsb, tag, pte_val(pte)); | |
289 | } | |
1da177e4 LT |
290 | } |
291 | ||
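When update_mmu_cache() above sees a base-size PTE it preloads the per-mm TSB. The sketch below covers only the index/tag arithmetic it uses, assuming an 8K base page and a 512-entry TSB for illustration; the actual store is done by tsb_insert() against mm->context.tsb.

```c
#include <stdio.h>

#define PAGE_SHIFT   13UL               /* illustrative 8K base pages */
#define TSB_NENTRIES 512UL              /* illustrative TSB size */

int main(void)
{
	unsigned long vaddr = 0x0000700000042000UL;   /* hypothetical faulting address */
	unsigned long ctx   = 0x123UL;                /* hypothetical hardware context */

	/* Index selects a TSB slot; the tag records the VA (4MB granularity)
	 * plus the context in the upper bits, as in update_mmu_cache().
	 */
	unsigned long idx = (vaddr >> PAGE_SHIFT) & (TSB_NENTRIES - 1UL);
	unsigned long tag = (vaddr >> 22UL) | (ctx << 48UL);

	printf("tsb index=%lu tag=%#lx\n", idx, tag);
	return 0;
}
```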
292 | void flush_dcache_page(struct page *page) | |
293 | { | |
a9546f59 DM |
294 | struct address_space *mapping; |
295 | int this_cpu; | |
1da177e4 | 296 | |
a9546f59 DM |
297 | /* Do not bother with the expensive D-cache flush if it |
298 | * is merely the zero page. The 'bigcore' testcase in GDB | |
299 | * causes this case to run millions of times. | |
300 | */ | |
301 | if (page == ZERO_PAGE(0)) | |
302 | return; | |
303 | ||
304 | this_cpu = get_cpu(); | |
305 | ||
306 | mapping = page_mapping(page); | |
1da177e4 | 307 | if (mapping && !mapping_mapped(mapping)) { |
a9546f59 | 308 | int dirty = test_bit(PG_dcache_dirty, &page->flags); |
1da177e4 | 309 | if (dirty) { |
a9546f59 DM |
310 | int dirty_cpu = dcache_dirty_cpu(page); |
311 | ||
1da177e4 LT |
312 | if (dirty_cpu == this_cpu) |
313 | goto out; | |
314 | smp_flush_dcache_page_impl(page, dirty_cpu); | |
315 | } | |
316 | set_dcache_dirty(page, this_cpu); | |
317 | } else { | |
318 | /* We could delay the flush for the !page_mapping | |
319 | * case too. But that case is for exec env/arg | |
320 | * pages and those are 99% certainly going to get | |
321 | * faulted into the tlb (and thus flushed) anyways. | |
322 | */ | |
323 | flush_dcache_page_impl(page); | |
324 | } | |
325 | ||
326 | out: | |
327 | put_cpu(); | |
328 | } | |
329 | ||
05e14cb3 | 330 | void __kprobes flush_icache_range(unsigned long start, unsigned long end) |
1da177e4 LT |
331 | { |
332 | /* Cheetah has coherent I-cache. */ | |
333 | if (tlb_type == spitfire) { | |
334 | unsigned long kaddr; | |
335 | ||
336 | for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) | |
337 | __flush_icache_page(__get_phys(kaddr)); | |
338 | } | |
339 | } | |
340 | ||
341 | unsigned long page_to_pfn(struct page *page) | |
342 | { | |
343 | return (unsigned long) ((page - mem_map) + pfn_base); | |
344 | } | |
345 | ||
346 | struct page *pfn_to_page(unsigned long pfn) | |
347 | { | |
348 | return (mem_map + (pfn - pfn_base)); | |
349 | } | |
350 | ||
351 | void show_mem(void) | |
352 | { | |
353 | printk("Mem-info:\n"); | |
354 | show_free_areas(); | |
355 | printk("Free swap: %6ldkB\n", | |
356 | nr_swap_pages << (PAGE_SHIFT-10)); | |
357 | printk("%ld pages of RAM\n", num_physpages); | |
358 | printk("%d free pages\n", nr_free_pages()); | |
1da177e4 LT |
359 | } |
360 | ||
361 | void mmu_info(struct seq_file *m) | |
362 | { | |
363 | if (tlb_type == cheetah) | |
364 | seq_printf(m, "MMU Type\t: Cheetah\n"); | |
365 | else if (tlb_type == cheetah_plus) | |
366 | seq_printf(m, "MMU Type\t: Cheetah+\n"); | |
367 | else if (tlb_type == spitfire) | |
368 | seq_printf(m, "MMU Type\t: Spitfire\n"); | |
369 | else | |
370 | seq_printf(m, "MMU Type\t: ???\n"); | |
371 | ||
372 | #ifdef CONFIG_DEBUG_DCFLUSH | |
373 | seq_printf(m, "DCPageFlushes\t: %d\n", | |
374 | atomic_read(&dcpage_flushes)); | |
375 | #ifdef CONFIG_SMP | |
376 | seq_printf(m, "DCPageFlushesXC\t: %d\n", | |
377 | atomic_read(&dcpage_flushes_xcall)); | |
378 | #endif /* CONFIG_SMP */ | |
379 | #endif /* CONFIG_DEBUG_DCFLUSH */ | |
380 | } | |
381 | ||
382 | struct linux_prom_translation { | |
383 | unsigned long virt; | |
384 | unsigned long size; | |
385 | unsigned long data; | |
386 | }; | |
c9c10830 DM |
387 | |
388 | /* Exported for kernel TLB miss handling in ktlb.S */ | |
389 | struct linux_prom_translation prom_trans[512] __read_mostly; | |
390 | unsigned int prom_trans_ents __read_mostly; | |
391 | unsigned int swapper_pgd_zero __read_mostly; | |
1da177e4 LT |
392 | |
393 | extern unsigned long prom_boot_page; | |
394 | extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle); | |
395 | extern int prom_get_mmu_ihandle(void); | |
396 | extern void register_prom_callbacks(void); | |
397 | ||
398 | /* Exported for SMP bootup purposes. */ | |
399 | unsigned long kern_locked_tte_data; | |
400 | ||
1da177e4 LT |
401 | /* |
402 | * Translate a PROM virtual address into a physical address using the mappings we capture at boot time. | |
403 | * The second parameter is only set from prom_callback() invocations. | |
404 | */ | |
405 | unsigned long prom_virt_to_phys(unsigned long promva, int *error) | |
406 | { | |
c9c10830 | 407 | int i; |
405599bd | 408 | |
c9c10830 DM |
409 | for (i = 0; i < prom_trans_ents; i++) { |
410 | struct linux_prom_translation *p = &prom_trans[i]; | |
405599bd | 411 | |
c9c10830 DM |
412 | if (promva >= p->virt && |
413 | promva < (p->virt + p->size)) { | |
414 | unsigned long base = p->data & _PAGE_PADDR; | |
5085b4a5 | 415 | |
c9c10830 DM |
416 | if (error) |
417 | *error = 0; | |
418 | return base + (promva & (8192 - 1)); | |
405599bd | 419 | } |
405599bd | 420 | } |
c9c10830 DM |
421 | if (error) |
422 | *error = 1; | |
423 | return 0UL; | |
405599bd DM |
424 | } |
425 | ||
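A self-contained model of the lookup prom_virt_to_phys() performs: scan the captured translations, and on a hit combine the TTE's physical-address bits with the offset within an 8K page. The table entries and the _PAGE_PADDR stand-in below are made-up values for illustration only.

```c
#include <stdio.h>

struct xlate { unsigned long virt, size, data; };

/* Hypothetical OBP translations; the low bits of 'data' hold TTE attributes. */
static const struct xlate trans[] = {
	{ 0xf0000000UL, 0x4000UL, 0x000001000000008eUL },
	{ 0xf0010000UL, 0x2000UL, 0x000001001000008eUL },
};

#define PADDR_MASK 0x000001fffffff000UL   /* stand-in for _PAGE_PADDR */

static unsigned long lookup(unsigned long va, int *error)
{
	unsigned long i;

	for (i = 0; i < sizeof(trans) / sizeof(trans[0]); i++) {
		if (va >= trans[i].virt && va < trans[i].virt + trans[i].size) {
			if (error)
				*error = 0;
			return (trans[i].data & PADDR_MASK) + (va & (8192UL - 1UL));
		}
	}
	if (error)
		*error = 1;
	return 0UL;
}

int main(void)
{
	int err;
	unsigned long pa = lookup(0xf0000100UL, &err);

	printf("err=%d pa=%#lx\n", err, pa);   /* err=0 pa=0x10000000100 */
	return 0;
}
```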
c9c10830 DM |
426 | /* The obp translations are saved based on 8k pagesize, since obp can |
427 | * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> | |
74bf4312 | 428 | * HI_OBP_ADDRESS range are handled in ktlb.S. |
c9c10830 | 429 | */ |
5085b4a5 DM |
430 | static inline int in_obp_range(unsigned long vaddr) |
431 | { | |
432 | return (vaddr >= LOW_OBP_ADDRESS && | |
433 | vaddr < HI_OBP_ADDRESS); | |
434 | } | |
435 | ||
c9c10830 | 436 | static int cmp_ptrans(const void *a, const void *b) |
405599bd | 437 | { |
c9c10830 | 438 | const struct linux_prom_translation *x = a, *y = b; |
405599bd | 439 | |
c9c10830 DM |
440 | if (x->virt > y->virt) |
441 | return 1; | |
442 | if (x->virt < y->virt) | |
443 | return -1; | |
444 | return 0; | |
405599bd DM |
445 | } |
446 | ||
c9c10830 | 447 | /* Read OBP translations property into 'prom_trans[]'. */ |
9ad98c5b | 448 | static void __init read_obp_translations(void) |
405599bd | 449 | { |
c9c10830 | 450 | int n, node, ents, first, last, i; |
1da177e4 LT |
451 | |
452 | node = prom_finddevice("/virtual-memory"); | |
453 | n = prom_getproplen(node, "translations"); | |
405599bd | 454 | if (unlikely(n == 0 || n == -1)) { |
b206fc4c | 455 | prom_printf("prom_mappings: Couldn't get size.\n"); |
1da177e4 LT |
456 | prom_halt(); |
457 | } | |
405599bd DM |
458 | if (unlikely(n > sizeof(prom_trans))) { |
459 | prom_printf("prom_mappings: Size %Zd is too big.\n", n); | |
1da177e4 LT |
460 | prom_halt(); |
461 | } | |
405599bd | 462 | |
b206fc4c | 463 | if ((n = prom_getproperty(node, "translations", |
405599bd DM |
464 | (char *)&prom_trans[0], |
465 | sizeof(prom_trans))) == -1) { | |
b206fc4c | 466 | prom_printf("prom_mappings: Couldn't get property.\n"); |
1da177e4 LT |
467 | prom_halt(); |
468 | } | |
9ad98c5b | 469 | |
b206fc4c | 470 | n = n / sizeof(struct linux_prom_translation); |
9ad98c5b | 471 | |
c9c10830 DM |
472 | ents = n; |
473 | ||
474 | sort(prom_trans, ents, sizeof(struct linux_prom_translation), | |
475 | cmp_ptrans, NULL); | |
476 | ||
477 | /* Now kick out all the non-OBP entries. */ | |
478 | for (i = 0; i < ents; i++) { | |
479 | if (in_obp_range(prom_trans[i].virt)) | |
480 | break; | |
481 | } | |
482 | first = i; | |
483 | for (; i < ents; i++) { | |
484 | if (!in_obp_range(prom_trans[i].virt)) | |
485 | break; | |
486 | } | |
487 | last = i; | |
488 | ||
489 | for (i = 0; i < (last - first); i++) { | |
490 | struct linux_prom_translation *src = &prom_trans[i + first]; | |
491 | struct linux_prom_translation *dest = &prom_trans[i]; | |
492 | ||
493 | *dest = *src; | |
494 | } | |
495 | for (; i < ents; i++) { | |
496 | struct linux_prom_translation *dest = &prom_trans[i]; | |
497 | dest->virt = dest->size = dest->data = 0x0UL; | |
498 | } | |
499 | ||
500 | prom_trans_ents = last - first; | |
501 | ||
502 | if (tlb_type == spitfire) { | |
503 | /* Clear diag TTE bits. */ | |
504 | for (i = 0; i < prom_trans_ents; i++) | |
505 | prom_trans[i].data &= ~0x0003fe0000000000UL; | |
506 | } | |
405599bd | 507 | } |
1da177e4 | 508 | |
898cf0ec | 509 | static void __init remap_kernel(void) |
405599bd DM |
510 | { |
511 | unsigned long phys_page, tte_vaddr, tte_data; | |
405599bd DM |
512 | int tlb_ent = sparc64_highest_locked_tlbent(); |
513 | ||
1da177e4 | 514 | tte_vaddr = (unsigned long) KERNBASE; |
bff06d55 DM |
515 | phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; |
516 | tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | | |
517 | _PAGE_CP | _PAGE_CV | _PAGE_P | | |
518 | _PAGE_L | _PAGE_W)); | |
1da177e4 LT |
519 | |
520 | kern_locked_tte_data = tte_data; | |
521 | ||
bff06d55 | 522 | /* Now lock us into the TLBs via OBP. */ |
405599bd DM |
523 | prom_dtlb_load(tlb_ent, tte_data, tte_vaddr); |
524 | prom_itlb_load(tlb_ent, tte_data, tte_vaddr); | |
1da177e4 | 525 | if (bigkernel) { |
0835ae0f DM |
526 | tlb_ent -= 1; |
527 | prom_dtlb_load(tlb_ent, | |
405599bd DM |
528 | tte_data + 0x400000, |
529 | tte_vaddr + 0x400000); | |
0835ae0f | 530 | prom_itlb_load(tlb_ent, |
405599bd DM |
531 | tte_data + 0x400000, |
532 | tte_vaddr + 0x400000); | |
1da177e4 | 533 | } |
0835ae0f DM |
534 | sparc64_highest_unlocked_tlb_ent = tlb_ent - 1; |
535 | if (tlb_type == cheetah_plus) { | |
536 | sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 | | |
537 | CTX_CHEETAH_PLUS_NUC); | |
538 | sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC; | |
539 | sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0; | |
540 | } | |
405599bd | 541 | } |
1da177e4 | 542 | |
405599bd | 543 | |
c9c10830 | 544 | static void __init inherit_prom_mappings(void) |
9ad98c5b DM |
545 | { |
546 | read_obp_translations(); | |
405599bd DM |
547 | |
548 | /* Now fixup OBP's idea about where we really are mapped. */ | |
549 | prom_printf("Remapping the kernel... "); | |
550 | remap_kernel(); | |
1da177e4 LT |
551 | prom_printf("done.\n"); |
552 | ||
c9c10830 | 553 | prom_printf("Registering callbacks... "); |
1da177e4 | 554 | register_prom_callbacks(); |
c9c10830 | 555 | prom_printf("done.\n"); |
1da177e4 LT |
556 | } |
557 | ||
1da177e4 LT |
558 | static int prom_ditlb_set; |
559 | struct prom_tlb_entry { | |
560 | int tlb_ent; | |
561 | unsigned long tlb_tag; | |
562 | unsigned long tlb_data; | |
563 | }; | |
564 | struct prom_tlb_entry prom_itlb[16], prom_dtlb[16]; | |
565 | ||
566 | void prom_world(int enter) | |
567 | { | |
568 | unsigned long pstate; | |
569 | int i; | |
570 | ||
571 | if (!enter) | |
572 | set_fs((mm_segment_t) { get_thread_current_ds() }); | |
573 | ||
574 | if (!prom_ditlb_set) | |
575 | return; | |
576 | ||
577 | /* Make sure the following runs atomically. */ | |
578 | __asm__ __volatile__("flushw\n\t" | |
579 | "rdpr %%pstate, %0\n\t" | |
580 | "wrpr %0, %1, %%pstate" | |
581 | : "=r" (pstate) | |
582 | : "i" (PSTATE_IE)); | |
583 | ||
584 | if (enter) { | |
1da177e4 LT |
585 | /* Install PROM world. */ |
586 | for (i = 0; i < 16; i++) { | |
587 | if (prom_dtlb[i].tlb_ent != -1) { | |
588 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
589 | "membar #Sync" | |
590 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | |
591 | "i" (ASI_DMMU)); | |
592 | if (tlb_type == spitfire) | |
593 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | |
594 | prom_dtlb[i].tlb_data); | |
595 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
596 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | |
597 | prom_dtlb[i].tlb_data); | |
598 | } | |
599 | if (prom_itlb[i].tlb_ent != -1) { | |
600 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
601 | "membar #Sync" | |
602 | : : "r" (prom_itlb[i].tlb_tag), | |
603 | "r" (TLB_TAG_ACCESS), | |
604 | "i" (ASI_IMMU)); | |
605 | if (tlb_type == spitfire) | |
606 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | |
607 | prom_itlb[i].tlb_data); | |
608 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
609 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | |
610 | prom_itlb[i].tlb_data); | |
611 | } | |
612 | } | |
613 | } else { | |
614 | for (i = 0; i < 16; i++) { | |
615 | if (prom_dtlb[i].tlb_ent != -1) { | |
616 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
617 | "membar #Sync" | |
618 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
619 | if (tlb_type == spitfire) | |
620 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | |
621 | else | |
622 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, 0x0UL); | |
623 | } | |
624 | if (prom_itlb[i].tlb_ent != -1) { | |
625 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
626 | "membar #Sync" | |
627 | : : "r" (TLB_TAG_ACCESS), | |
628 | "i" (ASI_IMMU)); | |
629 | if (tlb_type == spitfire) | |
630 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, 0x0UL); | |
631 | else | |
632 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, 0x0UL); | |
633 | } | |
634 | } | |
635 | } | |
636 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | |
637 | : : "r" (pstate)); | |
638 | } | |
639 | ||
640 | void inherit_locked_prom_mappings(int save_p) | |
641 | { | |
642 | int i; | |
643 | int dtlb_seen = 0; | |
644 | int itlb_seen = 0; | |
645 | ||
646 | /* Fucking losing PROM has more mappings in the TLB, but | |
647 | * it (conveniently) fails to mention any of these in the | |
648 | * translations property. The only ones that matter are | |
649 | * the locked PROM tlb entries, so we impose the following | |
650 | * irrevocable rule on the PROM: it is allowed 8 locked | |
651 | * entries in the ITLB and 8 in the DTLB. | |
652 | * | |
653 | * Supposedly the upper 16GB of the address space is | |
654 | * reserved for OBP, BUT I WISH THIS WAS DOCUMENTED | |
655 | * SOMEWHERE!!!!!!!!!!!!!!!!! Furthermore the entire interface | |
656 | * used between the client program and the firmware on sun5 | |
657 | * systems to coordinate mmu mappings is also COMPLETELY | |
658 | * UNDOCUMENTED!!!!!! Thanks S(t)un! | |
659 | */ | |
660 | if (save_p) { | |
661 | for (i = 0; i < 16; i++) { | |
662 | prom_itlb[i].tlb_ent = -1; | |
663 | prom_dtlb[i].tlb_ent = -1; | |
664 | } | |
665 | } | |
666 | if (tlb_type == spitfire) { | |
0835ae0f DM |
667 | int high = sparc64_highest_unlocked_tlb_ent; |
668 | for (i = 0; i <= high; i++) { | |
1da177e4 LT |
669 | unsigned long data; |
670 | ||
671 | /* Spitfire Errata #32 workaround */ | |
672 | /* NOTE: Always runs on spitfire, so no cheetah+ | |
673 | * page size encodings. | |
674 | */ | |
675 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
676 | "flush %%g6" | |
677 | : /* No outputs */ | |
678 | : "r" (0), | |
679 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
680 | ||
681 | data = spitfire_get_dtlb_data(i); | |
682 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
683 | unsigned long tag; | |
684 | ||
685 | /* Spitfire Errata #32 workaround */ | |
686 | /* NOTE: Always runs on spitfire, so no | |
687 | * cheetah+ page size encodings. | |
688 | */ | |
689 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
690 | "flush %%g6" | |
691 | : /* No outputs */ | |
692 | : "r" (0), | |
693 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
694 | ||
695 | tag = spitfire_get_dtlb_tag(i); | |
696 | if (save_p) { | |
697 | prom_dtlb[dtlb_seen].tlb_ent = i; | |
698 | prom_dtlb[dtlb_seen].tlb_tag = tag; | |
699 | prom_dtlb[dtlb_seen].tlb_data = data; | |
700 | } | |
701 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
702 | "membar #Sync" | |
703 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
704 | spitfire_put_dtlb_data(i, 0x0UL); | |
705 | ||
706 | dtlb_seen++; | |
707 | if (dtlb_seen > 15) | |
708 | break; | |
709 | } | |
710 | } | |
711 | ||
712 | for (i = 0; i < high; i++) { | |
713 | unsigned long data; | |
714 | ||
715 | /* Spitfire Errata #32 workaround */ | |
716 | /* NOTE: Always runs on spitfire, so no | |
717 | * cheetah+ page size encodings. | |
718 | */ | |
719 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
720 | "flush %%g6" | |
721 | : /* No outputs */ | |
722 | : "r" (0), | |
723 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
724 | ||
725 | data = spitfire_get_itlb_data(i); | |
726 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
727 | unsigned long tag; | |
728 | ||
729 | /* Spitfire Errata #32 workaround */ | |
730 | /* NOTE: Always runs on spitfire, so no | |
731 | * cheetah+ page size encodings. | |
732 | */ | |
733 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
734 | "flush %%g6" | |
735 | : /* No outputs */ | |
736 | : "r" (0), | |
737 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
738 | ||
739 | tag = spitfire_get_itlb_tag(i); | |
740 | if (save_p) { | |
741 | prom_itlb[itlb_seen].tlb_ent = i; | |
742 | prom_itlb[itlb_seen].tlb_tag = tag; | |
743 | prom_itlb[itlb_seen].tlb_data = data; | |
744 | } | |
745 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
746 | "membar #Sync" | |
747 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
748 | spitfire_put_itlb_data(i, 0x0UL); | |
749 | ||
750 | itlb_seen++; | |
751 | if (itlb_seen > 15) | |
752 | break; | |
753 | } | |
754 | } | |
755 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
0835ae0f | 756 | int high = sparc64_highest_unlocked_tlb_ent; |
1da177e4 | 757 | |
0835ae0f | 758 | for (i = 0; i <= high; i++) { |
1da177e4 LT |
759 | unsigned long data; |
760 | ||
761 | data = cheetah_get_ldtlb_data(i); | |
762 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
763 | unsigned long tag; | |
764 | ||
765 | tag = cheetah_get_ldtlb_tag(i); | |
766 | if (save_p) { | |
767 | prom_dtlb[dtlb_seen].tlb_ent = i; | |
768 | prom_dtlb[dtlb_seen].tlb_tag = tag; | |
769 | prom_dtlb[dtlb_seen].tlb_data = data; | |
770 | } | |
771 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
772 | "membar #Sync" | |
773 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
774 | cheetah_put_ldtlb_data(i, 0x0UL); | |
775 | ||
776 | dtlb_seen++; | |
777 | if (dtlb_seen > 15) | |
778 | break; | |
779 | } | |
780 | } | |
781 | ||
782 | for (i = 0; i < high; i++) { | |
783 | unsigned long data; | |
784 | ||
785 | data = cheetah_get_litlb_data(i); | |
786 | if ((data & (_PAGE_L|_PAGE_VALID)) == (_PAGE_L|_PAGE_VALID)) { | |
787 | unsigned long tag; | |
788 | ||
789 | tag = cheetah_get_litlb_tag(i); | |
790 | if (save_p) { | |
791 | prom_itlb[itlb_seen].tlb_ent = i; | |
792 | prom_itlb[itlb_seen].tlb_tag = tag; | |
793 | prom_itlb[itlb_seen].tlb_data = data; | |
794 | } | |
795 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
796 | "membar #Sync" | |
797 | : : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
798 | cheetah_put_litlb_data(i, 0x0UL); | |
799 | ||
800 | itlb_seen++; | |
801 | if (itlb_seen > 15) | |
802 | break; | |
803 | } | |
804 | } | |
805 | } else { | |
806 | /* Implement me :-) */ | |
807 | BUG(); | |
808 | } | |
809 | if (save_p) | |
810 | prom_ditlb_set = 1; | |
811 | } | |
812 | ||
813 | /* Give PROM back his world, done during reboots... */ | |
814 | void prom_reload_locked(void) | |
815 | { | |
816 | int i; | |
817 | ||
818 | for (i = 0; i < 16; i++) { | |
819 | if (prom_dtlb[i].tlb_ent != -1) { | |
820 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
821 | "membar #Sync" | |
822 | : : "r" (prom_dtlb[i].tlb_tag), "r" (TLB_TAG_ACCESS), | |
823 | "i" (ASI_DMMU)); | |
824 | if (tlb_type == spitfire) | |
825 | spitfire_put_dtlb_data(prom_dtlb[i].tlb_ent, | |
826 | prom_dtlb[i].tlb_data); | |
827 | else if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
828 | cheetah_put_ldtlb_data(prom_dtlb[i].tlb_ent, | |
829 | prom_dtlb[i].tlb_data); | |
830 | } | |
831 | ||
832 | if (prom_itlb[i].tlb_ent != -1) { | |
833 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
834 | "membar #Sync" | |
835 | : : "r" (prom_itlb[i].tlb_tag), | |
836 | "r" (TLB_TAG_ACCESS), | |
837 | "i" (ASI_IMMU)); | |
838 | if (tlb_type == spitfire) | |
839 | spitfire_put_itlb_data(prom_itlb[i].tlb_ent, | |
840 | prom_itlb[i].tlb_data); | |
841 | else | |
842 | cheetah_put_litlb_data(prom_itlb[i].tlb_ent, | |
843 | prom_itlb[i].tlb_data); | |
844 | } | |
845 | } | |
846 | } | |
847 | ||
848 | #ifdef DCACHE_ALIASING_POSSIBLE | |
849 | void __flush_dcache_range(unsigned long start, unsigned long end) | |
850 | { | |
851 | unsigned long va; | |
852 | ||
853 | if (tlb_type == spitfire) { | |
854 | int n = 0; | |
855 | ||
856 | for (va = start; va < end; va += 32) { | |
857 | spitfire_put_dcache_tag(va & 0x3fe0, 0x0); | |
858 | if (++n >= 512) | |
859 | break; | |
860 | } | |
861 | } else { | |
862 | start = __pa(start); | |
863 | end = __pa(end); | |
864 | for (va = start; va < end; va += 32) | |
865 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
866 | "membar #Sync" | |
867 | : /* no outputs */ | |
868 | : "r" (va), | |
869 | "i" (ASI_DCACHE_INVALIDATE)); | |
870 | } | |
871 | } | |
872 | #endif /* DCACHE_ALIASING_POSSIBLE */ | |
873 | ||
874 | /* If not locked, zap it. */ | |
875 | void __flush_tlb_all(void) | |
876 | { | |
877 | unsigned long pstate; | |
878 | int i; | |
879 | ||
880 | __asm__ __volatile__("flushw\n\t" | |
881 | "rdpr %%pstate, %0\n\t" | |
882 | "wrpr %0, %1, %%pstate" | |
883 | : "=r" (pstate) | |
884 | : "i" (PSTATE_IE)); | |
885 | if (tlb_type == spitfire) { | |
886 | for (i = 0; i < 64; i++) { | |
887 | /* Spitfire Errata #32 workaround */ | |
888 | /* NOTE: Always runs on spitfire, so no | |
889 | * cheetah+ page size encodings. | |
890 | */ | |
891 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
892 | "flush %%g6" | |
893 | : /* No outputs */ | |
894 | : "r" (0), | |
895 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
896 | ||
897 | if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) { | |
898 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
899 | "membar #Sync" | |
900 | : /* no outputs */ | |
901 | : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU)); | |
902 | spitfire_put_dtlb_data(i, 0x0UL); | |
903 | } | |
904 | ||
905 | /* Spitfire Errata #32 workaround */ | |
906 | /* NOTE: Always runs on spitfire, so no | |
907 | * cheetah+ page size encodings. | |
908 | */ | |
909 | __asm__ __volatile__("stxa %0, [%1] %2\n\t" | |
910 | "flush %%g6" | |
911 | : /* No outputs */ | |
912 | : "r" (0), | |
913 | "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU)); | |
914 | ||
915 | if (!(spitfire_get_itlb_data(i) & _PAGE_L)) { | |
916 | __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" | |
917 | "membar #Sync" | |
918 | : /* no outputs */ | |
919 | : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU)); | |
920 | spitfire_put_itlb_data(i, 0x0UL); | |
921 | } | |
922 | } | |
923 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
924 | cheetah_flush_dtlb_all(); | |
925 | cheetah_flush_itlb_all(); | |
926 | } | |
927 | __asm__ __volatile__("wrpr %0, 0, %%pstate" | |
928 | : : "r" (pstate)); | |
929 | } | |
930 | ||
931 | /* Caller does TLB context flushing on local CPU if necessary. | |
932 | * The caller also ensures that CTX_VALID(mm->context) is false. | |
933 | * | |
934 | * We must be careful about boundary cases so that we never | |
935 | * let the user have CTX 0 (nucleus) nor ever use a CTX | |
936 | * version of zero (and thus NO_CONTEXT would not be caught | |
937 | * by version mis-match tests in mmu_context.h). | |
938 | */ | |
939 | void get_new_mmu_context(struct mm_struct *mm) | |
940 | { | |
941 | unsigned long ctx, new_ctx; | |
942 | unsigned long orig_pgsz_bits; | |
943 | ||
944 | ||
945 | spin_lock(&ctx_alloc_lock); | |
946 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); | |
947 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; | |
948 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); | |
949 | if (new_ctx >= (1 << CTX_NR_BITS)) { | |
950 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); | |
951 | if (new_ctx >= ctx) { | |
952 | int i; | |
953 | new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + | |
954 | CTX_FIRST_VERSION; | |
955 | if (new_ctx == 1) | |
956 | new_ctx = CTX_FIRST_VERSION; | |
957 | ||
958 | /* Don't call memset, for 16 entries that's just | |
959 | * plain silly... | |
960 | */ | |
961 | mmu_context_bmap[0] = 3; | |
962 | mmu_context_bmap[1] = 0; | |
963 | mmu_context_bmap[2] = 0; | |
964 | mmu_context_bmap[3] = 0; | |
965 | for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { | |
966 | mmu_context_bmap[i + 0] = 0; | |
967 | mmu_context_bmap[i + 1] = 0; | |
968 | mmu_context_bmap[i + 2] = 0; | |
969 | mmu_context_bmap[i + 3] = 0; | |
970 | } | |
971 | goto out; | |
972 | } | |
973 | } | |
974 | mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); | |
975 | new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); | |
976 | out: | |
977 | tlb_context_cache = new_ctx; | |
978 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; | |
979 | spin_unlock(&ctx_alloc_lock); | |
980 | } | |
981 | ||
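get_new_mmu_context() above hands out values whose low CTX_NR_BITS are the hardware context number and whose upper bits are a generation ("version") that is bumped each time the number space wraps; contexts 0 (nucleus) and 1 stay reserved when the bitmap is reset. A tiny sketch of that split, using an illustrative CTX_NR_BITS rather than the kernel's actual constant:

```c
#include <stdio.h>

#define CTX_NR_BITS      13UL                          /* illustrative width only */
#define CTX_NR_MASK      ((1UL << CTX_NR_BITS) - 1UL)
#define CTX_VERSION_MASK (~CTX_NR_MASK)

int main(void)
{
	unsigned long version = 3UL << CTX_NR_BITS;    /* hypothetical current version */
	unsigned long number  = 42UL;                  /* hypothetical allocated slot  */
	unsigned long ctx     = version | number;

	/* An mm whose version part no longer matches the allocator's is stale
	 * and must be given a fresh context on its next switch.
	 */
	printf("ctx=%#lx number=%lu stale=%d\n",
	       ctx, ctx & CTX_NR_MASK, (ctx & CTX_VERSION_MASK) != version);
	return 0;
}
```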
1da177e4 LT |
982 | void sparc_ultra_dump_itlb(void) |
983 | { | |
984 | int slot; | |
985 | ||
986 | if (tlb_type == spitfire) { | |
987 | printk ("Contents of itlb: "); | |
988 | for (slot = 0; slot < 14; slot++) printk (" "); | |
989 | printk ("%2x:%016lx,%016lx\n", | |
990 | 0, | |
991 | spitfire_get_itlb_tag(0), spitfire_get_itlb_data(0)); | |
992 | for (slot = 1; slot < 64; slot+=3) { | |
993 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
994 | slot, | |
995 | spitfire_get_itlb_tag(slot), spitfire_get_itlb_data(slot), | |
996 | slot+1, | |
997 | spitfire_get_itlb_tag(slot+1), spitfire_get_itlb_data(slot+1), | |
998 | slot+2, | |
999 | spitfire_get_itlb_tag(slot+2), spitfire_get_itlb_data(slot+2)); | |
1000 | } | |
1001 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
1002 | printk ("Contents of itlb0:\n"); | |
1003 | for (slot = 0; slot < 16; slot+=2) { | |
1004 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1005 | slot, | |
1006 | cheetah_get_litlb_tag(slot), cheetah_get_litlb_data(slot), | |
1007 | slot+1, | |
1008 | cheetah_get_litlb_tag(slot+1), cheetah_get_litlb_data(slot+1)); | |
1009 | } | |
1010 | printk ("Contents of itlb2:\n"); | |
1011 | for (slot = 0; slot < 128; slot+=2) { | |
1012 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1013 | slot, | |
1014 | cheetah_get_itlb_tag(slot), cheetah_get_itlb_data(slot), | |
1015 | slot+1, | |
1016 | cheetah_get_itlb_tag(slot+1), cheetah_get_itlb_data(slot+1)); | |
1017 | } | |
1018 | } | |
1019 | } | |
1020 | ||
1021 | void sparc_ultra_dump_dtlb(void) | |
1022 | { | |
1023 | int slot; | |
1024 | ||
1025 | if (tlb_type == spitfire) { | |
1026 | printk ("Contents of dtlb: "); | |
1027 | for (slot = 0; slot < 14; slot++) printk (" "); | |
1028 | printk ("%2x:%016lx,%016lx\n", 0, | |
1029 | spitfire_get_dtlb_tag(0), spitfire_get_dtlb_data(0)); | |
1030 | for (slot = 1; slot < 64; slot+=3) { | |
1031 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1032 | slot, | |
1033 | spitfire_get_dtlb_tag(slot), spitfire_get_dtlb_data(slot), | |
1034 | slot+1, | |
1035 | spitfire_get_dtlb_tag(slot+1), spitfire_get_dtlb_data(slot+1), | |
1036 | slot+2, | |
1037 | spitfire_get_dtlb_tag(slot+2), spitfire_get_dtlb_data(slot+2)); | |
1038 | } | |
1039 | } else if (tlb_type == cheetah || tlb_type == cheetah_plus) { | |
1040 | printk ("Contents of dtlb0:\n"); | |
1041 | for (slot = 0; slot < 16; slot+=2) { | |
1042 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1043 | slot, | |
1044 | cheetah_get_ldtlb_tag(slot), cheetah_get_ldtlb_data(slot), | |
1045 | slot+1, | |
1046 | cheetah_get_ldtlb_tag(slot+1), cheetah_get_ldtlb_data(slot+1)); | |
1047 | } | |
1048 | printk ("Contents of dtlb2:\n"); | |
1049 | for (slot = 0; slot < 512; slot+=2) { | |
1050 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1051 | slot, | |
1052 | cheetah_get_dtlb_tag(slot, 2), cheetah_get_dtlb_data(slot, 2), | |
1053 | slot+1, | |
1054 | cheetah_get_dtlb_tag(slot+1, 2), cheetah_get_dtlb_data(slot+1, 2)); | |
1055 | } | |
1056 | if (tlb_type == cheetah_plus) { | |
1057 | printk ("Contents of dtlb3:\n"); | |
1058 | for (slot = 0; slot < 512; slot+=2) { | |
1059 | printk ("%2x:%016lx,%016lx %2x:%016lx,%016lx\n", | |
1060 | slot, | |
1061 | cheetah_get_dtlb_tag(slot, 3), cheetah_get_dtlb_data(slot, 3), | |
1062 | slot+1, | |
1063 | cheetah_get_dtlb_tag(slot+1, 3), cheetah_get_dtlb_data(slot+1, 3)); | |
1064 | } | |
1065 | } | |
1066 | } | |
1067 | } | |
1068 | ||
1069 | extern unsigned long cmdline_memory_size; | |
1070 | ||
1071 | unsigned long __init bootmem_init(unsigned long *pages_avail) | |
1072 | { | |
1073 | unsigned long bootmap_size, start_pfn, end_pfn; | |
1074 | unsigned long end_of_phys_memory = 0UL; | |
1075 | unsigned long bootmap_pfn, bytes_avail, size; | |
1076 | int i; | |
1077 | ||
1078 | #ifdef CONFIG_DEBUG_BOOTMEM | |
13edad7a | 1079 | prom_printf("bootmem_init: Scan pavail, "); |
1da177e4 LT |
1080 | #endif |
1081 | ||
1082 | bytes_avail = 0UL; | |
13edad7a DM |
1083 | for (i = 0; i < pavail_ents; i++) { |
1084 | end_of_phys_memory = pavail[i].phys_addr + | |
1085 | pavail[i].reg_size; | |
1086 | bytes_avail += pavail[i].reg_size; | |
1da177e4 LT |
1087 | if (cmdline_memory_size) { |
1088 | if (bytes_avail > cmdline_memory_size) { | |
1089 | unsigned long slack = bytes_avail - cmdline_memory_size; | |
1090 | ||
1091 | bytes_avail -= slack; | |
1092 | end_of_phys_memory -= slack; | |
1093 | ||
13edad7a DM |
1094 | pavail[i].reg_size -= slack; |
1095 | if ((long)pavail[i].reg_size <= 0L) { | |
1096 | pavail[i].phys_addr = 0xdeadbeefUL; | |
1097 | pavail[i].reg_size = 0UL; | |
1098 | pavail_ents = i; | |
1da177e4 | 1099 | } else { |
13edad7a DM |
1100 | pavail[i+1].reg_size = 0Ul; |
1101 | pavail[i+1].phys_addr = 0xdeadbeefUL; | |
1102 | pavail_ents = i + 1; | |
1da177e4 LT |
1103 | } |
1104 | break; | |
1105 | } | |
1106 | } | |
1107 | } | |
1108 | ||
1109 | *pages_avail = bytes_avail >> PAGE_SHIFT; | |
1110 | ||
1111 | /* Start with page aligned address of last symbol in kernel | |
1112 | * image. The kernel is hard mapped below PAGE_OFFSET in a | |
1113 | * 4MB locked TLB translation. | |
1114 | */ | |
1115 | start_pfn = PAGE_ALIGN(kern_base + kern_size) >> PAGE_SHIFT; | |
1116 | ||
1117 | bootmap_pfn = start_pfn; | |
1118 | ||
1119 | end_pfn = end_of_phys_memory >> PAGE_SHIFT; | |
1120 | ||
1121 | #ifdef CONFIG_BLK_DEV_INITRD | |
1122 | /* Now have to check initial ramdisk, so that bootmap does not overwrite it */ | |
1123 | if (sparc_ramdisk_image || sparc_ramdisk_image64) { | |
1124 | unsigned long ramdisk_image = sparc_ramdisk_image ? | |
1125 | sparc_ramdisk_image : sparc_ramdisk_image64; | |
1126 | if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE) | |
1127 | ramdisk_image -= KERNBASE; | |
1128 | initrd_start = ramdisk_image + phys_base; | |
1129 | initrd_end = initrd_start + sparc_ramdisk_size; | |
1130 | if (initrd_end > end_of_phys_memory) { | |
1131 | printk(KERN_CRIT "initrd extends beyond end of memory " | |
1132 | "(0x%016lx > 0x%016lx)\ndisabling initrd\n", | |
1133 | initrd_end, end_of_phys_memory); | |
1134 | initrd_start = 0; | |
1135 | } | |
1136 | if (initrd_start) { | |
1137 | if (initrd_start >= (start_pfn << PAGE_SHIFT) && | |
1138 | initrd_start < (start_pfn << PAGE_SHIFT) + 2 * PAGE_SIZE) | |
1139 | bootmap_pfn = PAGE_ALIGN (initrd_end) >> PAGE_SHIFT; | |
1140 | } | |
1141 | } | |
1142 | #endif | |
1143 | /* Initialize the boot-time allocator. */ | |
1144 | max_pfn = max_low_pfn = end_pfn; | |
1145 | min_low_pfn = pfn_base; | |
1146 | ||
1147 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1148 | prom_printf("init_bootmem(min[%lx], bootmap[%lx], max[%lx])\n", | |
1149 | min_low_pfn, bootmap_pfn, max_low_pfn); | |
1150 | #endif | |
1151 | bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn); | |
1152 | ||
1da177e4 LT |
1153 | /* Now register the available physical memory with the |
1154 | * allocator. | |
1155 | */ | |
13edad7a | 1156 | for (i = 0; i < pavail_ents; i++) { |
1da177e4 | 1157 | #ifdef CONFIG_DEBUG_BOOTMEM |
13edad7a DM |
1158 | prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n", |
1159 | i, pavail[i].phys_addr, pavail[i].reg_size); | |
1da177e4 | 1160 | #endif |
13edad7a | 1161 | free_bootmem(pavail[i].phys_addr, pavail[i].reg_size); |
1da177e4 LT |
1162 | } |
1163 | ||
1164 | #ifdef CONFIG_BLK_DEV_INITRD | |
1165 | if (initrd_start) { | |
1166 | size = initrd_end - initrd_start; | |
1167 | ||
1168 | /* Reserve the initrd image area. */ | |
1169 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1170 | prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n", | |
1171 | initrd_start, initrd_end); | |
1172 | #endif | |
1173 | reserve_bootmem(initrd_start, size); | |
1174 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | |
1175 | ||
1176 | initrd_start += PAGE_OFFSET; | |
1177 | initrd_end += PAGE_OFFSET; | |
1178 | } | |
1179 | #endif | |
1180 | /* Reserve the kernel text/data/bss. */ | |
1181 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1182 | prom_printf("reserve_bootmem(kernel): base[%lx] size[%lx]\n", kern_base, kern_size); | |
1183 | #endif | |
1184 | reserve_bootmem(kern_base, kern_size); | |
1185 | *pages_avail -= PAGE_ALIGN(kern_size) >> PAGE_SHIFT; | |
1186 | ||
1187 | /* Reserve the bootmem map. We do not account for it | |
1188 | * in pages_avail because we will release that memory | |
1189 | * in free_all_bootmem. | |
1190 | */ | |
1191 | size = bootmap_size; | |
1192 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1193 | prom_printf("reserve_bootmem(bootmap): base[%lx] size[%lx]\n", | |
1194 | (bootmap_pfn << PAGE_SHIFT), size); | |
1195 | #endif | |
1196 | reserve_bootmem((bootmap_pfn << PAGE_SHIFT), size); | |
1197 | *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT; | |
1198 | ||
1199 | return end_pfn; | |
1200 | } | |
1201 | ||
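The mem= handling in bootmem_init() above trims the last usable bank once the running total exceeds the command-line limit. A standalone sketch of that arithmetic with hypothetical banks (it skips the 0xdeadbeef bookkeeping for fully discarded entries):

```c
#include <stdio.h>

struct bank { unsigned long phys_addr, reg_size; };

int main(void)
{
	/* Hypothetical banks totalling 768MB, booted with mem=640M. */
	struct bank pavail[2] = {
		{ 0x00000000UL, 0x20000000UL },   /* 512MB */
		{ 0x20000000UL, 0x10000000UL },   /* 256MB */
	};
	unsigned long cmdline_memory_size = 640UL << 20;
	unsigned long bytes_avail = 0UL;
	int i, ents = 2;

	for (i = 0; i < ents; i++) {
		bytes_avail += pavail[i].reg_size;
		if (cmdline_memory_size && bytes_avail > cmdline_memory_size) {
			unsigned long slack = bytes_avail - cmdline_memory_size;

			bytes_avail -= slack;
			pavail[i].reg_size -= slack;   /* truncate the last kept bank */
			ents = i + 1;
			break;
		}
	}
	/* prints banks=2 last=0x8000000 total=0x28000000 */
	printf("banks=%d last=%#lx total=%#lx\n",
	       ents, pavail[ents - 1].reg_size, bytes_avail);
	return 0;
}
```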
56425306 DM |
1202 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1203 | static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot) | |
1204 | { | |
1205 | unsigned long vstart = PAGE_OFFSET + pstart; | |
1206 | unsigned long vend = PAGE_OFFSET + pend; | |
1207 | unsigned long alloc_bytes = 0UL; | |
1208 | ||
1209 | if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) { | |
13edad7a | 1210 | prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n", |
56425306 DM |
1211 | vstart, vend); |
1212 | prom_halt(); | |
1213 | } | |
1214 | ||
1215 | while (vstart < vend) { | |
1216 | unsigned long this_end, paddr = __pa(vstart); | |
1217 | pgd_t *pgd = pgd_offset_k(vstart); | |
1218 | pud_t *pud; | |
1219 | pmd_t *pmd; | |
1220 | pte_t *pte; | |
1221 | ||
1222 | pud = pud_offset(pgd, vstart); | |
1223 | if (pud_none(*pud)) { | |
1224 | pmd_t *new; | |
1225 | ||
1226 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | |
1227 | alloc_bytes += PAGE_SIZE; | |
1228 | pud_populate(&init_mm, pud, new); | |
1229 | } | |
1230 | ||
1231 | pmd = pmd_offset(pud, vstart); | |
1232 | if (!pmd_present(*pmd)) { | |
1233 | pte_t *new; | |
1234 | ||
1235 | new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE); | |
1236 | alloc_bytes += PAGE_SIZE; | |
1237 | pmd_populate_kernel(&init_mm, pmd, new); | |
1238 | } | |
1239 | ||
1240 | pte = pte_offset_kernel(pmd, vstart); | |
1241 | this_end = (vstart + PMD_SIZE) & PMD_MASK; | |
1242 | if (this_end > vend) | |
1243 | this_end = vend; | |
1244 | ||
1245 | while (vstart < this_end) { | |
1246 | pte_val(*pte) = (paddr | pgprot_val(prot)); | |
1247 | ||
1248 | vstart += PAGE_SIZE; | |
1249 | paddr += PAGE_SIZE; | |
1250 | pte++; | |
1251 | } | |
1252 | } | |
1253 | ||
1254 | return alloc_bytes; | |
1255 | } | |
1256 | ||
13edad7a DM |
1257 | static struct linux_prom64_registers pall[MAX_BANKS] __initdata; |
1258 | static int pall_ents __initdata; | |
1259 | ||
56425306 DM |
1260 | extern unsigned int kvmap_linear_patch[1]; |
1261 | ||
1262 | static void __init kernel_physical_mapping_init(void) | |
1263 | { | |
13edad7a | 1264 | unsigned long i, mem_alloced = 0UL; |
56425306 | 1265 | |
13edad7a DM |
1266 | read_obp_memory("reg", &pall[0], &pall_ents); |
1267 | ||
1268 | for (i = 0; i < pall_ents; i++) { | |
56425306 DM |
1269 | unsigned long phys_start, phys_end; |
1270 | ||
13edad7a DM |
1271 | phys_start = pall[i].phys_addr; |
1272 | phys_end = phys_start + pall[i].reg_size; | |
56425306 DM |
1273 | mem_alloced += kernel_map_range(phys_start, phys_end, |
1274 | PAGE_KERNEL); | |
56425306 DM |
1275 | } |
1276 | ||
1277 | printk("Allocated %ld bytes for kernel page tables.\n", | |
1278 | mem_alloced); | |
1279 | ||
1280 | kvmap_linear_patch[0] = 0x01000000; /* nop */ | |
1281 | flushi(&kvmap_linear_patch[0]); | |
1282 | ||
1283 | __flush_tlb_all(); | |
1284 | } | |
1285 | ||
1286 | void kernel_map_pages(struct page *page, int numpages, int enable) | |
1287 | { | |
1288 | unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT; | |
1289 | unsigned long phys_end = phys_start + (numpages * PAGE_SIZE); | |
1290 | ||
1291 | kernel_map_range(phys_start, phys_end, | |
1292 | (enable ? PAGE_KERNEL : __pgprot(0))); | |
1293 | ||
74bf4312 DM |
1294 | flush_tsb_kernel_range(PAGE_OFFSET + phys_start, |
1295 | PAGE_OFFSET + phys_end); | |
1296 | ||
56425306 DM |
1297 | /* we should perform an IPI and flush all tlbs, |
1298 | * but that can deadlock, so we flush only the current cpu. | |
1299 | */ | |
1300 | __flush_tlb_kernel_range(PAGE_OFFSET + phys_start, | |
1301 | PAGE_OFFSET + phys_end); | |
1302 | } | |
1303 | #endif | |
1304 | ||
10147570 DM |
1305 | unsigned long __init find_ecache_flush_span(unsigned long size) |
1306 | { | |
0836a0eb DM |
1307 | int i; |
1308 | ||
13edad7a DM |
1309 | for (i = 0; i < pavail_ents; i++) { |
1310 | if (pavail[i].reg_size >= size) | |
1311 | return pavail[i].phys_addr; | |
0836a0eb DM |
1312 | } |
1313 | ||
13edad7a | 1314 | return ~0UL; |
0836a0eb DM |
1315 | } |
1316 | ||
1da177e4 LT |
1317 | /* paging_init() sets up the page tables */ |
1318 | ||
1319 | extern void cheetah_ecache_flush_init(void); | |
1320 | ||
1321 | static unsigned long last_valid_pfn; | |
56425306 | 1322 | pgd_t swapper_pg_dir[2048]; |
1da177e4 LT |
1323 | |
1324 | void __init paging_init(void) | |
1325 | { | |
2bdb3cb2 | 1326 | unsigned long end_pfn, pages_avail, shift; |
0836a0eb DM |
1327 | unsigned long real_end, i; |
1328 | ||
13edad7a DM |
1329 | /* Find available physical memory... */ |
1330 | read_obp_memory("available", &pavail[0], &pavail_ents); | |
0836a0eb DM |
1331 | |
1332 | phys_base = 0xffffffffffffffffUL; | |
13edad7a DM |
1333 | for (i = 0; i < pavail_ents; i++) |
1334 | phys_base = min(phys_base, pavail[i].phys_addr); | |
0836a0eb | 1335 | |
0836a0eb DM |
1336 | pfn_base = phys_base >> PAGE_SHIFT; |
1337 | ||
1338 | kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; | |
1339 | kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; | |
1da177e4 LT |
1340 | |
1341 | set_bit(0, mmu_context_bmap); | |
1342 | ||
2bdb3cb2 DM |
1343 | shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE); |
1344 | ||
1da177e4 LT |
1345 | real_end = (unsigned long)_end; |
1346 | if ((real_end > ((unsigned long)KERNBASE + 0x400000))) | |
1347 | bigkernel = 1; | |
2bdb3cb2 DM |
1348 | if ((real_end > ((unsigned long)KERNBASE + 0x800000))) { |
1349 | prom_printf("paging_init: Kernel > 8MB, too large.\n"); | |
1350 | prom_halt(); | |
1da177e4 | 1351 | } |
2bdb3cb2 DM |
1352 | |
1353 | /* Set kernel pgd to upper alias so physical page computations | |
1da177e4 LT |
1354 | * work. |
1355 | */ | |
1356 | init_mm.pgd += ((shift) / (sizeof(pgd_t))); | |
1357 | ||
56425306 | 1358 | memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); |
1da177e4 LT |
1359 | |
1360 | /* Now can init the kernel/bad page tables. */ | |
1361 | pud_set(pud_offset(&swapper_pg_dir[0], 0), | |
56425306 | 1362 | swapper_low_pmd_dir + (shift / sizeof(pgd_t))); |
1da177e4 | 1363 | |
2bdb3cb2 | 1364 | swapper_pgd_zero = pgd_val(swapper_pg_dir[0]); |
1da177e4 | 1365 | |
c9c10830 | 1366 | inherit_prom_mappings(); |
5085b4a5 | 1367 | |
1da177e4 LT |
1368 | /* Ok, we can use our TLB miss and window trap handlers safely. |
1369 | * We need to do a quick peek here to see if we are on StarFire | |
1370 | * or not, so setup_tba can setup the IRQ globals correctly (it | |
1371 | * needs to get the hard smp processor id correctly). | |
1372 | */ | |
1373 | { | |
1374 | extern void setup_tba(int); | |
1375 | setup_tba(this_is_starfire); | |
1376 | } | |
1da177e4 | 1377 | |
c9c10830 DM |
1378 | inherit_locked_prom_mappings(1); |
1379 | ||
1380 | __flush_tlb_all(); | |
9ad98c5b | 1381 | |
2bdb3cb2 DM |
1382 | /* Setup bootmem... */ |
1383 | pages_avail = 0; | |
1384 | last_valid_pfn = end_pfn = bootmem_init(&pages_avail); | |
1385 | ||
56425306 DM |
1386 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1387 | kernel_physical_mapping_init(); | |
1388 | #endif | |
1389 | ||
1da177e4 LT |
1390 | { |
1391 | unsigned long zones_size[MAX_NR_ZONES]; | |
1392 | unsigned long zholes_size[MAX_NR_ZONES]; | |
1393 | unsigned long npages; | |
1394 | int znum; | |
1395 | ||
1396 | for (znum = 0; znum < MAX_NR_ZONES; znum++) | |
1397 | zones_size[znum] = zholes_size[znum] = 0; | |
1398 | ||
1399 | npages = end_pfn - pfn_base; | |
1400 | zones_size[ZONE_DMA] = npages; | |
1401 | zholes_size[ZONE_DMA] = npages - pages_avail; | |
1402 | ||
1403 | free_area_init_node(0, &contig_page_data, zones_size, | |
1404 | phys_base >> PAGE_SHIFT, zholes_size); | |
1405 | } | |
1406 | ||
1407 | device_scan(); | |
1408 | } | |
1409 | ||
1da177e4 LT |
1410 | static void __init taint_real_pages(void) |
1411 | { | |
1da177e4 LT |
1412 | int i; |
1413 | ||
13edad7a | 1414 | read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents); |
1da177e4 | 1415 | |
13edad7a | 1416 | /* Find changes discovered in the physmem available rescan and |
1da177e4 LT |
1417 | * reserve the lost portions in the bootmem maps. |
1418 | */ | |
13edad7a | 1419 | for (i = 0; i < pavail_ents; i++) { |
1da177e4 LT |
1420 | unsigned long old_start, old_end; |
1421 | ||
13edad7a | 1422 | old_start = pavail[i].phys_addr; |
1da177e4 | 1423 | old_end = old_start + |
13edad7a | 1424 | pavail[i].reg_size; |
1da177e4 LT |
1425 | while (old_start < old_end) { |
1426 | int n; | |
1427 | ||
13edad7a | 1428 | for (n = 0; pavail_rescan_ents; n++) { |
1da177e4 LT |
1429 | unsigned long new_start, new_end; |
1430 | ||
13edad7a DM |
1431 | new_start = pavail_rescan[n].phys_addr; |
1432 | new_end = new_start + | |
1433 | pavail_rescan[n].reg_size; | |
1da177e4 LT |
1434 | |
1435 | if (new_start <= old_start && | |
1436 | new_end >= (old_start + PAGE_SIZE)) { | |
13edad7a DM |
1437 | set_bit(old_start >> 22, |
1438 | sparc64_valid_addr_bitmap); | |
1da177e4 LT |
1439 | goto do_next_page; |
1440 | } | |
1441 | } | |
1442 | reserve_bootmem(old_start, PAGE_SIZE); | |
1443 | ||
1444 | do_next_page: | |
1445 | old_start += PAGE_SIZE; | |
1446 | } | |
1447 | } | |
1448 | } | |
1449 | ||
1450 | void __init mem_init(void) | |
1451 | { | |
1452 | unsigned long codepages, datapages, initpages; | |
1453 | unsigned long addr, last; | |
1454 | int i; | |
1455 | ||
1456 | i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6); | |
1457 | i += 1; | |
2bdb3cb2 | 1458 | sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3); |
1da177e4 LT |
1459 | if (sparc64_valid_addr_bitmap == NULL) { |
1460 | prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n"); | |
1461 | prom_halt(); | |
1462 | } | |
1463 | memset(sparc64_valid_addr_bitmap, 0, i << 3); | |
1464 | ||
1465 | addr = PAGE_OFFSET + kern_base; | |
1466 | last = PAGE_ALIGN(kern_size) + addr; | |
1467 | while (addr < last) { | |
1468 | set_bit(__pa(addr) >> 22, sparc64_valid_addr_bitmap); | |
1469 | addr += PAGE_SIZE; | |
1470 | } | |
1471 | ||
1472 | taint_real_pages(); | |
1473 | ||
1474 | max_mapnr = last_valid_pfn - pfn_base; | |
1475 | high_memory = __va(last_valid_pfn << PAGE_SHIFT); | |
1476 | ||
1477 | #ifdef CONFIG_DEBUG_BOOTMEM | |
1478 | prom_printf("mem_init: Calling free_all_bootmem().\n"); | |
1479 | #endif | |
1480 | totalram_pages = num_physpages = free_all_bootmem() - 1; | |
1481 | ||
1482 | /* | |
1483 | * Set up the zero page, mark it reserved, so that page count | |
1484 | * is not manipulated when freeing the page from user ptes. | |
1485 | */ | |
1486 | mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0); | |
1487 | if (mem_map_zero == NULL) { | |
1488 | prom_printf("paging_init: Cannot alloc zero page.\n"); | |
1489 | prom_halt(); | |
1490 | } | |
1491 | SetPageReserved(mem_map_zero); | |
1492 | ||
1493 | codepages = (((unsigned long) _etext) - ((unsigned long) _start)); | |
1494 | codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT; | |
1495 | datapages = (((unsigned long) _edata) - ((unsigned long) _etext)); | |
1496 | datapages = PAGE_ALIGN(datapages) >> PAGE_SHIFT; | |
1497 | initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin)); | |
1498 | initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT; | |
1499 | ||
1500 | printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n", | |
1501 | nr_free_pages() << (PAGE_SHIFT-10), | |
1502 | codepages << (PAGE_SHIFT-10), | |
1503 | datapages << (PAGE_SHIFT-10), | |
1504 | initpages << (PAGE_SHIFT-10), | |
1505 | PAGE_OFFSET, (last_valid_pfn << PAGE_SHIFT)); | |
1506 | ||
1507 | if (tlb_type == cheetah || tlb_type == cheetah_plus) | |
1508 | cheetah_ecache_flush_init(); | |
1509 | } | |
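mem_init() above sizes sparc64_valid_addr_bitmap at one bit per 4MB (1 << 22 bytes) of physical address space, 64 chunks per unsigned long. A quick check of that sizing arithmetic, assuming an illustrative 8K page size and a hypothetical 2GB top of memory:

```c
#include <stdio.h>

#define PAGE_SHIFT 13UL    /* illustrative 8K pages */

int main(void)
{
	unsigned long last_valid_pfn = (2UL << 30) >> PAGE_SHIFT;   /* 2GB of pages */

	/* Same expression as mem_init(): 64-bit words, one bit per 4MB chunk,
	 * plus one extra word for any partial remainder.
	 */
	unsigned long words = (last_valid_pfn >> ((22UL - PAGE_SHIFT) + 6UL)) + 1UL;

	/* prints chunks=512 bitmap_bytes=72 */
	printf("chunks=%lu bitmap_bytes=%lu\n",
	       last_valid_pfn >> (22UL - PAGE_SHIFT), words << 3UL);
	return 0;
}
```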
1510 | ||
898cf0ec | 1511 | void free_initmem(void) |
1da177e4 LT |
1512 | { |
1513 | unsigned long addr, initend; | |
1514 | ||
1515 | /* | |
1516 | * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes. | |
1517 | */ | |
1518 | addr = PAGE_ALIGN((unsigned long)(__init_begin)); | |
1519 | initend = (unsigned long)(__init_end) & PAGE_MASK; | |
1520 | for (; addr < initend; addr += PAGE_SIZE) { | |
1521 | unsigned long page; | |
1522 | struct page *p; | |
1523 | ||
1524 | page = (addr + | |
1525 | ((unsigned long) __va(kern_base)) - | |
1526 | ((unsigned long) KERNBASE)); | |
1527 | memset((void *)addr, 0xcc, PAGE_SIZE); | |
1528 | p = virt_to_page(page); | |
1529 | ||
1530 | ClearPageReserved(p); | |
1531 | set_page_count(p, 1); | |
1532 | __free_page(p); | |
1533 | num_physpages++; | |
1534 | totalram_pages++; | |
1535 | } | |
1536 | } | |
1537 | ||
1538 | #ifdef CONFIG_BLK_DEV_INITRD | |
1539 | void free_initrd_mem(unsigned long start, unsigned long end) | |
1540 | { | |
1541 | if (start < end) | |
1542 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | |
1543 | for (; start < end; start += PAGE_SIZE) { | |
1544 | struct page *p = virt_to_page(start); | |
1545 | ||
1546 | ClearPageReserved(p); | |
1547 | set_page_count(p, 1); | |
1548 | __free_page(p); | |
1549 | num_physpages++; | |
1550 | totalram_pages++; | |
1551 | } | |
1552 | } | |
1553 | #endif |