/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the page is never written to after initialization, we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);
        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

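/*
 * Map @page with protection @prot at a fixmap address of the same
 * colour as @addr, using a wired TLB entry. Preemption and pagefaults
 * stay disabled until the mapping is released by kunmap_coherent().
 */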
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        preempt_disable();
        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
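        /*
         * Build the EntryLo value for the PTE. When the physical
         * address does not fit in the primary PTE word (XPA, or 64-bit
         * physical addresses on a 32-bit CPU) the high PTE word
         * supplies the extra bits.
         */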
#if defined(CONFIG_XPA)
        entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
        if (cpu_has_xpa) {
                entrylo = (pte.pte_low & _PFNX_MASK);
                writex_c0_entrylo0(entrylo);
                writex_c0_entrylo1(entrylo);
        }
#endif
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);

        return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

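/*
 * Undo the wired TLB entry installed by kmap_coherent() or
 * kmap_noncoherent() and re-enable pagefaults and preemption.
 */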
void kunmap_coherent(void)
{
        unsigned int wired;
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
        preempt_enable();
}

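/*
 * Copy a user highmem page. With dcache aliasing, a clean,
 * user-mapped source page is read through a kernel mapping of the
 * same colour so we observe the data the user sees.
 */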
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapcount(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure the copy is visible to other CPUs before the page is used */
        smp_wmb();
}

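/*
 * Copy @len bytes of @src into a page that may also be mapped in user
 * space. With dcache aliasing, a clean, user-mapped page is written
 * through a mapping of the user's colour; otherwise the kernel view
 * is marked dirty so it gets written back before any aliased access.
 */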
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

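/*
 * Populate page tables for the fixmap range [start, end) under
 * @pgd_base. This is only needed with highmem, where the fixmap and
 * kmap windows must be backed before first use.
 */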
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

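/*
 * Default (weak) MAAR setup: mark every usable bootmem region (RAM and
 * init RAM) as speculatable (MIPS_MAAR_S). MAAR address bounds have
 * 64K granularity, so each region's start is rounded up to the next
 * 64K boundary. Platforms may override this with their own policy.
 */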
unsigned __weak platform_maar_init(unsigned num_pairs)
{
        struct maar_config cfg[BOOT_MEM_MAP_MAX];
        unsigned i, num_configured, num_cfg = 0;
        phys_addr_t skip;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        continue;
                }

                skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);

                cfg[num_cfg].lower = boot_mem_map.map[i].addr;
                cfg[num_cfg].lower += skip;

                cfg[num_cfg].upper = cfg[num_cfg].lower;
                cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
                cfg[num_cfg].upper -= skip;

                cfg[num_cfg].attrs = MIPS_MAAR_S;
                num_cfg++;
        }

        num_configured = maar_config(cfg, num_cfg, num_pairs);
        if (num_configured < num_cfg)
                pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
                        num_pairs, num_cfg);

        return num_configured;
}

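/*
 * Detect how many MAAR pairs the CPU implements, program them (on
 * secondary CPUs, replay the configuration recorded by the boot CPU),
 * disable the unused pairs and log the resulting ranges.
 */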
void maar_init(void)
{
        unsigned num_maars, used, i;
        phys_addr_t lower, upper, attr;
        static struct {
                struct maar_config cfgs[3];
                unsigned used;
        } recorded = { { { 0 } }, 0 };

        if (!cpu_has_maar)
                return;

        /* Detect the number of MAARs */
        write_c0_maari(~0);
        back_to_back_c0_hazard();
        num_maars = read_c0_maari() + 1;

        /* MAARs should be in pairs */
        WARN_ON(num_maars % 2);

        /* Set MAARs using values we recorded already */
        if (recorded.used) {
                used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
                BUG_ON(used != recorded.used);
        } else {
                /* Configure the required MAARs */
                used = platform_maar_init(num_maars / 2);
        }

        /* Disable any further MAARs */
        for (i = (used * 2); i < num_maars; i++) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                write_c0_maar(0);
                back_to_back_c0_hazard();
        }

        if (recorded.used)
                return;

        pr_info("MAAR configuration:\n");
        for (i = 0; i < num_maars; i += 2) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                upper = read_c0_maar();

                write_c0_maari(i + 1);
                back_to_back_c0_hazard();
                lower = read_c0_maar();

                attr = lower & upper;
                lower = (lower & MIPS_MAAR_ADDR) << 4;
                upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

                pr_info(" [%d]: ", i / 2);
                if (!(attr & MIPS_MAAR_V)) {
                        pr_cont("disabled\n");
                        continue;
                }

                pr_cont("%pa-%pa", &lower, &upper);

                if (attr & MIPS_MAAR_S)
                        pr_cont(" speculate");

                pr_cont("\n");

                /* Record the setup for use on secondary CPUs */
                if (used <= ARRAY_SIZE(recorded.cfgs)) {
                        recorded.cfgs[recorded.used].lower = lower;
                        recorded.cfgs[recorded.used].upper = upper;
                        recorded.cfgs[recorded.used].attrs = attr;
                        recorded.used++;
                }
        }
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
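/*
 * Return 1 if page frame number @pagenr lies inside a usable (RAM or
 * init RAM) region of the boot memory map, 0 otherwise.
 */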
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

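/*
 * Build the kernel page tables and tell the core VM how large each
 * zone is. On CPUs with dcache aliasing, highmem cannot be supported,
 * so any highmem pages are ignored with a warning.
 */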
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn __maybe_unused;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

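/*
 * Hand all usable highmem page frames to the buddy allocator; frames
 * not backed by RAM remain reserved.
 */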
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}

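/*
 * Late memory init: fix up max_mapnr/high_memory, program the MAARs,
 * hand bootmem and highmem pages to the buddy allocator and set up
 * the zero pages.
 */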
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        maar_init();
        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages. */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow. */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

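/*
 * Poison and free every page frame that lies wholly inside
 * [begin, end); partial frames at either end are left untouched.
 */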
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

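/*
 * Optional platform hook for freeing the init section on EVA systems,
 * where the kernel may run from an arbitrary virtual-to-physical
 * mapping; see free_initmem() below.
 */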
void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        /*
         * Let the platform define a specific function to free the
         * init section since EVA may have used any possible mapping
         * between virtual and physical addresses.
         */
        if (free_init_pages_eva)
                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
        else
                free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offsets.h until those
 * compilers are officially retired.
 *
 * Aligning swapper_pg_dir to 64K allows its address to be loaded with a
 * single LUI instruction in the TLB handlers. If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.
 * So we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;