MIPS: Fix detection of unsupported highmem with cache aliases
[deliverable/linux.git] / arch/mips/mm/init.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/maar.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the page is never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

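/*
 * Editor's note (hedged sketch): callers pick the zero page of the right
 * cache colour by offsetting empty_zero_page with zero_page_mask, roughly
 * as below. The authoritative definition is the ZERO_PAGE() macro in the
 * MIPS pgtable headers; the helper name here is purely illustrative.
 *
 *      static inline struct page *zero_page_for(unsigned long vaddr)
 *      {
 *              return virt_to_page((void *)(empty_zero_page +
 *                                           (vaddr & zero_page_mask)));
 *      }
 */
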
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        preempt_disable();
        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
#if defined(CONFIG_XPA)
        entrylo = pte_to_entrylo(pte.pte_high);
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
        if (cpu_has_xpa) {
                entrylo = (pte.pte_low & _PFNX_MASK);
                writex_c0_entrylo0(entrylo);
                writex_c0_entrylo1(entrylo);
        }
#endif
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);

        return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
        unsigned int wired;
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
        preempt_enable();
}

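/*
 * Editor's note: kmap_coherent() maps @page at a fixmap virtual address whose
 * cache colour matches the user address @addr by installing a wired TLB
 * entry, so accesses through the returned pointer do not alias the user
 * mapping. Every call must be paired with kunmap_coherent(), which removes
 * that wired entry again. A typical use, modelled on copy_from_user_page()
 * further down in this file, looks roughly like:
 *
 *      void *kva = kmap_coherent(page, uaddr);
 *      memcpy(buf, kva + (uaddr & ~PAGE_MASK), len);
 *      kunmap_coherent();
 */
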
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapcount(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}

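/*
 * Editor's note: the kmap_coherent() path above is only taken when the source
 * page is mapped in userspace (page_mapcount() != 0) and has no dirty D-cache
 * state recorded against it; otherwise a plain kmap_atomic() copy is used.
 * The destination is flushed afterwards unless the CPU's I-cache fills from
 * the D-cache and the kernel mapping does not alias @vaddr.
 */
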
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapcount(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

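/*
 * Editor's note: copy_to_user_page()/copy_from_user_page() back
 * access_process_vm() and friends (e.g. ptrace). The flush_cache_page() on
 * VM_EXEC mappings keeps the I-cache consistent after a debugger writes
 * breakpoint instructions into another task's text pages.
 */
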
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

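/*
 * Editor's note: fixrange_init() only pre-allocates the intermediate page
 * tables (pmd/pte pages) covering [start, end) so that later fixmap/kmap
 * users can install PTEs without allocating page-table pages at map time.
 * On MIPS it only has work to do when CONFIG_HIGHMEM reserves the kmap
 * virtual ranges, hence the #ifdef around the body.
 */
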
unsigned __weak platform_maar_init(unsigned num_pairs)
{
        struct maar_config cfg[BOOT_MEM_MAP_MAX];
        unsigned i, num_configured, num_cfg = 0;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        continue;
                }

                /* Round lower up */
                cfg[num_cfg].lower = boot_mem_map.map[i].addr;
                cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;

                /* Round upper down */
                cfg[num_cfg].upper = boot_mem_map.map[i].addr +
                                        boot_mem_map.map[i].size;
                cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;

                cfg[num_cfg].attrs = MIPS_MAAR_S;
                num_cfg++;
        }

        num_configured = maar_config(cfg, num_cfg, num_pairs);
        if (num_configured < num_cfg)
                pr_warn("Not enough MAAR pairs (%u) for all bootmem regions (%u)\n",
                        num_pairs, num_cfg);

        return num_configured;
}

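/*
 * Editor's note: MAARs (Memory Accessibility Attribute Registers) tell the
 * CPU which physical address ranges may be accessed speculatively
 * (MIPS_MAAR_S). The weak default above marks every usable RAM region from
 * the boot memory map as speculatable, rounding each region inward to the
 * 64KB granularity the registers can describe; platforms may override it.
 */
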
void maar_init(void)
{
        unsigned num_maars, used, i;
        phys_addr_t lower, upper, attr;
        static struct {
                struct maar_config cfgs[3];
                unsigned used;
        } recorded = { { { 0 } }, 0 };

        if (!cpu_has_maar)
                return;

        /* Detect the number of MAARs */
        write_c0_maari(~0);
        back_to_back_c0_hazard();
        num_maars = read_c0_maari() + 1;

        /* MAARs should be in pairs */
        WARN_ON(num_maars % 2);

        /* Set MAARs using values we recorded already */
        if (recorded.used) {
                used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
                BUG_ON(used != recorded.used);
        } else {
                /* Configure the required MAARs */
                used = platform_maar_init(num_maars / 2);
        }

        /* Disable any further MAARs */
        for (i = (used * 2); i < num_maars; i++) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                write_c0_maar(0);
                back_to_back_c0_hazard();
        }

        if (recorded.used)
                return;

        pr_info("MAAR configuration:\n");
        for (i = 0; i < num_maars; i += 2) {
                write_c0_maari(i);
                back_to_back_c0_hazard();
                upper = read_c0_maar();

                write_c0_maari(i + 1);
                back_to_back_c0_hazard();
                lower = read_c0_maar();

                attr = lower & upper;
                lower = (lower & MIPS_MAAR_ADDR) << 4;
                upper = ((upper & MIPS_MAAR_ADDR) << 4) | 0xffff;

                pr_info(" [%d]: ", i / 2);
                if (!(attr & MIPS_MAAR_V)) {
                        pr_cont("disabled\n");
                        continue;
                }

                pr_cont("%pa-%pa", &lower, &upper);

                if (attr & MIPS_MAAR_S)
                        pr_cont(" speculate");

                pr_cont("\n");

                /* Record the setup for use on secondary CPUs */
                if (used <= ARRAY_SIZE(recorded.cfgs)) {
                        recorded.cfgs[recorded.used].lower = lower;
                        recorded.cfgs[recorded.used].upper = upper;
                        recorded.cfgs[recorded.used].attrs = attr;
                        recorded.used++;
                }
        }
}

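/*
 * Editor's note: maar_init() runs on every CPU. The boot CPU probes the
 * number of MAAR pairs, lets the platform configure them and records the
 * resulting ranges in the static 'recorded' table; secondary CPUs then see
 * recorded.used != 0 and simply replay the same configuration, keeping the
 * speculation attributes identical across CPUs.
 */
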
#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                                boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}

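/*
 * Editor's note: PFN_UP() on the region start and PFN_DOWN() on the region
 * end round inward, so a page is only reported as RAM when it lies entirely
 * inside a usable boot-memory region; partial pages at the edges are treated
 * as not-RAM.
 */
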
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn __maybe_unused;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}

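/*
 * Editor's note: this is the "unsupported highmem with cache aliases" case
 * from the patch title. With a virtually aliasing D-cache the MIPS highmem
 * mappings cannot be kept coherent, so the highmem range is clamped out of
 * ZONE_HIGHMEM above, and mem_init_free_highmem() below performs the matching
 * check so those page frames are never handed to the page allocator.
 */
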
#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        if (cpu_has_dc_aliases)
                return;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        maar_init();
        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages.  */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

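/*
 * Editor's note on ordering in mem_init(): free_all_bootmem() hands memory
 * over to the buddy allocator, so setup_zero_pages() (which allocates with
 * __get_free_pages()) and mem_init_free_highmem() must run after it.
 * maar_init() there configures the boot CPU; secondary CPUs later replay the
 * configuration it recorded (see maar_init() above).
 */
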
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

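/*
 * Editor's note: the memset() with POISON_FREE_INITMEM poisons each init page
 * before it is returned to the allocator, so stale references into freed init
 * code or data hit a recognizable poison pattern instead of silently reading
 * old contents.
 */
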
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __ref free_initmem(void)
{
        prom_free_prom_memory();
        /*
         * Let the platform define a specific function to free the
         * init section since EVA may have used any possible mapping
         * between virtual and physical addresses.
         */
        if (free_init_pages_eva)
                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
        else
                free_initmem_default(POISON_FREE_INITMEM);
}

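/*
 * Editor's note: EVA (Enhanced Virtual Addressing) lets MIPS platforms
 * program their own segment-to-physical mappings, so the usual fixed
 * virtual/physical offset assumed by free_initmem_default() may not hold.
 * The free_init_pages_eva hook lets such platforms substitute a routine
 * that understands their mapping.
 */
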
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until those gcc
 * versions are officially retired.
 *
 * Align swapper_pg_dir to 64K, which allows its address to be loaded with a
 * single LUI instruction in the TLB handlers.  If we used __aligned(64K),
 * its size would get rounded up to the alignment size, and waste space.  So
 * we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;