arch/mips/mm/init.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

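/*
 * The ENTER_CRITICAL/EXIT_CRITICAL pairs below guard the temporary TLB
 * entries written by kmap_coherent()/kunmap_coherent().  On SMTC kernels,
 * disabling local interrupts is not enough because the TLB is shared with
 * the other virtual processing elements, so the macros additionally stop
 * them with dvpe()/evpe(); everywhere else a plain local_irq_save() /
 * local_irq_restore() pair suffices.
 */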
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
	unsigned long vaddr;

	/* cache the first coherent kmap pte */
	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif

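/*
 * kmap_coherent() maps a user page into the fixmap region at a kernel
 * virtual address whose cache colour matches the user address @addr, so
 * that kernel accesses do not create a dcache alias with the user mapping.
 * The mapping is installed as a temporary TLB entry (wired, except on SMTC)
 * and must be torn down again with kunmap_coherent().  Page faults are
 * disabled for the lifetime of the mapping.
 */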
void *kmap_coherent(struct page *page, unsigned long addr)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
	idx += FIX_N_COLOURS * smp_processor_id() +
		(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
	/* preload TLB instead of local_flush_tlb_one() */
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	tlbidx = read_c0_index();
	mtc0_tlbw_hazard();
	if (tlbidx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
#else
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
#endif
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);

	return (void *) vaddr;
}

void kunmap_coherent(void)
{
#ifndef CONFIG_MIPS_MT_SMTC
	unsigned int wired;
	unsigned long flags, old_ctx;

	ENTER_CRITICAL(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
#endif
	pagefault_enable();
}

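/*
 * If the source page is mapped in user space and its dcache is not marked
 * dirty, it is read through a colour-matching kmap_coherent() mapping so
 * that, on aliasing dcaches, the copy sees the data the user last wrote
 * through its own mapping; otherwise a plain kmap_atomic() mapping is used.
 */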
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}

void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

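/*
 * Pre-allocate the page-table pages covering the fixmap virtual range
 * [start, end) so that later fixmap/kmap users only ever have to fill in
 * pte entries.  This is only needed when the fixmap is actually used,
 * i.e. on HIGHMEM or SMTC kernels; otherwise the body compiles away and
 * the function is a no-op.
 */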
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC)
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
				boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

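/*
 * Set up the kernel page tables, the (high)mem kmap fixmaps and the
 * per-zone pfn limits passed to free_area_init_nodes().  Highmem is
 * ignored, with a warning, on CPUs whose dcache can alias, since that
 * combination is not supported here.
 */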
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
	kmap_coherent_init();

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

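/*
 * Hand every RAM-backed highmem pfn to the page allocator; pfns in the
 * highmem range that are not usable RAM (holes in the boot memory map)
 * are simply marked reserved.
 */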
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages.  */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to handle
		   the overflow.  */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
				0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

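/*
 * Poison and release a physical range that was only needed during init;
 * each page is returned to the page allocator via free_reserved_page().
 */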
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offset.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir in to 64K, allows its address to be loaded
 * with a single LUI instruction in the TLB handlers.  If we used
 * __aligned(64K), its size would get rounded up to the alignment
 * size, and waste space.  So we place it in its own section and align
 * it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;