/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed.  This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price.  Since the pages are never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
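
/*
 * ZERO_PAGE(vaddr) in <asm/pgtable.h> selects the page of matching cache
 * colour from this block by masking the virtual address with
 * zero_page_mask, so zero-page reads never create a harmful alias.
 */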

/*
 * Not static inline because it is used by the IP27 special magic
 * initialization code.
 */
void setup_zero_pages(void)
{
        unsigned int order, i;
        struct page *page;

        if (cpu_has_vce)
                order = 3;
        else
                order = 0;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Oh boy, that early out of memory?");

        page = virt_to_page((void *)empty_zero_page);
        split_page(page, order);
        for (i = 0; i < (1 << order); i++, page++)
                mark_page_reserved(page);

        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
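
/*
 * Map the given page at a kernel virtual address whose cache colour
 * matches @addr by installing a temporary wired TLB entry for one of the
 * FIX_CMAP fixmap slots.  A second bank of slots is used in interrupt
 * context so that a mapping set up by interrupted code is not clobbered.
 */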
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags, entrylo;
        unsigned long old_ctx;
        pte_t pte;
        int tlbidx;

        BUG_ON(Page_dcache_dirty(page));

        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, prot);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
#else
        entrylo = pte_to_entrylo(pte_val(pte));
#endif

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        write_c0_entryhi(vaddr & (PAGE_MASK << 1));
        write_c0_entrylo0(entrylo);
        write_c0_entrylo1(entrylo);
        tlbidx = read_c0_wired();
        write_c0_wired(tlbidx + 1);
        write_c0_index(tlbidx);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);

        return (void *)vaddr;
}
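
/*
 * kmap_coherent() maps @page with the normal cacheable kernel protection,
 * giving a mapping that hits the same cache lines as the user mapping of
 * the page; kmap_noncoherent() does the same with the non-coherent cache
 * attribute (PAGE_KERNEL_NC).  Both are undone with kunmap_coherent().
 */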
void *kmap_coherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
        return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

void kunmap_coherent(void)
{
        unsigned int wired;
        unsigned long flags, old_ctx;

        local_irq_save(flags);
        old_ctx = read_c0_entryhi();
        wired = read_c0_wired() - 1;
        write_c0_wired(wired);
        write_c0_index(wired);
        write_c0_entryhi(UNIQUE_ENTRYHI(wired));
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
}
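
/*
 * Copy a user highpage while respecting D-cache aliases: if the source
 * page may still be live in the caches under a user mapping (mapped and
 * not yet marked dcache-dirty), read it through a colour-matched
 * kmap_coherent() mapping so we see the user's view of the data instead
 * of a stale alias.
 */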
void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to);
        if (cpu_has_dc_aliases &&
            page_mapped(from) && !Page_dcache_dirty(from)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom);
        }
        if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}
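
/*
 * copy_to_user_page() and copy_from_user_page() below follow the same
 * aliasing rule for ptrace/access_process_vm() style accesses: go through
 * a colour-matched mapping while the page may be live in the cache under
 * a user mapping, otherwise use the kernel address and mark the page
 * dcache-dirty so the alias is dealt with later.
 */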
void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
        if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
{
        if (cpu_has_dc_aliases &&
            page_mapped(page) && !Page_dcache_dirty(page)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (cpu_has_dc_aliases)
                        SetPageDcacheDirty(page);
        }
}
EXPORT_SYMBOL_GPL(copy_from_user_page);
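
/*
 * Pre-allocate pte tables for the range [start, end) so that later
 * set_pte() calls there never need to allocate: every empty pmd entry is
 * pointed at a zeroed bootmem page.  Only needed for the fixmap/kmap
 * region, hence the CONFIG_HIGHMEM guard.
 */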
void __init fixrange_init(unsigned long start, unsigned long end,
        pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;
        unsigned long vaddr;

        vaddr = start;
        i = __pgd_offset(vaddr);
        j = __pud_offset(vaddr);
        k = __pmd_offset(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pud = (pud_t *)pgd;
                for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
                        pmd = (pmd_t *)pud;
                        for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
                                if (pmd_none(*pmd)) {
                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                        set_pmd(pmd, __pmd((unsigned long)pte));
                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
                                }
                                vaddr += PMD_SIZE;
                        }
                        k = 0;
                }
                j = 0;
        }
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
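
/*
 * A page is usable RAM iff it lies inside one of the regions that the
 * early memory detection code recorded in boot_mem_map as RAM.
 */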
int page_is_ram(unsigned long pagenr)
{
        int i;

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long addr, end;

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                        break;
                default:
                        /* not usable memory */
                        continue;
                }

                addr = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr +
                               boot_mem_map.map[i].size);

                if (pagenr >= addr && pagenr < end)
                        return 1;
        }

        return 0;
}
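
/*
 * Compute the per-zone PFN limits and hand them to the core VM.  With a
 * virtually aliasing D-cache the kernel cannot, in general, map a highmem
 * page at an address of the matching colour, so detected highmem is
 * dropped with a warning instead of being used.
 */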
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long lastpfn __maybe_unused;

        pagetable_init();

#ifdef CONFIG_HIGHMEM
        kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
        lastpfn = highend_pfn;

        if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
                       " %ldk highmem ignored\n",
                       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
                max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
                lastpfn = max_low_pfn;
        }
#endif

        free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif
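
/*
 * With highmem configured, hand every usable highmem page to the buddy
 * allocator; pages the boot memory map does not list as RAM stay
 * reserved.
 */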
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long tmp;

        for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
                struct page *page = pfn_to_page(tmp);

                if (!page_is_ram(tmp))
                        SetPageReserved(page);
                else
                        free_highmem_page(page);
        }
#endif
}
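
/*
 * Late boot memory setup: establish max_mapnr and high_memory, release
 * bootmem to the buddy allocator, allocate the coloured zero pages and,
 * on 64-bit kernels, expose KSEG0 through /proc/kcore.
 */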
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
        max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
        max_mapnr = max_low_pfn;
#endif
        high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

        free_all_bootmem();
        setup_zero_pages();     /* Setup zeroed pages.  */
        mem_init_free_highmem();
        mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
        if ((unsigned long) &_text > (unsigned long) CKSEG0)
                /* The -4 is a hack so that user tools don't have to handle
                   the overflow.  */
                kclist_add(&kcore_kseg0, (void *) CKSEG0,
                                0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
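
/*
 * Poison a physically contiguous range of init memory and hand it back
 * to the buddy allocator, reporting how much was freed.
 */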
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
        unsigned long pfn;

        for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
                struct page *page = pfn_to_page(pfn);
                void *addr = phys_to_virt(PFN_PHYS(pfn));

                memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_reserved_page(page);
        }
        printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
                           "initrd");
}
#endif
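
/*
 * Platforms running under EVA (Enhanced Virtual Addressing) can install a
 * custom routine here to free the init section, since the usual identity
 * assumptions about its kernel mapping need not hold there; when set, it
 * overrides free_initmem_default().
 */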
void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
        prom_free_prom_memory();
        /*
         * Let the platform define a specific function to free the
         * init section since EVA may have used any possible mapping
         * between virtual and physical addresses.
         */
        if (free_init_pages_eva)
                free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
        else
                free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants.  So we use the variants from asm-offsets.h until that gcc
 * will officially be retired.
 *
 * Align swapper_pg_dir to 64K, which allows its address to be loaded with a
 * single LUI instruction in the TLB handlers.  If we used __aligned(64K),
 * its size would get rounded up to the alignment size, wasting space.  So
 * we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
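
/*
 * Illustrative sketch (not generated code): because the table is 64K
 * aligned, the low 16 bits of its address are zero, so a refill handler
 * can materialize the base address with a single
 *
 *      lui     k1, %hi(swapper_pg_dir)
 *
 * instead of a lui/addiu pair.
 */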

#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;