arch/mips/mm/init.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/kmap_types.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since the pages are never written to after initialization we
 * don't have to care about aliases on other CPUs.
 */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);

/*
 * Not static inline because used by IP27 special magic initialization code
 */
void setup_zero_pages(void)
{
	unsigned int order, i;
	struct page *page;

	if (cpu_has_vce)
		order = 3;
	else
		order = 0;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Oh boy, that early out of memory?");

	page = virt_to_page((void *)empty_zero_page);
	split_page(page, order);
	for (i = 0; i < (1 << order); i++, page++)
		mark_page_reserved(page);

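	/*
	 * The mask below picks out the page-colour bits of an address;
	 * ZERO_PAGE() uses it to hand out the zero page whose virtual
	 * colour matches the address it will be mapped at.
	 */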
	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

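/*
 * Map @page with a temporary wired TLB entry at a fixmap slot whose
 * virtual colour matches @addr, so the kernel sees the page through
 * the same cache alias as userland. Pagefaults stay disabled until
 * the matching kunmap_coherent().
 */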
static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr, flags, entrylo;
	unsigned long old_ctx;
	pte_t pte;
	int tlbidx;

	BUG_ON(Page_dcache_dirty(page));

	pagefault_disable();
	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
	idx += in_interrupt() ? FIX_N_COLOURS : 0;
	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
	pte = mk_pte(page, prot);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
	entrylo = pte.pte_high;
#else
	entrylo = pte_to_entrylo(pte_val(pte));
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
	write_c0_entrylo0(entrylo);
	write_c0_entrylo1(entrylo);
	tlbidx = read_c0_wired();
	write_c0_wired(tlbidx + 1);
	write_c0_index(tlbidx);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);

	return (void *)vaddr;
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL);
}

void *kmap_noncoherent(struct page *page, unsigned long addr)
{
	return __kmap_pgprot(page, addr, PAGE_KERNEL_NC);
}

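/*
 * Tear down the wired TLB entry installed by __kmap_pgprot() and
 * re-enable pagefaults.
 */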
void kunmap_coherent(void)
{
	unsigned int wired;
	unsigned long flags, old_ctx;

	local_irq_save(flags);
	old_ctx = read_c0_entryhi();
	wired = read_c0_wired() - 1;
	write_c0_wired(wired);
	write_c0_index(wired);
	write_c0_entryhi(UNIQUE_ENTRYHI(wired));
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
	pagefault_enable();
}

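/*
 * Copy a user page. If the D-cache can alias and the source page is
 * mapped clean in userland, read it through a kernel mapping of the
 * matching colour so we observe the data the user sees.
 */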
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);
	if (cpu_has_dc_aliases &&
	    page_mapped(from) && !Page_dcache_dirty(from)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}
	if ((!cpu_has_ic_fills_f_dc) ||
	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		flush_data_cache_page((unsigned long)vto);
	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}

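/*
 * Write into a page on behalf of a user mapping (e.g. for ptrace).
 * With aliasing D-caches the store goes through a mapping of the
 * right colour; otherwise the page is marked dirty so later cache
 * maintenance knows the kernel touched it.
 */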
void copy_to_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
	if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

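/*
 * Read from a page on behalf of a user mapping, again through a
 * mapping of matching colour when D-cache aliasing is a concern.
 */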
void copy_from_user_page(struct vm_area_struct *vma,
	struct page *page, unsigned long vaddr, void *dst, const void *src,
	unsigned long len)
{
	if (cpu_has_dc_aliases &&
	    page_mapped(page) && !Page_dcache_dirty(page)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (cpu_has_dc_aliases)
			SetPageDcacheDirty(page);
	}
}
EXPORT_SYMBOL_GPL(copy_from_user_page);

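/*
 * Pre-allocate page tables covering the fixmap range [start, end) so
 * fixmap entries can later be set without memory allocation; only
 * needed for the HIGHMEM kmap area.
 */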
void __init fixrange_init(unsigned long start, unsigned long end,
	pgd_t *pgd_base)
{
#ifdef CONFIG_HIGHMEM
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
			pmd = (pmd_t *)pud;
			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
				if (pmd_none(*pmd)) {
					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
					set_pmd(pmd, __pmd((unsigned long)pte));
					BUG_ON(pte != pte_offset_kernel(pmd, 0));
				}
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
#endif
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
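/*
 * Report whether a PFN lies in a usable-RAM region of the
 * firmware-provided boot_mem_map.
 */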
int page_is_ram(unsigned long pagenr)
{
	int i;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long addr, end;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
			break;
		default:
			/* not usable memory */
			continue;
		}

		addr = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr +
			       boot_mem_map.map[i].size);

		if (pagenr >= addr && pagenr < end)
			return 1;
	}

	return 0;
}

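/*
 * Build the per-zone PFN limits and hand them to the core VM. Note
 * that highmem is ignored on CPUs with aliasing D-caches, where it is
 * unsupported.
 */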
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long lastpfn __maybe_unused;

	pagetable_init();

#ifdef CONFIG_HIGHMEM
	kmap_init();
#endif
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	lastpfn = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
	lastpfn = highend_pfn;

	if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
		printk(KERN_WARNING "This processor doesn't support highmem."
		       " %ldk highmem ignored\n",
		       (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
		max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
		lastpfn = max_low_pfn;
	}
#endif

	free_area_init_nodes(max_zone_pfns);
}

#ifdef CONFIG_64BIT
static struct kcore_list kcore_kseg0;
#endif

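/*
 * Give highmem pages that are actual RAM to the buddy allocator;
 * everything else in the highmem PFN range stays reserved.
 */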
static inline void mem_init_free_highmem(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long tmp;

	for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) {
		struct page *page = pfn_to_page(tmp);

		if (!page_is_ram(tmp))
			SetPageReserved(page);
		else
			free_highmem_page(page);
	}
#endif
}

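/*
 * Final bootmem hand-over: compute max_mapnr and high_memory, release
 * boot memory to the page allocator and set up the zero pages.
 */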
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
#ifdef CONFIG_DISCONTIGMEM
#error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM don't work together yet"
#endif
	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
#else
	max_mapnr = max_low_pfn;
#endif
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	free_all_bootmem();
	setup_zero_pages();	/* Setup zeroed pages. */
	mem_init_free_highmem();
	mem_init_print_info(NULL);

#ifdef CONFIG_64BIT
	if ((unsigned long) &_text > (unsigned long) CKSEG0)
		/* The -4 is a hack so that user tools don't have to
		   handle the overflow. */
		kclist_add(&kcore_kseg0, (void *) CKSEG0,
			   0x80000000 - 4, KCORE_TEXT);
#endif
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

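/*
 * Poison a range that held init code or data and return its pages to
 * the page allocator.
 */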
void free_init_pages(const char *what, unsigned long begin, unsigned long end)
{
	unsigned long pfn;

	for (pfn = PFN_UP(begin); pfn < PFN_DOWN(end); pfn++) {
		struct page *page = pfn_to_page(pfn);
		void *addr = phys_to_virt(PFN_PHYS(pfn));

		memset(addr, POISON_FREE_INITMEM, PAGE_SIZE);
		free_reserved_page(page);
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
			   "initrd");
}
#endif

void (*free_init_pages_eva)(void *begin, void *end) = NULL;

void __init_refok free_initmem(void)
{
	prom_free_prom_memory();
	/*
	 * Let the platform define a specific function to free the
	 * init section since EVA may have used any possible mapping
	 * between virtual and physical addresses.
	 */
	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
unsigned long pgd_current[NR_CPUS];
#endif

/*
 * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
 * are constants. So we use the variants from asm-offsets.h until that gcc
 * is officially retired.
 *
 * Aligning swapper_pg_dir to 64K allows its address to be loaded with a
 * single LUI instruction in the TLB handlers. If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.
 * So we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;