/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>	/* for initrd_* */
#include <linux/pagemap.h>
#include <linux/memblock.h>

#include <asm/segment.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

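/* mem_init_done flags that the core page allocator is up and running;
 * early allocation paths in the arch code (for example the ioremap
 * implementation) check it to decide how to obtain pages.
 */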
int mem_init_done;

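/* Per-CPU mmu_gather state for TLB flush batching */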
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

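/* Set up the zone sizes for free_area_init(); everything goes into
 * ZONE_NORMAL since this port does not use highmem.
 */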
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];

	/* Clear the zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

	/*
	 * We use only ZONE_NORMAL
	 */
	zones_size[ZONE_NORMAL] = max_low_pfn;

	free_area_init(zones_size);
}

extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	/* _s_kernel_ro and _e_kernel_ro mark the extents of the read-only
	 * kernel pages; they are defined in vmlinux.lds.S
	 */
	struct memblock_region *region;

	v = PAGE_OFFSET;

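	/* Walk every memblock region and create a linear mapping for it at
	 * its __va() address, one PTE page at a time.  Pages that fall
	 * inside the kernel's read-only section get PAGE_KERNEL_RO, the
	 * rest get PAGE_KERNEL.
	 */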
	for_each_memblock(memory, region) {
		p = (u32) region->base & PAGE_MASK;
		e = p + (u32) region->size;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;
			pue = pud_offset(pge, v);
			pme = pmd_offset(pue, v);

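			/* With the folded two-level layout, pud_offset() and
			 * pmd_offset() should hand back the pgd entry itself;
			 * anything else means the page table layout changed
			 * and this code no longer applies.
			 */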
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables",
				      __func__);
			}

			/* Alloc one page for holding PTEs... */
			pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PGD;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       region->base, region->base + region->size);
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	unsigned long end;
	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 *  switch_mm)
	 */
	current_pgd = init_mm.pgd;

	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

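		/* Patch the exception vectors directly: storing the word
		 * offset (byte offset >> 2) from the vector to the handler
		 * appears to rely on the OpenRISC l.j encoding (opcode 0,
		 * 26-bit word offset), so the bare offset acts as an
		 * "l.j <handler>" instruction placed at the vector.
		 */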
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;

	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

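	/* SPR_ICBIR is the instruction cache block invalidate register;
	 * writing the vector addresses to it discards any stale cached
	 * copies of the words rewritten above.
	 */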
	/* Invalidate instruction caches after code modification */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* New TLB miss handlers and kernel page tables are now in place.
	 * Make sure that page flags get updated for all pages in TLB by
	 * flushing the TLB and forcing all TLB entries to be recreated
	 * from their page table flags.
	 */
	flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	free_all_bootmem();

	mem_init_print_info(NULL);

	printk("mem_init_done ...........................................\n");
	mem_init_done = 1;
	return;
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

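/* Release the memory occupied by the __init sections back to the page
 * allocator once boot has finished.
 */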
void free_initmem(void)
{
	free_initmem_default(-1);
}