/*
 * linux/arch/i386/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>

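/* Kernel address space reserved for vmalloc by default: 128 MB. */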
unsigned int __VMALLOC_RESERVE = 128 << 20;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static int noinline do_test_wp_bit(void);

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the pgd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                if (pmd_table != pmd_offset(pud, 0))
                        BUG();
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);
        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

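                /*
                 * With CONFIG_DEBUG_PAGEALLOC, try a plain bootmem
                 * allocation first; fall back to a low-memory bootmem
                 * page below only if that fails.
                 */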
#ifdef CONFIG_DEBUG_PAGEALLOC
                page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                if (!page_table)
                        page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);

                paravirt_alloc_pt(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 */

/*
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
                        one_page_table_init(pmd);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

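/*
 * Everything from PAGE_OFFSET up to the end of the init section counts
 * as kernel text here; the callers below use this to keep the whole
 * kernel image, init code included, mapped executable.
 */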
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int pgd_idx, pmd_idx, pte_ofs;

        pgd_idx = pgd_index(PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        pfn = 0;

        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                if (pfn >= max_low_pfn)
                        continue;
                for (pmd_idx = 0;
                     pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /* Map with big pages if possible, otherwise
                           create normal page tables. */
                        if (cpu_has_pse) {
                                unsigned int address2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;

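                                /*
                                 * address2 is the last byte covered by
                                 * this large page; the mapping is made
                                 * executable if either end falls within
                                 * kernel text.
                                 */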
                                address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(address) ||
                                    is_kernel_text(address2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                        } else {
                                pte = one_page_table_init(pmd);

                                for (pte_ofs = 0;
                                     pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn;
                                     pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        pgprot_t prot = PAGE_KERNEL;

                                        if (is_kernel_text(address))
                                                prot = PAGE_KERNEL_EXEC;

                                        set_pte(pte, pfn_pte(pfn, prot));
                                }
                        }
                }
        }
}

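/*
 * pfns 0x70000-0x7003F are physical addresses 0x70000000-0x7003ffff,
 * the range affected by the Pentium Pro erratum that
 * ppro_with_ram_bug() detects; mem_init() keeps these pages reserved
 * on affected CPUs.
 */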
static inline int page_kills_ppro(unsigned long pagenr)
{
        if (pagenr >= 0x70000 && pagenr <= 0x7003F)
                return 1;
        return 0;
}

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

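/*
 * Walk the kernel page tables to find the pte backing a fixmap
 * virtual address; used by kmap_init() to cache the first kmap pte.
 */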
#define kmap_get_fixmap_pte(vaddr) \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __meminit free_new_highpage(struct page *page)
{
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
                free_new_highpage(page);
        } else
                SetPageReserved(page);
}

static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
{
        free_new_highpage(page);
        totalram_pages++;
#ifdef CONFIG_FLATMEM
        max_mapnr = max(pfn, max_mapnr);
#endif
        num_physpages++;
        return 0;
}

/*
 * Not currently handling the NUMA case.
 * Assume a single node: all memory that has been added
 * dynamically and is onlined here goes into HIGHMEM.
 */
void __meminit online_page(struct page *page)
{
        ClearPageReserved(page);
        add_one_highpage_hotplug(page, page_to_pfn(page));
}


#ifdef CONFIG_NUMA
extern void set_highmem_pages_init(int);
#else
static void __init set_highmem_pages_init(int bad_ppro)
{
        int pfn;
        for (pfn = highstart_pfn; pfn < highend_pfn; pfn++) {
                /*
                 * Holes under sparsemem might not have mem_map[]:
                 */
                if (pfn_valid(pfn))
                        add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
        }
        totalram_pages += totalhigh_pages;
}
#endif /* CONFIG_NUMA */

#else
#define kmap_init() do { } while (0)
#define permanent_kmaps_init(pgd_base) do { } while (0)
#define set_highmem_pages_init(bad_ppro) do { } while (0)
#endif /* CONFIG_HIGHMEM */


pteval_t __PAGE_KERNEL = _PAGE_KERNEL;
EXPORT_SYMBOL(__PAGE_KERNEL);
pteval_t __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;

#ifdef CONFIG_NUMA
extern void __init remap_numa_kva(void);
#else
#define remap_numa_kva() do {} while (0)
#endif

void __init native_pagetable_setup_start(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        int i;

        /*
         * Init entries of the first-level page table to the
         * zero page, if they haven't already been set up.
         *
         * In a normal native boot, we'll be running on a
         * pagetable rooted in swapper_pg_dir, but not in PAE
         * mode, so this will end up clobbering the mappings
         * for the lower 24Mbytes of the address space,
         * without affecting the kernel address space.
         */
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
                set_pgd(&base[i],
                        __pgd(__pa(empty_zero_page) | _PAGE_PRESENT));

        /* Make sure kernel address space is empty so that a pagetable
           will be allocated for it. */
        memset(&base[USER_PTRS_PER_PGD], 0,
               KERNEL_PGD_PTRS * sizeof(pgd_t));
#else
        paravirt_alloc_pd(&init_mm, __pa(base) >> PAGE_SHIFT);
#endif
}

void __init native_pagetable_setup_done(pgd_t *base)
{
#ifdef CONFIG_X86_PAE
        /*
         * Add low memory identity-mappings - SMP needs it when
         * starting up on an AP from real-mode. In the non-PAE
         * case we already have these mappings through head.S.
         * All user-space mappings are explicitly cleared after
         * SMP startup.
         */
        set_pgd(&base[0], base[USER_PTRS_PER_PGD]);
#endif
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/i386/kernel/head.S, and not running in PAE mode
 * (even if we'll end up running in PAE). The root of the pagetable
 * will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init pagetable_init(void)
{
        unsigned long vaddr, end;
        pgd_t *pgd_base = swapper_pg_dir;

        paravirt_pagetable_setup_start(pgd_base);

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __PAGE_KERNEL |= _PAGE_GLOBAL;
                __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
        }

        kernel_physical_mapping_init(pgd_base);
        remap_numa_kva();

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        early_ioremap_clear();
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();

        permanent_kmaps_init(pgd_base);

        paravirt_pagetable_setup_done(pgd_base);
}

#if defined(CONFIG_HIBERNATION) || defined(CONFIG_ACPI)
/*
 * Swap suspend & friends need this for resume because things like the
 * intel-agp driver might have split up a kernel 4MB mapping.
 */
char __nosavedata swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned (PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else
static inline void save_pg_dir(void)
{
}
#endif

void zap_low_mappings(void)
{
        int i;

        save_pg_dir();

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
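        /*
         * Under PAE the entries are pointed at the zero page instead;
         * the "1 +" below is _PAGE_PRESENT.
         */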
        for (i = 0; i < USER_PTRS_PER_PGD; i++)
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        flush_tlb_all();
}

int nx_enabled = 0;

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_NX;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata = 0;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else if (!strcmp(str, "off")) {
                disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        } else
                return -EINVAL;

        return 0;
}
early_param("noexec", noexec_setup);

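/*
 * CPUID leaf 0x80000001, EDX bit 20 is the NX feature flag.  If the
 * CPU has it (and "noexec=off" was not given), enable EFER.NX so that
 * _PAGE_NX in page table entries takes effect.
 */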
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}

#endif

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk("NX (Execute Disable) protection: active\n");
#endif

        pagetable_init();

        load_cr3(swapper_pg_dir);

#ifdef CONFIG_X86_PAE
        /*
         * We will bail out later - printk doesn't work right now so
         * the user would just see a hanging kernel.
         */
        if (cpu_has_pae)
                set_in_cr4(X86_CR4_PAE);
#endif
        __flush_tlb_all();

        kmap_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
 * used to involve black magic jumps to work around some nasty CPU bugs,
 * but fortunately the switch to using exceptions got rid of all that.
 */

static void __init test_wp_bit(void)
{
        printk("Checking if this processor honours the WP bit even in supervisor mode... ");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk("No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk("Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        extern int ppro_with_ram_bug(void);
        int codesize, reservedpages, datasize, initsize;
        int tmp;
        int bad_ppro;

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

        bad_ppro = ppro_with_ram_bug();

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
                printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
                printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
                BUG();
        }
#endif

        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init(bad_ppro);

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

#if 1 /* double-sanity-check paranoia */
        printk("virtual kernel memory layout:\n"
               "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
               "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
               "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
               "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
               "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
               FIXADDR_START, FIXADDR_TOP,
               (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
               PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
               (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

               VMALLOC_START, VMALLOC_END,
               (VMALLOC_END - VMALLOC_START) >> 20,

               (unsigned long)__va(0), (unsigned long)high_memory,
               ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

               (unsigned long)&__init_begin, (unsigned long)&__init_end,
               ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,

               (unsigned long)&_etext, (unsigned long)&_edata,
               ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

               (unsigned long)&_text, (unsigned long)&_etext,
               ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START > VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);
#endif /* double-sanity-check paranoia */

#ifdef CONFIG_X86_PAE
        if (!cpu_has_pae)
                panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
#endif
        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        /*
         * Subtle. SMP is doing its boot stuff late (because it has to
         * fork idle threads) - but it also needs low mappings for the
         * protected-mode entry to work. We zap these entries only after
         * the WP-bit has been tested.
         */
#ifndef CONFIG_SMP
        zap_low_mappings();
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
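/*
 * Hot-added memory is always put into ZONE_HIGHMEM here, matching the
 * single-node assumption documented above online_page().
 */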
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}

#endif

struct kmem_cache *pmd_cache;

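/*
 * A pmd cache is only needed when the pmd level really exists, i.e.
 * with PAE (PTRS_PER_PMD > 1); in the non-PAE case the pmd is folded
 * into the pgd and nothing has to be allocated.
 */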
void __init pgtable_cache_init(void)
{
        if (PTRS_PER_PMD > 1)
                pmd_cache = kmem_cache_create("pmd",
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              PTRS_PER_PMD*sizeof(pmd_t),
                                              SLAB_PANIC,
                                              pmd_ctor);
}

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static int noinline do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

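        /*
         * "flag" starts out as 1.  The first movb reads the read-only
         * FIX_WP_TEST page, the second one tries to write it back.  If
         * WP is honoured in supervisor mode the write faults and the
         * exception fixup jumps to label 2 with flag still 1; otherwise
         * the write succeeds and the xorl clears flag to 0.
         */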
        __asm__ __volatile__(
                "       movb %0,%1      \n"
                "1:     movb %1,%0      \n"
                "       xorl %2,%2      \n"
                "2:                     \n"
                ".section __ex_table,\"a\"\n"
                "       .align 4        \n"
                "       .long 1b,2b     \n"
                ".previous              \n"
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_KPROBES
#ifdef CONFIG_HOTPLUG_CPU
        /* It must still be possible to apply SMP alternatives. */
        if (num_possible_cpus() <= 1)
#endif
        {
                set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
                printk("Write protecting the kernel text: %luk\n", size >> 10);

#ifdef CONFIG_CPA_DEBUG
                printk("Testing CPA: Reverting %lx-%lx\n", start, start+size);
                set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

                printk("Testing CPA: write protecting again\n");
                set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        }
#endif
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk("Write protecting the kernel read-only data: %luk\n",
               size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk("Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk("Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr;

        /*
         * We just marked the kernel text read only above; now that
         * we are going to free part of it, we need to make it
         * writeable first.
         */
        set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);

        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(addr));
                init_page_count(virt_to_page(addr));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
}

void free_initmem(void)
{
        free_init_pages("unused kernel memory",
                        (unsigned long)(&__init_begin),
                        (unsigned long)(&__init_end));
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        free_init_pages("initrd memory", start, end);
}
#endif