/*
 * arch/x86/mm/init_32.c  [deliverable/linux.git]
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

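/*
 * A note on the allocator below: it is a simple bump allocator for
 * page-table pages, handing out zeroed pages one PFN at a time from the
 * [table_start, table_top) window reserved by find_early_table_space(),
 * for use before the bootmem allocator is available.
 */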
static __init void *alloc_low_page(void)
{
	unsigned long pfn = table_end++;
	void *adr;

	if (pfn >= table_top)
		panic("alloc_low_page: ran out of memory");

	adr = __va(pfn * PAGE_SIZE);
	memset(adr, 0, PAGE_SIZE);
	return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
		if (after_init_bootmem)
			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		else
			pmd_table = (pmd_t *)alloc_low_page();
		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
		pud = pud_offset(pgd, 0);
		BUG_ON(pmd_table != pmd_offset(pud, 0));

		return pmd_table;
	}
#endif
	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
		pte_t *page_table = NULL;

		if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
			page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
			if (!page_table)
				page_table =
				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
		} else
			page_table = (pte_t *)alloc_low_page();

		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
		BUG_ON(page_table != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

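/*
 * A reading of the check below: besides repairing a nonlinear pte-page
 * allocation caused by the early fixmap, the final BUG_ON also asserts
 * (for addresses outside the kmap range) that successive pte pages are
 * physically consecutive - the contiguity page_table_range_init()'s
 * caching assumption relies on.
 */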
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					   unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
	/*
	 * Something (early fixmap) may already have put a pte
	 * page here, which causes the page table allocation
	 * to become nonlinear. Attempt to fix it, and if it
	 * is still nonlinear then we have to bug.
	 */
	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
		pte_t *newpte;
		int i;

		BUG_ON(after_init_bootmem);
		newpte = alloc_low_page();
		for (i = 0; i < PTRS_PER_PTE; i++)
			set_pte(newpte + i, pte[i]);

		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
		__flush_tlb_all();

		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
		pte = newpte;
	}
	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
	       && vaddr > fix_to_virt(FIX_KMAP_END)
	       && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
	return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The page tables are allocated contiguously in physical space,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
	int pgd_idx, pmd_idx;
	unsigned long vaddr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte = NULL;

	vaddr = start;
	pgd_idx = pgd_index(vaddr);
	pmd_idx = pmd_index(vaddr);
	pgd = pgd_base + pgd_idx;

	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);
		pmd = pmd + pmd_index(vaddr);
		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
		     pmd++, pmd_idx++) {
			pte = page_table_kmap_check(one_page_table_init(pmd),
						    pmd, vaddr, pte);

			vaddr += PMD_SIZE;
		}
		pmd_idx = 0;
	}
}

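/*
 * Note: this is deliberately broad - everything from PAGE_OFFSET up to
 * __init_end counts as kernel text, so the whole kernel image (including
 * the init sections) keeps an executable mapping.
 */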
static inline int is_kernel_text(unsigned long addr)
{
	if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
		return 1;
	return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
						unsigned long start_pfn,
						unsigned long end_pfn,
						int use_pse)
{
	int pgd_idx, pmd_idx, pte_ofs;
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned pages_2m, pages_4k;
	int mapping_iter;

	/*
	 * First iteration will setup identity mapping using large/small pages
	 * based on use_pse, with other attributes same as set by
	 * the early code in head_32.S.
	 *
	 * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
	 * as desired for the kernel identity mapping.
	 *
	 * This two pass mechanism conforms to the TLB app note which says:
	 *
	 *     "Software should not write to a paging-structure entry in a way
	 *      that would change, for any linear address, both the page size
	 *      and either the page frame or attributes."
	 */
	mapping_iter = 1;

	if (!cpu_has_pse)
		use_pse = 0;

repeat:
	pages_2m = pages_4k = 0;
	pfn = start_pfn;
	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = one_md_table_init(pgd);

		if (pfn >= end_pfn)
			continue;
#ifdef CONFIG_X86_PAE
		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
		pmd += pmd_idx;
#else
		pmd_idx = 0;
#endif
		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
		     pmd++, pmd_idx++) {
			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

			/*
			 * Map with big pages if possible, otherwise
			 * create normal page tables:
			 */
			if (use_pse) {
				unsigned int addr2;
				pgprot_t prot = PAGE_KERNEL_LARGE;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute + _PAGE_PSE.
				 */
				pgprot_t init_prot =
					__pgprot(PTE_IDENT_ATTR |
						 _PAGE_PSE);

				addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
					PAGE_OFFSET + PAGE_SIZE-1;

				if (is_kernel_text(addr) ||
				    is_kernel_text(addr2))
					prot = PAGE_KERNEL_LARGE_EXEC;

				pages_2m++;
				if (mapping_iter == 1)
					set_pmd(pmd, pfn_pmd(pfn, init_prot));
				else
					set_pmd(pmd, pfn_pmd(pfn, prot));

				pfn += PTRS_PER_PTE;
				continue;
			}
			pte = one_page_table_init(pmd);

			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
			pte += pte_ofs;
			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
			     pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
				pgprot_t prot = PAGE_KERNEL;
				/*
				 * first pass will use the same initial
				 * identity mapping attribute.
				 */
				pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

				if (is_kernel_text(addr))
					prot = PAGE_KERNEL_EXEC;

				pages_4k++;
				if (mapping_iter == 1)
					set_pte(pte, pfn_pte(pfn, init_prot));
				else
					set_pte(pte, pfn_pte(pfn, prot));
			}
		}
	}
	if (mapping_iter == 1) {
		/*
		 * update direct mapping page count only in the first
		 * iteration.
		 */
		update_page_count(PG_LEVEL_2M, pages_2m);
		update_page_count(PG_LEVEL_4K, pages_4k);

		/*
		 * local global flush tlb, which will flush the previous
		 * mappings present in both small and large page TLB's.
		 */
		__flush_tlb_all();

		/*
		 * Second iteration will set the actual desired PTE attributes.
		 */
		mapping_iter = 2;
		goto repeat;
	}
}

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * On x86, access has to be given to the first megabyte of RAM because that
 * area traditionally contains BIOS code and data regions used by X, dosemu
 * and similar apps. Access has to be given to non-kernel-RAM areas as well;
 * these contain the PCI mmio resources as well as potential bios/acpi data
 * regions.
 */
int devmem_is_allowed(unsigned long pagenr)
{
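	/*
	 * Pages 0-255 make up the first megabyte (256 * 4 KiB); note that
	 * the "<=" below also lets PFN 256 itself through.
	 */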
	if (pagenr <= 256)
		return 1;
	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pagenr))
		return 1;
	return 0;
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/*
	 * Cache the first kmap pte:
	 */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

	kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
	unsigned long vaddr;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	vaddr = PKMAP_BASE;
	page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

	pgd = swapper_pg_dir + pgd_index(vaddr);
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	pte = pte_offset_kernel(pmd, vaddr);
	pkmap_page_table = pte;
}

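/*
 * Highmem pages are not covered by the low-memory free_all_bootmem() pass
 * in mem_init(), so each one is released to the buddy allocator by hand:
 * clear the reserved bit, reset the refcount, then free the page.
 */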
static void __init add_one_highpage_init(struct page *page, int pfn)
{
	ClearPageReserved(page);
	init_page_count(page);
	__free_page(page);
	totalhigh_pages++;
}

struct add_highpages_data {
	unsigned long start_pfn;
	unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	int node_pfn;
	struct page *page;
	unsigned long final_start_pfn, final_end_pfn;
	struct add_highpages_data *data;

	data = (struct add_highpages_data *)datax;

	final_start_pfn = max(start_pfn, data->start_pfn);
	final_end_pfn = min(end_pfn, data->end_pfn);
	if (final_start_pfn >= final_end_pfn)
		return 0;

	for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
	     node_pfn++) {
		if (!pfn_valid(node_pfn))
			continue;
		page = pfn_to_page(node_pfn);
		add_one_highpage_init(page, node_pfn);
	}

	return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	struct add_highpages_data data;

	data.start_pfn = start_pfn;
	data.end_pfn = end_pfn;

	work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
	unsigned long pfn, va;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * Remove any mappings which extend past the end of physical
	 * memory from the boot time page table:
	 */
	for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
		va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
		pgd = base + pgd_index(va);
		if (!pgd_present(*pgd))
			break;

		pud = pud_offset(pgd, va);
		pmd = pmd_offset(pud, va);
		if (!pmd_present(*pmd))
			break;

		pte = pte_offset_kernel(pmd, va);
		if (!pte_present(*pte))
			break;

		pte_clear(NULL, va, pte);
	}
	paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
	unsigned long vaddr, end;

	/*
	 * Fixed mappings, only the page table structure has to be
	 * created - mappings will be set by set_fixmap():
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, pgd_base);
	early_ioremap_reset();
}

static void __init pagetable_init(void)
{
	pgd_t *pgd_base = swapper_pg_dir;

	permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
	__attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
	memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

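/*
 * The low identity mappings (the first KERNEL_PGD_BOUNDARY pgd slots) are
 * only needed while paging is being enabled in head_32.S; dropping them
 * here makes NULL and other low-address dereferences fault. Under PAE the
 * slot is pointed at empty_zero_page (with the present bit) rather than
 * cleared outright.
 */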
void zap_low_mappings(void)
{
	int i;

	/*
	 * Zap initial low-memory mappings.
	 *
	 * Note that "pgd_clear()" doesn't do it for
	 * us, because pgd_clear() is a no-op on i386.
	 */
	for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
#ifdef CONFIG_X86_PAE
		set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
		set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
	}
	flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
	if (!str || !strcmp(str, "on")) {
		if (cpu_has_nx) {
			__supported_pte_mask |= _PAGE_NX;
			disable_nx = 0;
		}
	} else {
		if (!strcmp(str, "off")) {
			disable_nx = 1;
			__supported_pte_mask &= ~_PAGE_NX;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
early_param("noexec", noexec_setup);

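/*
 * CPUID leaf 0x80000001, EDX bit 20, advertises NX support. Using it
 * requires setting EFER.NX in the EFER MSR as well as allowing _PAGE_NX
 * in the supported pte mask, which is what set_nx() does below.
 */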
static void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
	if (!arg)
		return -EINVAL;

	highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
	return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
	"highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
	"highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
	/* max_low_pfn is 0, we already have early_res support */
	max_low_pfn = max_pfn;

	if (highmem_pages == -1)
		highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
	if (highmem_pages >= max_pfn) {
		printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
			pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
		highmem_pages = 0;
	}
	if (highmem_pages) {
		if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
			printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
				pages_to_mb(highmem_pages));
			highmem_pages = 0;
		}
		max_low_pfn -= highmem_pages;
	}
#else
	if (highmem_pages)
		printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
	"only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
	"Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
	max_low_pfn = MAXMEM_PFN;

	if (highmem_pages == -1)
		highmem_pages = max_pfn - MAXMEM_PFN;

	if (highmem_pages + MAXMEM_PFN < max_pfn)
		max_pfn = MAXMEM_PFN + highmem_pages;

	if (highmem_pages + MAXMEM_PFN > max_pfn) {
		printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
			pages_to_mb(max_pfn - MAXMEM_PFN),
			pages_to_mb(highmem_pages));
		highmem_pages = 0;
	}
#ifndef CONFIG_HIGHMEM
	/* Maximum memory usable is what is directly addressable */
	printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
	if (max_pfn > MAX_NONPAE_PFN)
		printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
	else
		printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
	max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
	if (max_pfn > MAX_NONPAE_PFN) {
		max_pfn = MAX_NONPAE_PFN;
		printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
	}
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
	/* it could update max_pfn */

	if (max_pfn <= MAXMEM_PFN)
		lowmem_pfn_init();
	else
		highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
			 unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > max_low_pfn)
		highstart_pfn = max_low_pfn;
	memory_present(0, 0, highend_pfn);
	e820_register_active_regions(0, 0, highend_pfn);
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
		pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	memory_present(0, 0, max_low_pfn);
	e820_register_active_regions(0, 0, max_low_pfn);
	num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
	max_mapnr = num_physpages;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(max_low_pfn));

	setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

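/*
 * Zone limits, in pfns: ZONE_DMA covers the ISA DMA range below
 * MAX_DMA_ADDRESS (16MB), ZONE_NORMAL the rest of lowmem, and
 * ZONE_HIGHMEM everything above max_low_pfn.
 */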
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);
}

void __init setup_bootmem_allocator(void)
{
	int i;
	unsigned long bootmap_size, bootmap;
	/*
	 * Initialize the boot-time allocator (with low memory only):
	 */
	bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
	bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
				 max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
				 PAGE_SIZE);
	if (bootmap == -1L)
		panic("Cannot find bootmem map of size %ld\n", bootmap_size);
	reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

	/* don't touch min_low_pfn */
	bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);
	printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
		 max_pfn_mapped<<PAGE_SHIFT);
	printk(KERN_INFO "  low ram: %08lx - %08lx\n",
		 min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
	printk(KERN_INFO "  bootmap %08lx - %08lx\n",
		 bootmap, bootmap + bootmap_size);
	for_each_online_node(i)
		free_bootmem_with_active_regions(i, max_low_pfn);
	early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);

	after_init_bootmem = 1;
}

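/*
 * Worst-case sizing for the early page-table reservation: enough pud, pmd
 * and pte entries to map 'end' bytes (pte space shrinks to the unaligned
 * head/tail when PSE large pages are in use), each level rounded up to
 * whole pages, plus pte space for the fixmap. The window found here,
 * [table_start, table_top), is what alloc_low_page() hands out.
 */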
static void __init find_early_table_space(unsigned long end, int use_pse)
{
	unsigned long puds, pmds, ptes, tables, start;

	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

	if (use_pse) {
		unsigned long extra;

		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
		extra += PMD_SIZE;
		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	} else
		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

	/* for fixmap */
	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);

	/*
	 * RED-PEN putting page tables only on node 0 could
	 * cause a hotspot and fill up ZONE_DMA. The page tables
	 * need roughly 0.5KB per GB.
	 */
	start = 0x7000;
	table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
					tables, PAGE_SIZE);
	if (table_start == -1UL)
		panic("Cannot find space for the kernel page tables");

	table_start >>= PAGE_SHIFT;
	table_end = table_start;
	table_top = table_start + (tables>>PAGE_SHIFT);

	printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
		end, table_start << PAGE_SHIFT,
		(table_start << PAGE_SHIFT) + tables);
}

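/*
 * The range below is mapped in up to three pieces: a head below the first
 * 2/4MB boundary (always small pages, to avoid overlapping fixed MTRRs),
 * a PMD-aligned middle that may use large pages, and an unaligned tail
 * mapped with small pages again.
 */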
unsigned long __init_refok init_memory_mapping(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgd_base = swapper_pg_dir;
	unsigned long start_pfn, end_pfn;
	unsigned long big_page_start;
#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
	 * This will simplify cpa(), which otherwise needs to support splitting
	 * large pages into small in interrupt context, etc.
	 */
	int use_pse = 0;
#else
	int use_pse = cpu_has_pse;
#endif

	/*
	 * Find space for the kernel direct mapping tables.
	 */
	if (!after_init_bootmem)
		find_early_table_space(end, use_pse);

#ifdef CONFIG_X86_PAE
	set_nx();
	if (nx_enabled)
		printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

	/* Enable PSE if available */
	if (cpu_has_pse)
		set_in_cr4(X86_CR4_PSE);

	/* Enable PGE if available */
	if (cpu_has_pge) {
		set_in_cr4(X86_CR4_PGE);
		__supported_pte_mask |= _PAGE_GLOBAL;
	}

	/*
	 * Don't use a large page for the first 2/4MB of memory
	 * because there are often fixed size MTRRs in there
	 * and overlapping MTRRs into large pages can cause
	 * slowdowns.
	 */
	big_page_start = PMD_SIZE;

	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
	} else {
		/* head is not big page alignment ? */
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
				 << (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);

	/* big page range */
	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
			 << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
					     use_pse);

	/* tail is not big page alignment ? */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			kernel_physical_mapping_init(pgd_base, start_pfn,
						     end_pfn, 0);
	}

	early_ioremap_page_table_range_init(pgd_base);

	load_cr3(swapper_pg_dir);

	__flush_tlb_all();

	if (!after_init_bootmem)
		reserve_early(table_start << PAGE_SHIFT,
				 table_end << PAGE_SHIFT, "PGTABLE");

	if (!after_init_bootmem)
		early_memtest(start, end);

	return end >> PAGE_SHIFT;
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	kmap_init();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	sparse_init();
	zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
	printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

	/* Any page-aligned address will do, the test is non-destructive */
	__set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
	boot_cpu_data.wp_works_ok = do_test_wp_bit();
	clear_fixmap(FIX_WP_TEST);

	if (!boot_cpu_data.wp_works_ok) {
		printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
		panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
	} else {
		printk(KERN_CONT "Ok.\n");
	}
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif
	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages:
		 */
		if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
			reservedpages++;

	set_highmem_pages_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		   VMALLOC_END-VMALLOC_START);

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
			"%dk reserved, %dk data, %dk init, %ldk highmem)\n",
		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10,
		(unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
	       );

	printk(KERN_INFO "virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)__va(0), (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUILD_BUG_ON(VMALLOC_END			> PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
	BUILD_BUG_ON(VMALLOC_START			>= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE	> FIXADDR_START);
	BUG_ON(VMALLOC_END				> PKMAP_BASE);
#endif
	BUG_ON(VMALLOC_START				>= VMALLOC_END);
	BUG_ON((unsigned long)high_memory		> VMALLOC_START);

	if (boot_cpu_data.wp_works_ok < 0)
		test_wp_bit();

	save_pg_dir();
	zap_low_mappings();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata = NODE_DATA(nid);
	struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

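/*
 * A reading of the WP test asm below: 'flag' starts at 1. The asm reads
 * the read-only FIX_WP_TEST byte and writes it back; if WP is honoured in
 * supervisor mode the write faults and the exception table entry skips to
 * label 2, leaving flag at 1. If the write silently succeeds, the xorl
 * clears flag to 0.
 */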
/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		"	movb %0, %1	\n"
		"1:	movb %1, %0	\n"
		"	xorl %2, %2	\n"
		"2:			\n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
	/* Dynamic tracing modifies the kernel text section */
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
				   int flags)
{
	return reserve_bootmem(phys, len, flags);
}