x86: clean up output resulting from update_mptable option
arch/x86/kernel/e820.c
/*
 * Handle the memory map.
 * The functions here do the job until bootmem takes over.
 *
 * Getting sanitize_e820_map() in sync with i386 version by applying change:
 * -  Provisions for empty E820 memory regions (reported by certain BIOSes).
 *    Alex Achenbach <xela@slit.de>, December 2002.
 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/ioport.h>
#include <linux/string.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/suspend.h>
#include <linux/firmware-map.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/trampoline.h>

/*
 * The e820 map is the map that gets modified e.g. with command line parameters
 * and that is also registered with modifications in the kernel resource tree
 * with the iomem_resource as parent.
 *
 * The e820_saved is directly saved after the BIOS-provided memory map is
 * copied. It doesn't get modified afterwards. It's registered for the
 * /sys/firmware/memmap interface.
 *
 * That memory map is not modified and is used as the base for kexec. The
 * kexec'd kernel should get the same memory map as the firmware provides.
 * Then the user can e.g. boot the original kernel with mem=1G while still
 * booting the next kernel with full memory.
 */
struct e820map e820;
struct e820map e820_saved;

/* For PCI or other memory-mapped resources */
unsigned long pci_mem_start = 0xaeedbabe;
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_mem_start);
#endif

/*
 * This function checks if any part of the range <start,end> is mapped
 * with the given type.
 */
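/*
 * Note: a type of 0 acts as a wildcard here; the type check below is
 * skipped entirely, so an entry of any type matches.
 */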
int
e820_any_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(e820_any_mapped);

/*
 * This function checks if the entire range <start,end> is mapped with type.
 *
 * Note: this function only works correctly if the e820 table is sorted and
 * non-overlapping, which is the case.
 */
int __init e820_all_mapped(u64 start, u64 end, unsigned type)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (type && ei->type != type)
			continue;
		/* is the region (partly) in overlap with the current entry? */
		if (ei->addr >= end || ei->addr + ei->size <= start)
			continue;

		/*
		 * if the region is at the beginning of <start,end> we move
		 * start to the end of the region since it's ok until there
		 */
		if (ei->addr <= start)
			start = ei->addr + ei->size;
		/*
		 * if start is now at or beyond end, we're done, full
		 * coverage
		 */
		if (start >= end)
			return 1;
	}
	return 0;
}

/*
 * Add a memory region to the kernel e820 map.
 */
void __init e820_add_region(u64 start, u64 size, int type)
{
	int x = e820.nr_map;

	if (x == ARRAY_SIZE(e820.map)) {
		printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
		return;
	}

	e820.map[x].addr = start;
	e820.map[x].size = size;
	e820.map[x].type = type;
	e820.nr_map++;
}

void __init e820_print_map(char *who)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		printk(KERN_INFO " %s: %016Lx - %016Lx ", who,
		       (unsigned long long) e820.map[i].addr,
		       (unsigned long long)
		       (e820.map[i].addr + e820.map[i].size));
		switch (e820.map[i].type) {
		case E820_RAM:
		case E820_RESERVED_KERN:
			printk(KERN_CONT "(usable)\n");
			break;
		case E820_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		case E820_ACPI:
			printk(KERN_CONT "(ACPI data)\n");
			break;
		case E820_NVS:
			printk(KERN_CONT "(ACPI NVS)\n");
			break;
		case E820_UNUSABLE:
			printk(KERN_CONT "(unusable)\n");
			break;
		default:
			printk(KERN_CONT "type %u\n", e820.map[i].type);
			break;
		}
	}
}

/*
 * Sanitize the BIOS e820 map.
 *
 * Some e820 responses include overlapping entries. The following
 * replaces the original e820 map with a new one, removing overlaps,
 * and resolving conflicting memory types in favor of the highest
 * numbered type.
 *
 * The input parameter biosmap points to an array of 'struct
 * e820entry' which on entry has elements in the range [0, *pnr_map)
 * valid, and which has space for up to max_nr_map entries.
 * On return, the resulting sanitized e820 map entries will be
 * overwritten in the same location, starting at biosmap.
 *
 * The integer pointed to by pnr_map must be valid on entry (the
 * current number of valid entries located at biosmap) and will
 * be updated on return, with the new number of valid entries
 * (something no more than max_nr_map.)
 *
 * The return value from sanitize_e820_map() is zero if it
 * successfully 'sanitized' the map entries passed in, and is -1
 * if it did nothing, which can happen if either of (1) it was
 * only passed one map entry, or (2) any of the input map entries
 * were invalid (start + size < start, meaning that the size was
 * so big the described memory range wrapped around through zero.)
 *
 * Visually we're performing the following
 * (1,2,3,4 = memory types)...
 *
 * Sample memory map (w/overlaps):
 *	____22__________________
 *	______________________4_
 *	____1111________________
 *	_44_____________________
 *	11111111________________
 *	____________________33__
 *	___________44___________
 *	__________33333_________
 *	______________22________
 *	___________________2222_
 *	_________111111111______
 *	_____________________11_
 *	_________________4______
 *
 * Sanitized equivalent (no overlap):
 *	1_______________________
 *	_44_____________________
 *	___1____________________
 *	____22__________________
 *	______11________________
 *	_________1______________
 *	__________3_____________
 *	___________44___________
 *	_____________33_________
 *	_______________2________
 *	________________1_______
 *	_________________4______
 *	___________________2____
 *	____________________33__
 *	______________________4_
 */

int __init sanitize_e820_map(struct e820entry *biosmap, int max_nr_map,
			     int *pnr_map)
{
	struct change_member {
		struct e820entry *pbios; /* pointer to original bios entry */
		unsigned long long addr; /* address for this change point */
	};
	static struct change_member change_point_list[2*E820_X_MAX] __initdata;
	static struct change_member *change_point[2*E820_X_MAX] __initdata;
	static struct e820entry *overlap_list[E820_X_MAX] __initdata;
	static struct e820entry new_bios[E820_X_MAX] __initdata;
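	/*
	 * Each input entry contributes two change points (its start and
	 * its end address), hence the 2*E820_X_MAX sizing above.
	 */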
	struct change_member *change_tmp;
	unsigned long current_type, last_type;
	unsigned long long last_addr;
	int chgidx, still_changing;
	int overlap_entries;
	int new_bios_entry;
	int old_nr, new_nr, chg_nr;
	int i;

	/* if there's only one memory region, don't bother */
	if (*pnr_map < 2)
		return -1;

	old_nr = *pnr_map;
	BUG_ON(old_nr > max_nr_map);

	/* bail out if we find any unreasonable addresses in bios map */
	for (i = 0; i < old_nr; i++)
		if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
			return -1;

	/* create pointers for initial change-point information (for sorting) */
	for (i = 0; i < 2 * old_nr; i++)
		change_point[i] = &change_point_list[i];

	/* record all known change-points (starting and ending addresses),
	   omitting those that are for empty memory regions */
	chgidx = 0;
	for (i = 0; i < old_nr; i++) {
		if (biosmap[i].size != 0) {
			change_point[chgidx]->addr = biosmap[i].addr;
			change_point[chgidx++]->pbios = &biosmap[i];
			change_point[chgidx]->addr = biosmap[i].addr +
				biosmap[i].size;
			change_point[chgidx++]->pbios = &biosmap[i];
		}
	}
	chg_nr = chgidx;

	/* sort change-point list by memory addresses (low -> high) */
	still_changing = 1;
	while (still_changing) {
		still_changing = 0;
		for (i = 1; i < chg_nr; i++) {
			unsigned long long curaddr, lastaddr;
			unsigned long long curpbaddr, lastpbaddr;

			curaddr = change_point[i]->addr;
			lastaddr = change_point[i - 1]->addr;
			curpbaddr = change_point[i]->pbios->addr;
			lastpbaddr = change_point[i - 1]->pbios->addr;

			/*
			 * swap entries, when:
			 *
			 * curaddr < lastaddr or
			 * curaddr == lastaddr and curaddr == curpbaddr and
			 * lastaddr != lastpbaddr
			 */
			if (curaddr < lastaddr ||
			    (curaddr == lastaddr && curaddr == curpbaddr &&
			     lastaddr != lastpbaddr)) {
				change_tmp = change_point[i];
				change_point[i] = change_point[i-1];
				change_point[i-1] = change_tmp;
				still_changing = 1;
			}
		}
	}

	/* create a new bios memory map, removing overlaps */
	overlap_entries = 0;	/* number of entries in the overlap table */
	new_bios_entry = 0;	/* index for creating new bios map entries */
	last_type = 0;		/* start with undefined memory type */
	last_addr = 0;		/* start with 0 as last starting address */

	/* loop through change-points, determining the effect on the new bios map */
	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
		/* keep track of all overlapping bios entries */
		if (change_point[chgidx]->addr ==
		    change_point[chgidx]->pbios->addr) {
			/*
			 * add map entry to overlap list (> 1 entry
			 * implies an overlap)
			 */
			overlap_list[overlap_entries++] =
				change_point[chgidx]->pbios;
		} else {
			/*
			 * remove entry from list (order independent,
			 * so swap with last)
			 */
			for (i = 0; i < overlap_entries; i++) {
				if (overlap_list[i] ==
				    change_point[chgidx]->pbios)
					overlap_list[i] =
						overlap_list[overlap_entries-1];
			}
			overlap_entries--;
		}
		/*
		 * if there are overlapping entries, decide which
		 * "type" to use (larger value takes precedence --
		 * 1=usable, 2,3,4,4+=unusable)
		 */
		current_type = 0;
		for (i = 0; i < overlap_entries; i++)
			if (overlap_list[i]->type > current_type)
				current_type = overlap_list[i]->type;
		/*
		 * continue building up new bios map based on this
		 * information
		 */
		if (current_type != last_type) {
			if (last_type != 0) {
				new_bios[new_bios_entry].size =
					change_point[chgidx]->addr - last_addr;
				/*
				 * move forward only if the new size
				 * was non-zero
				 */
				if (new_bios[new_bios_entry].size != 0)
					/*
					 * no more space left for new
					 * bios entries ?
					 */
					if (++new_bios_entry >= max_nr_map)
						break;
			}
			if (current_type != 0) {
				new_bios[new_bios_entry].addr =
					change_point[chgidx]->addr;
				new_bios[new_bios_entry].type = current_type;
				last_addr = change_point[chgidx]->addr;
			}
			last_type = current_type;
		}
	}
	/* retain count for new bios entries */
	new_nr = new_bios_entry;

	/* copy new bios mapping into original location */
	memcpy(biosmap, new_bios, new_nr * sizeof(struct e820entry));
	*pnr_map = new_nr;

	return 0;
}

static int __init __append_e820_map(struct e820entry *biosmap, int nr_map)
{
	while (nr_map) {
		u64 start = biosmap->addr;
		u64 size = biosmap->size;
		u64 end = start + size;
		u32 type = biosmap->type;

		/* Overflow in 64 bits? Ignore the memory map. */
		if (start > end)
			return -1;

		e820_add_region(start, size, type);

		biosmap++;
		nr_map--;
	}
	return 0;
}

/*
 * Copy the BIOS e820 map into a safe place.
 *
 * Sanity-check it while we're at it..
 *
 * If we're lucky and live on a modern system, the setup code
 * will have given us a memory map that we can use to properly
 * set up memory. If we aren't, we'll fake a memory map.
 */
static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
{
	/* Only one memory region (or negative)? Ignore it */
	if (nr_map < 2)
		return -1;

	return __append_e820_map(biosmap, nr_map);
}

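/*
 * Change the type of memory within [start, start+size) from old_type to
 * new_type in the given map, splitting entries that are only partially
 * covered. Returns the number of bytes whose type was actually changed.
 */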
static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
					u64 size, unsigned old_type,
					unsigned new_type)
{
	int i;
	u64 real_updated_size = 0;

	BUG_ON(old_type == new_type);

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820x->nr_map; i++) {
		struct e820entry *ei = &e820x->map[i];
		u64 final_start, final_end;
		if (ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			ei->type = new_type;
			real_updated_size += ei->size;
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
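		/*
		 * Note: e820_add_region() always appends to the global e820
		 * map, so the re-typed piece of a partially covered entry
		 * lands there even when e820x points at e820_saved.
		 */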
		e820_add_region(final_start, final_end - final_start,
				new_type);
		real_updated_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_updated_size;
}

u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
			     unsigned new_type)
{
	return e820_update_range_map(&e820, start, size, old_type, new_type);
}

static u64 __init e820_update_range_saved(u64 start, u64 size,
					  unsigned old_type, unsigned new_type)
{
	return e820_update_range_map(&e820_saved, start, size, old_type,
				     new_type);
}

/* make e820 not cover the range */
u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
			     int checktype)
{
	int i;
	u64 real_removed_size = 0;

	if (size > (ULLONG_MAX - start))
		size = ULLONG_MAX - start;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 final_start, final_end;

		if (checktype && ei->type != old_type)
			continue;
		/* totally covered? */
		if (ei->addr >= start &&
		    (ei->addr + ei->size) <= (start + size)) {
			real_removed_size += ei->size;
			memset(ei, 0, sizeof(struct e820entry));
			continue;
		}
		/* partially covered */
		final_start = max(start, ei->addr);
		final_end = min(start + size, ei->addr + ei->size);
		if (final_start >= final_end)
			continue;
		real_removed_size += final_end - final_start;

		ei->size -= final_end - final_start;
		if (ei->addr < final_start)
			continue;
		ei->addr = final_end;
	}
	return real_removed_size;
}

void __init update_e820(void)
{
	int nr_map;

	nr_map = e820.nr_map;
	if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
		return;
	e820.nr_map = nr_map;
	printk(KERN_INFO "modified physical RAM map:\n");
	e820_print_map("modified");
}

static void __init update_e820_saved(void)
{
	int nr_map;

	nr_map = e820_saved.nr_map;
	if (sanitize_e820_map(e820_saved.map, ARRAY_SIZE(e820_saved.map),
			      &nr_map))
		return;
	e820_saved.nr_map = nr_map;
}

#define MAX_GAP_END 0x100000000ull
/*
 * Search for a gap in the e820 memory space from start_addr to end_addr.
 */
__init int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
		unsigned long start_addr, unsigned long long end_addr)
{
	unsigned long long last;
	int i = e820.nr_map;
	int found = 0;

	last = (end_addr && end_addr < MAX_GAP_END) ? end_addr : MAX_GAP_END;

545 while (--i >= 0) {
546 unsigned long long start = e820.map[i].addr;
547 unsigned long long end = start + e820.map[i].size;
548
3381959d
AK
549 if (end < start_addr)
550 continue;
551
b79cd8f1
YL
552 /*
553 * Since "last" is at most 4GB, we know we'll
554 * fit in 32 bits if this condition is true
555 */
556 if (last > end) {
557 unsigned long gap = last - end;
558
3381959d
AK
559 if (gap >= *gapsize) {
560 *gapsize = gap;
561 *gapstart = end;
b79cd8f1
YL
562 found = 1;
563 }
564 }
565 if (start < last)
566 last = start;
567 }
3381959d
AK
568 return found;
569}
570
/*
 * Search for the biggest gap in the low 32 bits of the e820
 * memory space. We pass this space to PCI to assign MMIO resources
 * for hotplug or unconfigured devices in.
 * Hopefully the BIOS leaves enough space for this.
 */
__init void e820_setup_gap(void)
{
	unsigned long gapstart, gapsize, round;
	int found;

	gapstart = 0x10000000;
	gapsize = 0x400000;
	found = e820_search_gap(&gapstart, &gapsize, 0, MAX_GAP_END);

#ifdef CONFIG_X86_64
	if (!found) {
		gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
		printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit "
		       "address range\n"
		       KERN_ERR "PCI: Unassigned devices with 32bit resource "
		       "registers may break!\n");
	}
#endif

	/*
	 * See how much we want to round up: start off with
	 * rounding to the next 1MB area.
	 */
	round = 0x100000;
	while ((gapsize >> 4) > round)
		round += round;
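	/*
	 * -round has only the bits at and above log2(round) set, so adding
	 * round and then masking with it rounds gapstart up to the next
	 * round-aligned address.
	 */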
	/* Fun with two's complement */
	pci_mem_start = (gapstart + round) & -round;

	printk(KERN_INFO
	       "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
	       pci_mem_start, gapstart, gapsize);
}

/**
 * Because of the size limitation of struct boot_params, only the first
 * 128 E820 memory entries are passed to the kernel via
 * boot_params.e820_map; the others are passed via the SETUP_E820_EXT
 * node of the linked list of struct setup_data, which is parsed here.
 */
void __init parse_e820_ext(struct setup_data *sdata, unsigned long pa_data)
{
	u32 map_len;
	int entries;
	struct e820entry *extmap;

	entries = sdata->len / sizeof(struct e820entry);
	map_len = sdata->len + sizeof(struct setup_data);
	if (map_len > PAGE_SIZE)
		sdata = early_ioremap(pa_data, map_len);
	extmap = (struct e820entry *)(sdata->data);
	__append_e820_map(extmap, entries);
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	if (map_len > PAGE_SIZE)
		early_iounmap(sdata, map_len);
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("extended");
}

#if defined(CONFIG_X86_64) || \
	(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
/**
 * Find the ranges of physical addresses that do not correspond to
 * e820 RAM areas and mark the corresponding pages as nosave for
 * hibernation (32 bit) or software suspend and suspend to RAM (64 bit).
 *
 * This function requires the e820 map to be sorted and without any
 * overlapping entries and assumes the first e820 area to be RAM.
 */
void __init e820_mark_nosave_regions(unsigned long limit_pfn)
{
	int i;
	unsigned long pfn;

	pfn = PFN_DOWN(e820.map[0].addr + e820.map[0].size);
	for (i = 1; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (pfn < PFN_UP(ei->addr))
			register_nosave_region(pfn, PFN_UP(ei->addr));

		pfn = PFN_DOWN(ei->addr + ei->size);
		if (ei->type != E820_RAM && ei->type != E820_RESERVED_KERN)
			register_nosave_region(PFN_UP(ei->addr), pfn);

		if (pfn >= limit_pfn)
			break;
	}
}
#endif

#ifdef CONFIG_HIBERNATION
/**
 * Mark ACPI NVS memory region, so that we can save/restore it during
 * hibernation and the subsequent resume.
 */
static int __init e820_mark_nvs_memory(void)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];

		if (ei->type == E820_NVS)
			hibernate_nvs_register(ei->addr, ei->size);
	}

	return 0;
}
core_initcall(e820_mark_nvs_memory);
#endif

/*
 * Early reserved memory areas.
 */
#define MAX_EARLY_RES 20

struct early_res {
	u64 start, end;
	char name[16];
	char overlap_ok;
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
	{ 0, PAGE_SIZE, "BIOS data page" },	/* BIOS data page */
	{}
};

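/*
 * Return the index of the first early_res slot that overlaps [start, end),
 * or, when there is no overlap, the index of the first unused slot (which
 * is MAX_EARLY_RES if the table is full).
 */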
static int __init find_overlapped_early(u64 start, u64 end)
{
	int i;
	struct early_res *r;

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];
		if (end > r->start && start < r->end)
			break;
	}

	return i;
}

/*
 * Drop the i-th range from the early reservation map,
 * by copying any higher ranges down one over it, and
 * clearing what had been the last slot.
 */
static void __init drop_range(int i)
{
	int j;

	for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
		;

	memmove(&early_res[i], &early_res[i + 1],
		(j - 1 - i) * sizeof(struct early_res));

	early_res[j - 1].end = 0;
}

/*
 * Split any existing ranges that:
 * 1) are marked 'overlap_ok', and
 * 2) overlap with the stated range [start, end)
 * into whatever portion (if any) of the existing range is entirely
 * below or entirely above the stated range. Drop the portion
 * of the existing range that overlaps with the stated range,
 * which will allow the caller of this routine to then add that
 * stated range without conflicting with any existing range.
 */
static void __init drop_overlaps_that_are_ok(u64 start, u64 end)
{
	int i;
	struct early_res *r;
	u64 lower_start, lower_end;
	u64 upper_start, upper_end;
	char name[16];

	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		r = &early_res[i];

		/* Continue past non-overlapping ranges */
		if (end <= r->start || start >= r->end)
			continue;

		/*
		 * Leave non-ok overlaps as is; let caller
		 * panic "Overlapping early reservations"
		 * when it hits this overlap.
		 */
		if (!r->overlap_ok)
			return;

		/*
		 * We have an ok overlap. We will drop it from the early
		 * reservation map, and add back in any non-overlapping
		 * portions (lower or upper) as separate, overlap_ok,
		 * non-overlapping ranges.
		 */

		/* 1. Note any non-overlapping (lower or upper) ranges. */
		strncpy(name, r->name, sizeof(name) - 1);
		name[sizeof(name) - 1] = '\0';	/* strncpy need not terminate */

		lower_start = lower_end = 0;
		upper_start = upper_end = 0;
		if (r->start < start) {
			lower_start = r->start;
			lower_end = start;
		}
		if (r->end > end) {
			upper_start = end;
			upper_end = r->end;
		}

		/* 2. Drop the original ok overlapping range */
		drop_range(i);

		i--;		/* resume for-loop on copied down entry */

		/* 3. Add back in any non-overlapping ranges. */
		if (lower_end)
			reserve_early_overlap_ok(lower_start, lower_end, name);
		if (upper_end)
			reserve_early_overlap_ok(upper_start, upper_end, name);
	}
}

static void __init __reserve_early(u64 start, u64 end, char *name,
				   int overlap_ok)
{
	int i;
	struct early_res *r;

	i = find_overlapped_early(start, end);
	if (i >= MAX_EARLY_RES)
		panic("Too many early reservations");
	r = &early_res[i];
	if (r->end)
		panic("Overlapping early reservations "
		      "%llx-%llx %s to %llx-%llx %s\n",
		      start, end - 1, name ? name : "", r->start,
		      r->end - 1, r->name);
	r->start = start;
	r->end = end;
	r->overlap_ok = overlap_ok;
	if (name)
		strncpy(r->name, name, sizeof(r->name) - 1);
}

/*
 * A few early reservations come here.
 *
 * The 'overlap_ok' in the name of this routine does -not- mean it
 * is ok for these reservations to overlap an earlier reservation.
 * Rather it means that it is ok for subsequent reservations to
 * overlap this one.
 *
 * Use this entry point to reserve early ranges when you are doing
 * so out of "Paranoia", reserving perhaps more memory than you need,
 * just in case, and don't mind a subsequent overlapping reservation
 * that is known to be needed.
 *
 * The drop_overlaps_that_are_ok() call here isn't really needed.
 * It would be needed if we had two colliding 'overlap_ok'
 * reservations, so that the second such would not panic on the
 * overlap with the first. We don't have any such as of this
 * writing, but might as well tolerate such if it happens in
 * the future.
 */
void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
{
	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 1);
}

/*
 * Most early reservations come here.
 *
 * We first have drop_overlaps_that_are_ok() drop any pre-existing
 * 'overlap_ok' ranges, so that we can then reserve this memory
 * range without risk of panic'ing on an overlapping overlap_ok
 * early reservation.
 */
void __init reserve_early(u64 start, u64 end, char *name)
{
	if (start >= end)
		return;

	drop_overlaps_that_are_ok(start, end);
	__reserve_early(start, end, name, 0);
}

void __init free_early(u64 start, u64 end)
{
	struct early_res *r;
	int i;

	i = find_overlapped_early(start, end);
	r = &early_res[i];
	if (i >= MAX_EARLY_RES || r->end != end || r->start != start)
		panic("free_early on not reserved area: %llx-%llx!",
		      start, end - 1);

	drop_range(i);
}

void __init early_res_to_bootmem(u64 start, u64 end)
{
	int i, count;
	u64 final_start, final_end;

	count = 0;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++)
		count++;

	printk(KERN_INFO "(%d early reservations) ==> bootmem [%010llx - %010llx]\n",
	       count, start, end);
	for (i = 0; i < count; i++) {
		struct early_res *r = &early_res[i];
		printk(KERN_INFO "  #%d [%010llx - %010llx] %16s", i,
		       r->start, r->end, r->name);
		final_start = max(start, r->start);
		final_end = min(end, r->end);
		if (final_start >= final_end) {
			printk(KERN_CONT "\n");
			continue;
		}
		printk(KERN_CONT " ==> [%010llx - %010llx]\n",
		       final_start, final_end);
		reserve_bootmem_generic(final_start, final_end - final_start,
					BOOTMEM_DEFAULT);
	}
}

/* Check for already reserved areas */
static inline int __init bad_addr(u64 *addrp, u64 size, u64 align)
{
	int i;
	u64 addr = *addrp;
	int changed = 0;
	struct early_res *r;
again:
	i = find_overlapped_early(addr, addr + size);
	r = &early_res[i];
	if (i < MAX_EARLY_RES && r->end) {
		*addrp = addr = round_up(r->end, align);
		changed = 1;
		goto again;
	}
	return changed;
}

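/*
 * Like bad_addr(), but may also shrink *sizep so that the range
 * [addr, addr + size) steers clear of the early reservations;
 * returns whether either value was changed.
 */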
/* Check for already reserved areas */
static inline int __init bad_addr_size(u64 *addrp, u64 *sizep, u64 align)
{
	int i;
	u64 addr = *addrp, last;
	u64 size = *sizep;
	int changed = 0;
again:
	last = addr + size;
	for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
		struct early_res *r = &early_res[i];
		if (last > r->start && addr < r->start) {
			size = r->start - addr;
			changed = 1;
			goto again;
		}
		if (last > r->end && addr < r->end) {
			addr = round_up(r->end, align);
			size = last - addr;
			changed = 1;
			goto again;
		}
		if (last <= r->end && addr >= r->start) {
			(*sizep)++;
			return 0;
		}
	}
	if (changed) {
		*addrp = addr;
		*sizep = size;
	}
	return changed;
}

/*
 * Find a free area with specified alignment in a specific range.
 */
u64 __init find_e820_area(u64 start, u64 end, u64 size, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		while (bad_addr(&addr, size, align) && addr+size <= ei_last)
			;
		last = addr + size;
		if (last > ei_last)
			continue;
		if (last > end)
			continue;
		return addr;
	}
	return -1ULL;
}

/*
 * Find next free range after *start
 */
u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
{
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		u64 addr, last;
		u64 ei_last;

		if (ei->type != E820_RAM)
			continue;
		addr = round_up(ei->addr, align);
		ei_last = ei->addr + ei->size;
		if (addr < start)
			addr = round_up(start, align);
		if (addr >= ei_last)
			continue;
		*sizep = ei_last - addr;
		while (bad_addr_size(&addr, sizep, align) &&
		       addr + *sizep <= ei_last)
			;
		last = addr + *sizep;
		if (last > ei_last)
			continue;
		return addr;
	}
	return -1UL;
}

/*
 * Pre-allocate a memory area and reserve it in the e820 map.
 */
u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
{
	u64 size = 0;
	u64 addr;
	u64 start;

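	/*
	 * Walk successive free E820_RAM ranges from startt until one of
	 * at least sizet bytes is found; find_e820_area_size() returns
	 * the range's start and stores its length in 'size'.
	 */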
	start = startt;
	while (size < sizet)
		start = find_e820_area_size(start, &size, align);

	if (size < sizet)
		return 0;

	addr = round_down(start + size - sizet, align);
	e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
	e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
	printk(KERN_INFO "update e820 for early_reserve_e820\n");
	update_e820();
	update_e820_saved();

	return addr;
}

#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_PAE
#  define MAX_ARCH_PFN		(1ULL<<(36-PAGE_SHIFT))
# else
#  define MAX_ARCH_PFN		(1ULL<<(32-PAGE_SHIFT))
# endif
#else /* CONFIG_X86_32 */
# define MAX_ARCH_PFN MAXMEM>>PAGE_SHIFT
#endif

/*
 * Find the highest page frame number we have available
 */
static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
{
	int i;
	unsigned long last_pfn = 0;
	unsigned long max_arch_pfn = MAX_ARCH_PFN;

	for (i = 0; i < e820.nr_map; i++) {
		struct e820entry *ei = &e820.map[i];
		unsigned long start_pfn;
		unsigned long end_pfn;

		if (ei->type != type)
			continue;

		start_pfn = ei->addr >> PAGE_SHIFT;
		end_pfn = (ei->addr + ei->size) >> PAGE_SHIFT;

		if (start_pfn >= limit_pfn)
			continue;
		if (end_pfn > limit_pfn) {
			last_pfn = limit_pfn;
			break;
		}
		if (end_pfn > last_pfn)
			last_pfn = end_pfn;
	}

	if (last_pfn > max_arch_pfn)
		last_pfn = max_arch_pfn;

	printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n",
	       last_pfn, max_arch_pfn);
	return last_pfn;
}

unsigned long __init e820_end_of_ram_pfn(void)
{
	return e820_end_pfn(MAX_ARCH_PFN, E820_RAM);
}

unsigned long __init e820_end_of_low_ram_pfn(void)
{
	return e820_end_pfn(1UL<<(32 - PAGE_SHIFT), E820_RAM);
}

/*
 * Finds an active region in the address range from start_pfn to last_pfn and
 * returns its range in ei_startpfn and ei_endpfn for the e820 entry.
 */
int __init e820_find_active_region(const struct e820entry *ei,
				   unsigned long start_pfn,
				   unsigned long last_pfn,
				   unsigned long *ei_startpfn,
				   unsigned long *ei_endpfn)
{
	u64 align = PAGE_SIZE;

	*ei_startpfn = round_up(ei->addr, align) >> PAGE_SHIFT;
	*ei_endpfn = round_down(ei->addr + ei->size, align) >> PAGE_SHIFT;

	/* Skip map entries smaller than a page */
	if (*ei_startpfn >= *ei_endpfn)
		return 0;

	/* Skip if map is outside the node */
	if (ei->type != E820_RAM || *ei_endpfn <= start_pfn ||
	    *ei_startpfn >= last_pfn)
		return 0;

	/* Check for overlaps */
	if (*ei_startpfn < start_pfn)
		*ei_startpfn = start_pfn;
	if (*ei_endpfn > last_pfn)
		*ei_endpfn = last_pfn;

	return 1;
}

/* Walk the e820 map and register active regions within a node */
void __init e820_register_active_regions(int nid, unsigned long start_pfn,
					 unsigned long last_pfn)
{
	unsigned long ei_startpfn;
	unsigned long ei_endpfn;
	int i;

	for (i = 0; i < e820.nr_map; i++)
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			add_active_range(nid, ei_startpfn, ei_endpfn);
}

/*
 * Find the hole size (in bytes) in the memory range.
 * @start: starting address of the memory range to scan
 * @end: ending address of the memory range to scan
 */
u64 __init e820_hole_size(u64 start, u64 end)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long last_pfn = end >> PAGE_SHIFT;
	unsigned long ei_startpfn, ei_endpfn, ram = 0;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		if (e820_find_active_region(&e820.map[i],
					    start_pfn, last_pfn,
					    &ei_startpfn, &ei_endpfn))
			ram += ei_endpfn - ei_startpfn;
	}
	return end - start - ((u64)ram << PAGE_SHIFT);
}

static void early_panic(char *msg)
{
	early_printk(msg);
	panic(msg);
}

static int userdef __initdata;

/* "mem=nopentium" disables the 4MB page tables. */
static int __init parse_memopt(char *p)
{
	u64 mem_size;

	if (!p)
		return -EINVAL;

#ifdef CONFIG_X86_32
	if (!strcmp(p, "nopentium")) {
		setup_clear_cpu_cap(X86_FEATURE_PSE);
		return 0;
	}
#endif

	userdef = 1;
	mem_size = memparse(p, &p);
	e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);

	return 0;
}
early_param("mem", parse_memopt);

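/*
 * "memmap=exactmap" clears the map; "memmap=nn@ss" marks RAM,
 * "memmap=nn#ss" marks ACPI data, "memmap=nn$ss" marks the range
 * reserved, and a bare "memmap=nn" limits usable RAM like "mem=nn".
 */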
static int __init parse_memmap_opt(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
		/*
		 * If we are doing a crash dump, we still need to know
		 * the real mem size before the original memory map is
		 * reset.
		 */
		saved_max_pfn = e820_end_of_ram_pfn();
#endif
		e820.nr_map = 0;
		userdef = 1;
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	userdef = 1;
	if (*p == '@') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RAM);
	} else if (*p == '#') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_ACPI);
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		e820_add_region(start_at, mem_size, E820_RESERVED);
	} else
		e820_remove_range(mem_size, ULLONG_MAX - mem_size, E820_RAM, 1);

	return *p == '\0' ? 0 : -EINVAL;
}
early_param("memmap", parse_memmap_opt);

void __init finish_e820_parsing(void)
{
	if (userdef) {
		int nr = e820.nr_map;

		if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr) < 0)
			early_panic("Invalid user supplied memory map");
		e820.nr_map = nr;

		printk(KERN_INFO "user-defined physical RAM map:\n");
		e820_print_map("user");
	}
}

static inline const char *e820_type_to_string(int e820_type)
{
	switch (e820_type) {
	case E820_RESERVED_KERN:
	case E820_RAM:		return "System RAM";
	case E820_ACPI:		return "ACPI Tables";
	case E820_NVS:		return "ACPI Non-volatile Storage";
	case E820_UNUSABLE:	return "Unusable memory";
	default:		return "reserved";
	}
}

/*
 * Mark e820 reserved areas as busy for the resource manager.
 */
static struct resource __initdata *e820_res;
void __init e820_reserve_resources(void)
{
	int i;
	struct resource *res;
	u64 end;

	res = alloc_bootmem_low(sizeof(struct resource) * e820.nr_map);
	e820_res = res;
	for (i = 0; i < e820.nr_map; i++) {
		end = e820.map[i].addr + e820.map[i].size - 1;
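		/*
		 * resource_size_t may be only 32 bits wide; skip entries
		 * whose end address would be truncated by the cast.
		 */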
		if (end != (resource_size_t)end) {
			res++;
			continue;
		}
		res->name = e820_type_to_string(e820.map[i].type);
		res->start = e820.map[i].addr;
		res->end = end;

		res->flags = IORESOURCE_MEM;

		/*
		 * Don't register regions that could conflict with PCI
		 * device BAR resources; those are inserted later, in
		 * pcibios_resource_survey().
		 */
		if (e820.map[i].type != E820_RESERVED || res->start < (1ULL<<20)) {
			res->flags |= IORESOURCE_BUSY;
			insert_resource(&iomem_resource, res);
		}
		res++;
	}

	for (i = 0; i < e820_saved.nr_map; i++) {
		struct e820entry *entry = &e820_saved.map[i];
		firmware_map_add_early(entry->addr,
				       entry->addr + entry->size - 1,
				       e820_type_to_string(entry->type));
	}
}

void __init e820_reserve_resources_late(void)
{
	int i;
	struct resource *res;

	res = e820_res;
	for (i = 0; i < e820.nr_map; i++) {
		if (!res->parent && res->end)
			insert_resource_expand_to_fit(&iomem_resource, res);
		res++;
	}
}

char *__init default_machine_specific_memory_setup(void)
{
	char *who = "BIOS-e820";
	int new_nr;
	/*
	 * Try to copy the BIOS-supplied E820-map.
	 *
	 * Otherwise fake a memory map; one section from 0k->640k,
	 * the next section from 1mb->appropriate_mem_k
	 */
	new_nr = boot_params.e820_entries;
	sanitize_e820_map(boot_params.e820_map,
			  ARRAY_SIZE(boot_params.e820_map),
			  &new_nr);
	boot_params.e820_entries = new_nr;
	if (append_e820_map(boot_params.e820_map, boot_params.e820_entries)
	    < 0) {
		u64 mem_size;

		/* compare results from other methods and take the greater */
		if (boot_params.alt_mem_k
		    < boot_params.screen_info.ext_mem_k) {
			mem_size = boot_params.screen_info.ext_mem_k;
			who = "BIOS-88";
		} else {
			mem_size = boot_params.alt_mem_k;
			who = "BIOS-e801";
		}

		e820.nr_map = 0;
		e820_add_region(0, LOWMEMSIZE(), E820_RAM);
		e820_add_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
	}

	/* In case someone cares... */
	return who;
}

char *__init __attribute__((weak)) machine_specific_memory_setup(void)
{
	if (x86_quirks->arch_memory_setup) {
		char *who = x86_quirks->arch_memory_setup();

		if (who)
			return who;
	}
	return default_machine_specific_memory_setup();
}

/* Overridden in paravirt.c if CONFIG_PARAVIRT */
char * __init __attribute__((weak)) memory_setup(void)
{
	return machine_specific_memory_setup();
}

void __init setup_memory_map(void)
{
	char *who;

	who = memory_setup();
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "BIOS-provided physical RAM map:\n");
	e820_print_map(who);
}