arch/x86/mm/numa_64.c
/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/k8.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;

DEFINE_PER_CPU(int, node_number) = 0;
EXPORT_PER_CPU_SYMBOL(node_number);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 *      1 if OK
 *      0 if memnodemap[] too small (or shift too small)
 *     -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;

                        if (!nodeids)
                                memnodemap[addr >> shift] = i;
                        else
                                memnodemap[addr >> shift] = nodeids[i];

                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}

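/*
 * Illustrative sketch (not part of the original file): once populated,
 * memnodemap[] is a flat physical-address-to-node hash, indexed by the
 * address shifted right by memnode_shift. Under that assumption, a
 * lookup equivalent to phys_to_nid() is a single array access:
 */
#if 0   /* example only */
static inline int example_phys_to_nid(unsigned long paddr)
{
        /* memnode_shift and memnodemap are set up by compute_hash_shift() */
        return memnodemap[paddr >> memnode_shift];
}
#endif
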
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = roundup(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
                                      nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == -1UL) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i)+1;
        return i;
}

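/*
 * Worked example (added for illustration): with two nodes starting at
 * 0x0 and 0x40000000 (1GB), the lowest set bit across the start
 * addresses is bit 30, so the returned shift is 30 and each
 * memnodemap[] entry covers a 1GB chunk. With memtop at 0x80000000
 * (2GB) the map then needs (0x80000000 >> 30) + 1 = 3 entries.
 */
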
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned; you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE, "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}

int __meminit __early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem = find_e820_area(start, end, size, align);
        void *ptr;

        if (mem != -1L)
                return __va(mem);

        ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
        if (ptr == NULL) {
                printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);
                return NULL;
        }
        return ptr;
}

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start,
                               unsigned long end)
{
        unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
        unsigned long bootmap_start, nodedata_phys;
        void *bootmap;
        const int pgdat_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        if (!end)
                return;

        start = roundup(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        printk(KERN_INFO " NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
               nodedata_phys + pgdat_size - 1);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &bootmem_node_data[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        /*
         * Find a place for the bootmem map.
         * nodedata_phys could be on another node, courtesy of alloc_bootmem,
         * so make sure bootmap_start is not too low; otherwise early_node_mem
         * would satisfy it with find_e820_area instead of alloc_bootmem,
         * which could clash with the reserved range.
         */
        bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
        nid = phys_to_nid(nodedata_phys);
        if (nid == nodeid)
                bootmap_start = roundup(nodedata_phys + pgdat_size, PAGE_SIZE);
        else
                bootmap_start = roundup(start, PAGE_SIZE);
        /*
         * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
         * to align to PAGE_SIZE.
         */
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
        if (bootmap == NULL) {
                if (nodedata_phys < start || nodedata_phys >= end)
                        free_bootmem(nodedata_phys, pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, last_pfn);

        printk(KERN_INFO " bootmap [%016lx - %016lx] pages %lx\n",
               bootmap_start, bootmap_start + bootmap_size - 1,
               bootmap_pages);

        free_bootmem_with_active_regions(nodeid, end);

        /*
         * Convert early reservations to bootmem reservations now;
         * otherwise a later early_node_mem call could hand out memory
         * that is early-reserved on a previous node.
         */
        early_res_to_bootmem(start, end);

        /*
         * In some cases early_node_mem falls back to alloc_bootmem and may
         * get a range on another node; don't reserve that again.
         */
        if (nid != nodeid)
                printk(KERN_INFO " NODE_DATA(%d) on node %d\n", nodeid, nid);
        else
                reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
                                     pgdat_size, BOOTMEM_DEFAULT);
        nid = phys_to_nid(bootmap_start);
        if (nid != nodeid)
                printk(KERN_INFO " bootmap(%d) on node %d\n", nodeid, nid);
        else
                reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
                                     bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);

#ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
#endif
        node_set_online(nodeid);
}

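/*
 * Sizing example (added for illustration): bootmem_bootmap_pages()
 * needs one bit per page in the node. A 4GB node spans 1,048,576
 * 4KB pages, so its bitmap takes 131,072 bytes, i.e. 32 pages, which
 * is what early_node_mem() is asked to find above.
 */
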
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < nr_cpu_ids; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}

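/*
 * Example (added for illustration): with nodes 0 and 1 online and four
 * CPUs still at NUMA_NO_NODE, the loop above assigns them round robin:
 * cpu0->node0, cpu1->node1, cpu2->node0, cpu3->node1.
 */
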
#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static char *cmdline __initdata;

/*
 * Sets up nid to range from addr to addr + size. If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise. addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
                                   u64 size, u64 max_addr)
{
        int ret = 0;

        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}

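/*
 * Example (added for illustration): with *addr == 0 and size == 512MB,
 * setup_node_range(0, nodes, &addr, 512ULL << 20, max_addr) records
 * nodes[0] as [0, 0x20000000), advances *addr to 0x20000000 and
 * returns 0 as long as max_addr has not been reached.
 */
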
/*
 * Splits num_nodes nodes up equally starting at node_start. The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start,
                                      int num_nodes)
{
        unsigned int big;
        u64 size;
        int i;

        if (num_nodes <= 0)
                return -1;
        if (num_nodes > MAX_NUMNODES)
                num_nodes = MAX_NUMNODES;
        size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
               num_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the leftovers.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
              FAKE_NODE_MIN_SIZE;

        /* Round down to nearest FAKE_NODE_MIN_SIZE. */
        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                printk(KERN_ERR "Not enough memory for each node. "
                       "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = node_start; i < num_nodes + node_start; i++) {
                u64 end = *addr + size;

                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
                 * The final node can have the remaining system RAM. Other
                 * nodes receive roughly the same amount of available pages.
                 */
                if (i == num_nodes + node_start - 1)
                        end = max_addr;
                else
                        while (end - *addr - e820_hole_size(*addr, end) <
                               size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > max_addr) {
                                        end = max_addr;
                                        break;
                                }
                        }
                if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
                        break;
        }
        return i - node_start + 1;
}

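/*
 * Worked example (added for illustration, assuming the usual 64MB
 * FAKE_NODE_MIN_SIZE): splitting a hole-free 4GB range into 3 nodes
 * gives size = 4096MB/3, about 1365MB, rounded down to 1344MB. The
 * first two nodes get 1344MB each and the final node absorbs the
 * remaining 1408MB.
 */
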
/*
 * Splits the remaining system RAM into chunks of size. The remaining memory is
 * always assigned to a final node and can be asymmetric. Returns the number of
 * nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start, u64 size)
{
        int i = node_start;
        size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
        while (!setup_node_range(i++, nodes, addr, size, max_addr))
                ;
        return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static struct bootnode nodes[MAX_NUMNODES] __initdata;

static int __init numa_emulation(unsigned long start_pfn, unsigned long last_pfn)
{
        u64 size, addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;

        memset(&nodes, 0, sizeof(nodes));
        /*
         * If the numa=fake command-line is just a single number N, split the
         * system RAM into N fake nodes.
         */
        if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
                long n = simple_strtol(cmdline, NULL, 0);

                num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
                if (num_nodes < 0)
                        return num_nodes;
                goto out;
        }

        /* Parse the command line. */
        for (coeff_flag = 0; ; cmdline++) {
                if (*cmdline && isdigit(*cmdline)) {
                        num = num * 10 + *cmdline - '0';
                        continue;
                }
                if (*cmdline == '*') {
                        if (num > 0)
                                coeff = num;
                        coeff_flag = 1;
                }
                if (!*cmdline || *cmdline == ',') {
                        if (!coeff_flag)
                                coeff = 1;
                        /*
                         * Round down to the nearest FAKE_NODE_MIN_SIZE.
                         * Command-line coefficients are in megabytes.
                         */
                        size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
                        if (size)
                                for (i = 0; i < coeff; i++, num_nodes++)
                                        if (setup_node_range(num_nodes, nodes,
                                                &addr, size, max_addr) < 0)
                                                goto done;
                        if (!*cmdline)
                                break;
                        coeff_flag = 0;
                        coeff = -1;
                }
                num = 0;
        }
done:
        if (!num_nodes)
                return -1;
        /* Fill remainder of system RAM, if appropriate. */
        if (addr < max_addr) {
                if (coeff_flag && coeff < 0) {
                        /* Split remaining nodes into num-sized chunks */
                        num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
                                                         num_nodes, num);
                        goto out;
                }
                switch (*(cmdline - 1)) {
                case '*':
                        /* Split remaining nodes into coeff chunks */
                        if (coeff <= 0)
                                break;
                        num_nodes += split_nodes_equally(nodes, &addr, max_addr,
                                                         num_nodes, coeff);
                        break;
                case ',':
                        /* Do not allocate remaining system RAM */
                        break;
                default:
                        /* Give one final node */
                        setup_node_range(num_nodes, nodes, &addr,
                                         max_addr - addr, max_addr);
                        num_nodes++;
                }
        }
out:
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered by
         * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
         * true. NUMA emulation has succeeded so we will not scan ACPI nodes.
         */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        acpi_numa = -1;
#endif
        for_each_node_mask(i, node_possible_map) {
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                             nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}

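/*
 * Command-line examples (added for illustration, derived from the
 * parser above): "numa=fake=8" splits RAM into 8 equal fake nodes;
 * "numa=fake=2*512,4*256" creates two 512MB and four 256MB nodes and
 * puts the remaining RAM into one final node; a trailing ',' as in
 * "numa=fake=2*512," leaves the remainder unallocated.
 */
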
#endif /* CONFIG_NUMA_EMU */

void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
{
        int i;

        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, last_pfn))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          last_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
                                        last_pfn<<PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               last_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < nr_cpu_ids; i++)
                numa_set_node(i, 0);
        e820_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}

unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        return pages;
}

void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        free_area_init_nodes(max_zone_pfns);
}

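/*
 * Zone layout note (added for illustration): on x86-64 this places
 * ZONE_DMA below 16MB (MAX_DMA_PFN), ZONE_DMA32 below 4GB
 * (MAX_DMA32_PFN) and ZONE_NORMAL up to max_pfn, the top of RAM.
 */
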
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
        if (!strncmp(opt, "hotadd=", 7))
                hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
        return 0;
}
early_param("numa", numa_setup);

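/*
 * Usage summary (added for illustration): the "numa=" early parameter
 * accepts "off" to disable NUMA, "fake=<layout>" for the emulation
 * above, and, with ACPI NUMA, "noacpi" to ignore SRAT and "hotadd=<n>"
 * to set the hot-added memory percentage.
 */
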
#ifdef CONFIG_NUMA
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[] and
 * apicid_to_node[] have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK, as cpu_to_node[]
 * is already initialized in a round-robin manner at numa_init_array()
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node;
                u16 apicid = cpu_to_apicid[cpu];

                if (apicid == BAD_APICID)
                        continue;
                node = apicid_to_node[apicid];
                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        continue;
                numa_set_node(cpu, node);
        }
}
#endif

void __cpuinit numa_set_node(int cpu, int node)
{
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

        /* early setting, no percpu area yet */
        if (cpu_to_node_map) {
                cpu_to_node_map[cpu] = node;
                return;
        }

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
        if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
                printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
                dump_stack();
                return;
        }
#endif
        per_cpu(x86_cpu_to_node_map, cpu) = node;

        if (node != NUMA_NO_NODE)
                per_cpu(node_number, cpu) = node;
}

void __cpuinit numa_clear_node(int cpu)
{
        numa_set_node(cpu, NUMA_NO_NODE);
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
        cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
        int node = early_cpu_to_node(cpu);
        struct cpumask *mask;
        char buf[64];

        mask = node_to_cpumask_map[node];
        if (mask == NULL) {
                printk(KERN_ERR "node_to_cpumask_map[%i] NULL\n", node);
                dump_stack();
                return;
        }

        if (enable)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_clear_cpu(cpu, mask);

        cpulist_scnprintf(buf, sizeof(buf), mask);
        printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
        numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
        numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
                printk(KERN_WARNING
                       "cpu_to_node(%d): usage too early!\n", cpu);
                dump_stack();
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
                return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

        if (!cpu_possible(cpu)) {
                printk(KERN_WARNING
                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
}

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */