/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *      Russ Anderson <rja@sgi.com>
 *      Jesse Barnes <jbarnes@sgi.com>
 *      Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
        struct ia64_node_data *node_data;
        unsigned long pernode_addr;
        unsigned long pernode_size;
        struct bootmem_data bootmem_data;
        unsigned long num_physpages;
        unsigned long num_dma_physpages;
        unsigned long min_pfn;
        unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

static pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET   (32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)                                      \
        ((((addr) + 1024*1024-1) & ~(1024*1024-1)) +                    \
             (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))

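/*
 * A quick worked example of the macro above, assuming the usual ia64
 * PERCPU_PAGE_SIZE of 64KB: the candidate address is first rounded up to a
 * 1MB boundary, then offset by (node * 64KB) modulo 32MB.  So for
 * addr = 0x2345678:
 *
 *      NODEDATA_ALIGN(0x2345678, 0) == 0x2400000
 *      NODEDATA_ALIGN(0x2345678, 1) == 0x2410000
 *      NODEDATA_ALIGN(0x2345678, 2) == 0x2420000
 *
 * giving each node's structures a different cache color.
 */
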
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary. Any non-existent pages will simply be part of the virtual
 * memmap. We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
                                  int node)
{
        unsigned long cstart, epfn, end = start + len;
        struct bootmem_data *bdp = &mem_data[node].bootmem_data;

        epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
        cstart = GRANULEROUNDDOWN(start);

        if (!bdp->node_low_pfn) {
                bdp->node_boot_start = cstart;
                bdp->node_low_pfn = epfn;
        } else {
                bdp->node_boot_start = min(cstart, bdp->node_boot_start);
                bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
        }

        min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
        max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

        return 0;
}

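/*
 * build_node_maps() may run several times for one node, once per contiguous
 * EFI range.  As a sketch, assuming 16MB granules: ranges
 * [0x1000000, 0x2000000) and [0x4000000, 0x5000000) on the same node leave
 * node_boot_start at 0x1000000 and node_low_pfn at the pfn of 0x5000000;
 * the gap between them is covered only by the virtual memmap.
 */
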
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node. We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet. Note that node 0 will also count all non-existent cpus.
 */
static int __init early_nr_cpus_node(int node)
{
        int cpu, n = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (node == node_cpuid[cpu].nid)
                        n++;

        return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __init compute_pernodesize(int node)
{
        unsigned long pernodesize = 0, cpus;

        cpus = early_nr_cpus_node(node);
        pernodesize += PERCPU_PAGE_SIZE * cpus;
        pernodesize += node * L1_CACHE_BYTES;
        pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
        pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
        pernodesize = PAGE_ALIGN(pernodesize);
        return pernodesize;
}

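/*
 * Rough illustration, not exact sizes: for a hypothetical node 1 with two
 * cpus, a 64KB PERCPU_PAGE_SIZE and 128-byte cache lines, the total is
 * 2 * 64KB of per-cpu space, plus 128 bytes of node-coloring pad, plus the
 * cache-aligned pg_data_t and ia64_node_data, rounded up to a full page.
 * The exact numbers depend on the kernel configuration.
 */
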
/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node. Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (node == node_cpuid[cpu].nid) {
                        memcpy(__va(cpu_data), __phys_per_cpu_start,
                               __per_cpu_end - __per_cpu_start);
                        __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
                                __per_cpu_start;
                        cpu_data += PERCPU_PAGE_SIZE;
                }
        }
#endif
        return cpu_data;
}

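/*
 * Once __per_cpu_offset[] is filled in, per-cpu references resolve through
 * it: conceptually per_cpu(var, cpu) is *(&per_cpu__var +
 * __per_cpu_offset[cpu]), so each cpu ends up using the copy placed in its
 * own node's pernode area.
 */
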
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
        unsigned long pernodesize)
{
        void *cpu_data;
        int cpus = early_nr_cpus_node(node);
        struct bootmem_data *bdp = &mem_data[node].bootmem_data;

        mem_data[node].pernode_addr = pernode;
        mem_data[node].pernode_size = pernodesize;
        memset(__va(pernode), 0, pernodesize);

        cpu_data = (void *)pernode;
        pernode += PERCPU_PAGE_SIZE * cpus;
        pernode += node * L1_CACHE_BYTES;

        pgdat_list[node] = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

        mem_data[node].node_data = __va(pernode);
        pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

        pgdat_list[node]->bdata = bdp;
        pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

        cpu_data = per_cpu_node_setup(cpu_data, node);

        return;
}

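/*
 * The pointer arithmetic above carves the zeroed pernode region into the
 * layout documented at find_pernode_space() below: the per-cpu areas come
 * first, then a node-colored pad, then the node's pg_data_t and
 * ia64_node_data.
 */
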
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct. Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized. We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
                                     int node)
{
        unsigned long epfn;
        unsigned long pernodesize = 0, pernode, pages, mapsize;
        struct bootmem_data *bdp = &mem_data[node].bootmem_data;

        epfn = (start + len) >> PAGE_SHIFT;

        pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
        mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

        /*
         * Make sure this memory falls within this node's usable memory
         * since we may have thrown some away in build_maps().
         */
        if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
                return 0;

        /* Don't setup this node's local space twice... */
        if (mem_data[node].pernode_addr)
                return 0;

        /*
         * Calculate total size needed, incl. what's necessary
         * for good alignment and alias prevention.
         */
        pernodesize = compute_pernodesize(node);
        pernode = NODEDATA_ALIGN(start, node);

        /* Is this range big enough for what we want to store here? */
        if (start + len > (pernode + pernodesize + mapsize))
                fill_pernode(node, pernode, pernodesize);

        return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct. After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
                                    int node)
{
        free_bootmem_node(pgdat_list[node], start, len);

        return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
        unsigned long base, size, pages;
        struct bootmem_data *bdp;
        int node;

        for_each_online_node(node) {
                pg_data_t *pdp = pgdat_list[node];

                if (node_isset(node, memory_less_mask))
                        continue;

                bdp = pdp->bdata;

                /* First the bootmem_map itself */
                pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
                size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
                base = __pa(bdp->node_bootmem_map);
                reserve_bootmem_node(pdp, base, size);

                /* Now the per-node space */
                size = mem_data[node].pernode_size;
                base = __pa(mem_data[node].pernode_addr);
                reserve_bootmem_node(pdp, base, size);
        }
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure. The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
        int cpu, node;

        /* Copy the pg_data_t list to each node and init the node field */
        for_each_online_node(node) {
                memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
                       sizeof(pgdat_list));
        }
#ifdef CONFIG_SMP
        /* Set the node_data pointer for each per-cpu struct */
        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                node = node_cpuid[cpu].nid;
                per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
        }
#else
        {
                struct cpuinfo_ia64 *cpu0_cpu_info;
                cpu = 0;
                node = node_cpuid[cpu].nid;
                cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
                        ((char *)&per_cpu__cpu_info - __per_cpu_start));
                cpu0_cpu_info->node_data = mem_data[node].node_data;
        }
#endif /* CONFIG_SMP */
}

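/*
 * The per-node copy of pgdat_list is what makes NODE_DATA(nid) cheap here:
 * on this platform it expands to roughly
 * local_node_data->pg_data_ptrs[nid], so the lookup stays in node-local
 * memory instead of chasing one global array.
 */
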
/**
 * memory_less_node_alloc - attempt to allocate pernode memory on the best
 * NUMA SLIT node, falling back to any other node when __alloc_bootmem_node()
 * fails for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
        void *ptr = NULL;
        u8 best = 0xff;
        int bestnode = -1, node, anynode = 0;

        for_each_online_node(node) {
                if (node_isset(node, memory_less_mask))
                        continue;
                else if (node_distance(nid, node) < best) {
                        best = node_distance(nid, node);
                        bestnode = node;
                }
                anynode = node;
        }

        if (bestnode == -1)
                bestnode = anynode;

        ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
                PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

        return ptr;
}

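/*
 * node_distance() is the ACPI SLIT value, where by convention a node's
 * distance to itself is 10 and remote nodes are larger.  If a memoryless
 * node's SLIT row were { 10, 21, 17 }, the loop above would pick node 2 as
 * the donor, assuming node 2 has memory.
 */
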
/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *      information.
 */
static void __init memory_less_nodes(void)
{
        unsigned long pernodesize;
        void *pernode;
        int node;

        for_each_node_mask(node, memory_less_mask) {
                pernodesize = compute_pernodesize(node);
                pernode = memory_less_node_alloc(node, pernodesize);
                fill_pernode(node, __pa(pernode), pernodesize);
        }

        return;
}

#ifdef CONFIG_SPARSEMEM
/**
 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
 * @start: physical start of range
 * @end: physical end of range
 * @arg: unused
 *
 * Simply calls SPARSEMEM to register memory section(s).
 */
static int __init register_sparse_mem(unsigned long start, unsigned long end,
        void *arg)
{
        int nid;

        start = __pa(start) >> PAGE_SHIFT;
        end = __pa(end) >> PAGE_SHIFT;
        nid = early_pfn_to_nid(start);
        memory_present(nid, start, end);

        return 0;
}

static void __init arch_sparse_init(void)
{
        efi_memmap_walk(register_sparse_mem, NULL);
        sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif

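/*
 * With SPARSEMEM, memory_present() only marks which sections contain
 * memory; sparse_init() then allocates the backing store for their
 * mem_map.  Without it, the CONFIG_VIRTUAL_MEM_MAP path in paging_init()
 * papers over the holes instead.
 */
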
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
        int node;

        reserve_memory();

        if (num_online_nodes() == 0) {
                printk(KERN_ERR "node info missing!\n");
                node_set_online(0);
        }

        nodes_or(memory_less_mask, memory_less_mask, node_online_map);
        min_low_pfn = -1;
        max_low_pfn = 0;

        /* These actually end up getting called by call_pernode_memory() */
        efi_memmap_walk(filter_rsvd_memory, build_node_maps);
        efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

        for_each_online_node(node)
                if (mem_data[node].bootmem_data.node_low_pfn) {
                        node_clear(node, memory_less_mask);
                        mem_data[node].min_pfn = ~0UL;
                }
        /*
         * Initialize the boot memory maps in reverse order since that's
         * what the bootmem allocator expects
         */
        for (node = MAX_NUMNODES - 1; node >= 0; node--) {
                unsigned long pernode, pernodesize, map;
                struct bootmem_data *bdp;

                if (!node_online(node))
                        continue;
                else if (node_isset(node, memory_less_mask))
                        continue;

                bdp = &mem_data[node].bootmem_data;
                pernode = mem_data[node].pernode_addr;
                pernodesize = mem_data[node].pernode_size;
                map = pernode + pernodesize;

                init_bootmem_node(pgdat_list[node],
                                  map>>PAGE_SHIFT,
                                  bdp->node_boot_start>>PAGE_SHIFT,
                                  bdp->node_low_pfn);
        }

        efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

        reserve_pernode_space();
        memory_less_nodes();
        initialize_pernode_data();

        max_pfn = max_low_pfn;

        find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
        int cpu;
        static int first_time = 1;

        if (smp_processor_id() != 0)
                return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

        if (first_time) {
                first_time = 0;
                for (cpu = 0; cpu < NR_CPUS; cpu++)
                        per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
        }

        return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_VIRTUAL_MEM_MAP
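/*
 * Walk the kernel page tables that back the virtual mem_map, starting at
 * pfn offset @i within @pgdat, and return the offset of the next pfn whose
 * struct page is actually mapped.  This lets show_mem() skip whole unmapped
 * holes (up to PGDIR_SIZE worth of vmem_map at a time) rather than probing
 * every pfn individually.
 */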
static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
{
        unsigned long end_address, hole_next_pfn;
        unsigned long stop_address;

        end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
        end_address = PAGE_ALIGN(end_address);

        stop_address = (unsigned long) &vmem_map[
                pgdat->node_start_pfn + pgdat->node_spanned_pages];

        do {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pte_t *pte;

                pgd = pgd_offset_k(end_address);
                if (pgd_none(*pgd)) {
                        end_address += PGDIR_SIZE;
                        continue;
                }

                pud = pud_offset(pgd, end_address);
                if (pud_none(*pud)) {
                        end_address += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, end_address);
                if (pmd_none(*pmd)) {
                        end_address += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, end_address);
retry_pte:
                if (pte_none(*pte)) {
                        end_address += PAGE_SIZE;
                        pte++;
                        if ((end_address < stop_address) &&
                            (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
                                goto retry_pte;
                        continue;
                }
                /* Found next valid vmem_map page */
                break;
        } while (end_address < stop_address);

        end_address = min(end_address, stop_address);
        end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
        hole_next_pfn = end_address / sizeof(struct page);
        return hole_next_pfn - pgdat->node_start_pfn;
}
#else
static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
{
        return i + 1;
}
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
        int i, total_reserved = 0;
        int total_shared = 0, total_cached = 0;
        unsigned long total_present = 0;
        pg_data_t *pgdat;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                unsigned long present;
                unsigned long flags;
                int shared = 0, cached = 0, reserved = 0;

                printk("Node ID: %d\n", pgdat->node_id);
                pgdat_resize_lock(pgdat, &flags);
                present = pgdat->node_present_pages;
                for(i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page;
                        if (pfn_valid(pgdat->node_start_pfn + i))
                                page = pfn_to_page(pgdat->node_start_pfn + i);
                        else {
                                i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
                                continue;
                        }
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page)-1;
                }
                pgdat_resize_unlock(pgdat, &flags);
                total_present += present;
                total_reserved += reserved;
                total_cached += cached;
                total_shared += shared;
                printk("\t%ld pages of RAM\n", present);
                printk("\t%d reserved pages\n", reserved);
                printk("\t%d pages shared\n", shared);
                printk("\t%d pages swap cached\n", cached);
        }
        printk("%ld pages of RAM\n", total_present);
        printk("%d reserved pages\n", total_reserved);
        printk("%d pages shared\n", total_shared);
        printk("%d pages swap cached\n", total_cached);
        printk("Total of %ld pages in page table cache\n",
               pgtable_quicklist_total_size());
        printk("%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs. Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
        unsigned long rs, re, end = start + len;
        void (*func)(unsigned long, unsigned long, int);
        int i;

        start = PAGE_ALIGN(start);
        end &= PAGE_MASK;
        if (start >= end)
                return;

        func = arg;

        if (!num_node_memblks) {
                /* No SRAT table, so assume one node (node 0) */
                if (start < end)
                        (*func)(start, end - start, 0);
                return;
        }

        for (i = 0; i < num_node_memblks; i++) {
                rs = max(start, node_memblk[i].start_paddr);
                re = min(end, node_memblk[i].start_paddr +
                         node_memblk[i].size);

                if (rs < re)
                        (*func)(rs, re - rs, node_memblk[i].nid);

                if (re == end)
                        break;
        }
}

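/*
 * To illustrate the splitting, suppose node_memblk[] says node 0 owns
 * [0x0000000, 0x4000000) and node 1 owns [0x4000000, 0x8000000).  An EFI
 * range of [0x3000000, 0x5000000) then produces two callbacks:
 * func(0x3000000, 0x1000000, 0) and func(0x4000000, 0x1000000, 1).
 */
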
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number. This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
        unsigned long end = start + len;

        mem_data[node].num_physpages += len >> PAGE_SHIFT;
        if (start <= __pa(MAX_DMA_ADDRESS))
                mem_data[node].num_dma_physpages +=
                        (min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
        start = GRANULEROUNDDOWN(start);
        start = ORDERROUNDDOWN(start);
        end = GRANULEROUNDUP(end);
        mem_data[node].max_pfn = max(mem_data[node].max_pfn,
                                     end >> PAGE_SHIFT);
        mem_data[node].min_pfn = min(mem_data[node].min_pfn,
                                     start >> PAGE_SHIFT);

        return 0;
}

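/*
 * Note the asymmetry above: num_physpages/num_dma_physpages count only the
 * exact pages EFI reported, while min_pfn/max_pfn are widened to granule
 * (and order) boundaries.  The difference between the two is what
 * paging_init() reports as holes via zholes_size[].
 */
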
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
        unsigned long max_dma;
        unsigned long zones_size[MAX_NR_ZONES];
        unsigned long zholes_size[MAX_NR_ZONES];
        unsigned long pfn_offset = 0;
        int node;

        max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

        arch_sparse_init();

        efi_memmap_walk(filter_rsvd_memory, count_node_pages);

#ifdef CONFIG_VIRTUAL_MEM_MAP
        vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
        vmem_map = (struct page *) vmalloc_end;
        efi_memmap_walk(create_mem_map_page_table, NULL);
        printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

        for_each_online_node(node) {
                memset(zones_size, 0, sizeof(zones_size));
                memset(zholes_size, 0, sizeof(zholes_size));

                num_physpages += mem_data[node].num_physpages;

                if (mem_data[node].min_pfn >= max_dma) {
                        /* All of this node's memory is above ZONE_DMA */
                        zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
                                mem_data[node].min_pfn;
                        zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
                                mem_data[node].min_pfn -
                                mem_data[node].num_physpages;
                } else if (mem_data[node].max_pfn < max_dma) {
                        /* All of this node's memory is in ZONE_DMA */
                        zones_size[ZONE_DMA] = mem_data[node].max_pfn -
                                mem_data[node].min_pfn;
                        zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
                                mem_data[node].min_pfn -
                                mem_data[node].num_dma_physpages;
                } else {
                        /* This node has memory in both zones */
                        zones_size[ZONE_DMA] = max_dma -
                                mem_data[node].min_pfn;
                        zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
                                mem_data[node].num_dma_physpages;
                        zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
                                max_dma;
                        zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
                                (mem_data[node].num_physpages -
                                 mem_data[node].num_dma_physpages);
                }

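                /*
                 * Worked example for the mixed case above, assuming a 4GB
                 * DMA limit and 4KB pages (so max_dma == 0x100000): a node
                 * spanning pfns [0x80000, 0x180000) gets
                 * zones_size[ZONE_DMA] = 0x80000 and
                 * zones_size[ZONE_NORMAL] = 0x80000, with any unbacked
                 * pages in each span accounted in zholes_size[].
                 */
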
                pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
                NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
                free_area_init_node(node, NODE_DATA(node), zones_size,
                                    pfn_offset, zholes_size);
        }

        zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}