/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
14 | ||
15 | #include <linux/sched.h> | |
16 | #include <linux/kernel.h> | |
17 | #include <linux/mmzone.h> | |
18 | #include <linux/bootmem.h> | |
19 | #include <linux/module.h> | |
20 | #include <linux/node.h> | |
21 | #include <linux/cpu.h> | |
22 | #include <linux/ioport.h> | |
0707ad30 | 23 | #include <linux/irq.h> |
867e359b CM |
24 | #include <linux/kexec.h> |
25 | #include <linux/pci.h> | |
26 | #include <linux/initrd.h> | |
27 | #include <linux/io.h> | |
28 | #include <linux/highmem.h> | |
29 | #include <linux/smp.h> | |
30 | #include <linux/timex.h> | |
31 | #include <asm/setup.h> | |
32 | #include <asm/sections.h> | |
867e359b CM |
33 | #include <asm/cacheflush.h> |
34 | #include <asm/pgalloc.h> | |
35 | #include <asm/mmu_context.h> | |
36 | #include <hv/hypervisor.h> | |
37 | #include <arch/interrupts.h> | |
38 | ||
39 | /* <linux/smp.h> doesn't provide this definition. */ | |
40 | #ifndef CONFIG_SMP | |
41 | #define setup_max_cpus 1 | |
42 | #endif | |
43 | ||
static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* We only create bootmem data on node 0. */
static bootmem_data_t __initdata node0_bdata;

/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

static unsigned long __initdata node_percpu[MAX_NUMNODES];

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
  __write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif
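
/*
 * A hedged sketch (not literal kernel code) of how the two tables
 * above are used: a lowmem VA is turned into a PA by indexing
 * pbase_map with the VA's huge-page number, and a PA is turned back
 * into a VA via vbase_map and the PA's high bits, roughly:
 *
 *   pa = ((phys_addr_t)pbase_map[va >> HPAGE_SHIFT] << PAGE_SHIFT)
 *        + (va & (HPAGE_SIZE - 1));
 *   va = (unsigned long)vbase_map[__pa_to_highbits(pa)] + low PA bits;
 *
 * Both tables are filled in by setup_pa_va_mapping() below.
 */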
86 | ||
87 | /* Node number as a function of the high PA bits */ | |
88 | int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once; | |
89 | EXPORT_SYMBOL(highbits_to_node); | |
90 | ||
91 | static unsigned int __initdata maxmem_pfn = -1U; | |
92 | static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = { | |
93 | [0 ... MAX_NUMNODES-1] = -1U | |
94 | }; | |
95 | static nodemask_t __initdata isolnodes; | |
96 | ||
97 | #ifdef CONFIG_PCI | |
98 | enum { DEFAULT_PCI_RESERVE_MB = 64 }; | |
99 | static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB; | |
100 | unsigned long __initdata pci_reserve_start_pfn = -1U; | |
101 | unsigned long __initdata pci_reserve_end_pfn = -1U; | |
102 | #endif | |
103 | ||
static int __init setup_maxmem(char *str)
{
	unsigned long long maxmem;
	if (str == NULL || (maxmem = memparse(str, NULL)) == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem >> HPAGE_SHIFT) << (HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);
116 | ||
117 | static int __init setup_maxnodemem(char *str) | |
118 | { | |
119 | char *endp; | |
bfffe79b CM |
120 | unsigned long long maxnodemem; |
121 | long node; | |
867e359b CM |
122 | |
123 | node = str ? simple_strtoul(str, &endp, 0) : INT_MAX; | |
bfffe79b | 124 | if (node >= MAX_NUMNODES || *endp != ':') |
867e359b CM |
125 | return -EINVAL; |
126 | ||
bfffe79b CM |
127 | maxnodemem = memparse(endp+1, NULL); |
128 | maxnodemem_pfn[node] = (maxnodemem >> HPAGE_SHIFT) << | |
867e359b | 129 | (HPAGE_SHIFT - PAGE_SHIFT); |
0707ad30 | 130 | pr_info("Forcing RAM used on node %ld to no more than %dMB\n", |
867e359b CM |
131 | node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT)); |
132 | return 0; | |
133 | } | |
134 | early_param("maxnodemem", setup_maxnodemem); | |
135 | ||
136 | static int __init setup_isolnodes(char *str) | |
137 | { | |
138 | char buf[MAX_NUMNODES * 5]; | |
139 | if (str == NULL || nodelist_parse(str, isolnodes) != 0) | |
140 | return -EINVAL; | |
141 | ||
142 | nodelist_scnprintf(buf, sizeof(buf), isolnodes); | |
0707ad30 | 143 | pr_info("Set isolnodes value to '%s'\n", buf); |
867e359b CM |
144 | return 0; |
145 | } | |
146 | early_param("isolnodes", setup_isolnodes); | |
147 | ||
148 | #ifdef CONFIG_PCI | |
149 | static int __init setup_pci_reserve(char* str) | |
150 | { | |
151 | unsigned long mb; | |
152 | ||
153 | if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || | |
154 | mb > 3 * 1024) | |
155 | return -EINVAL; | |
156 | ||
157 | pci_reserve_mb = mb; | |
0707ad30 | 158 | pr_info("Reserving %dMB for PCIE root complex mappings\n", |
867e359b CM |
159 | pci_reserve_mb); |
160 | return 0; | |
161 | } | |
162 | early_param("pci_reserve", setup_pci_reserve); | |
163 | #endif | |
164 | ||
165 | #ifndef __tilegx__ | |
166 | /* | |
167 | * vmalloc=size forces the vmalloc area to be exactly 'size' bytes. | |
168 | * This can be used to increase (or decrease) the vmalloc area. | |
169 | */ | |
170 | static int __init parse_vmalloc(char *arg) | |
171 | { | |
172 | if (!arg) | |
173 | return -EINVAL; | |
174 | ||
175 | VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK; | |
176 | ||
177 | /* See validate_va() for more on this test. */ | |
178 | if ((long)_VMALLOC_START >= 0) | |
179 | early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n", | |
180 | VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL); | |
181 | ||
182 | return 0; | |
183 | } | |
184 | early_param("vmalloc", parse_vmalloc); | |
185 | #endif | |
186 | ||
187 | #ifdef CONFIG_HIGHMEM | |
188 | /* | |
a78c942d CM |
189 | * Determine for each controller where its lowmem is mapped and how much of |
190 | * it is mapped there. On controller zero, the first few megabytes are | |
191 | * already mapped in as code at MEM_SV_INTRPT, so in principle we could | |
192 | * start our data mappings higher up, but for now we don't bother, to avoid | |
193 | * additional confusion. | |
867e359b CM |
194 | * |
195 | * One question is whether, on systems with more than 768 Mb and | |
196 | * controllers of different sizes, to map in a proportionate amount of | |
197 | * each one, or to try to map the same amount from each controller. | |
198 | * (E.g. if we have three controllers with 256MB, 1GB, and 256MB | |
199 | * respectively, do we map 256MB from each, or do we map 128 MB, 512 | |
200 | * MB, and 128 MB respectively?) For now we use a proportionate | |
201 | * solution like the latter. | |
202 | * | |
203 | * The VA/PA mapping demands that we align our decisions at 16 MB | |
204 | * boundaries so that we can rapidly convert VA to PA. | |
205 | */ | |
206 | static void *__init setup_pa_va_mapping(void) | |
207 | { | |
208 | unsigned long curr_pages = 0; | |
209 | unsigned long vaddr = PAGE_OFFSET; | |
210 | nodemask_t highonlynodes = isolnodes; | |
211 | int i, j; | |
212 | ||
213 | memset(pbase_map, -1, sizeof(pbase_map)); | |
214 | memset(vbase_map, -1, sizeof(vbase_map)); | |
215 | ||
216 | /* Node zero cannot be isolated for LOWMEM purposes. */ | |
217 | node_clear(0, highonlynodes); | |
218 | ||
219 | /* Count up the number of pages on non-highonlynodes controllers. */ | |
220 | mappable_physpages = 0; | |
221 | for_each_online_node(i) { | |
222 | if (!node_isset(i, highonlynodes)) | |
223 | mappable_physpages += | |
224 | node_end_pfn[i] - node_start_pfn[i]; | |
225 | } | |
226 | ||
227 | for_each_online_node(i) { | |
228 | unsigned long start = node_start_pfn[i]; | |
229 | unsigned long end = node_end_pfn[i]; | |
230 | unsigned long size = end - start; | |
231 | unsigned long vaddr_end; | |
232 | ||
233 | if (node_isset(i, highonlynodes)) { | |
234 | /* Mark this controller as having no lowmem. */ | |
235 | node_lowmem_end_pfn[i] = start; | |
236 | continue; | |
237 | } | |
238 | ||
239 | curr_pages += size; | |
240 | if (mappable_physpages > MAXMEM_PFN) { | |
241 | vaddr_end = PAGE_OFFSET + | |
242 | (((u64)curr_pages * MAXMEM_PFN / | |
243 | mappable_physpages) | |
244 | << PAGE_SHIFT); | |
245 | } else { | |
246 | vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT); | |
247 | } | |
248 | for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) { | |
249 | unsigned long this_pfn = | |
250 | start + (j << HUGETLB_PAGE_ORDER); | |
251 | pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn; | |
252 | if (vbase_map[__pfn_to_highbits(this_pfn)] == | |
253 | (void *)-1) | |
254 | vbase_map[__pfn_to_highbits(this_pfn)] = | |
255 | (void *)(vaddr & HPAGE_MASK); | |
256 | } | |
257 | node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER); | |
258 | BUG_ON(node_lowmem_end_pfn[i] > end); | |
259 | } | |
260 | ||
261 | /* Return highest address of any mapped memory. */ | |
262 | return (void *)vaddr; | |
263 | } | |
264 | #endif /* CONFIG_HIGHMEM */ | |
265 | ||
266 | /* | |
267 | * Register our most important memory mappings with the debug stub. | |
268 | * | |
269 | * This is up to 4 mappings for lowmem, one mapping per memory | |
270 | * controller, plus one for our text segment. | |
271 | */ | |
0707ad30 | 272 | static void __cpuinit store_permanent_mappings(void) |
867e359b CM |
273 | { |
274 | int i; | |
275 | ||
276 | for_each_online_node(i) { | |
277 | HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT; | |
278 | #ifdef CONFIG_HIGHMEM | |
279 | HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i]; | |
280 | #else | |
281 | HV_PhysAddr high_mapped_pa = node_end_pfn[i]; | |
282 | #endif | |
283 | ||
284 | unsigned long pages = high_mapped_pa - node_start_pfn[i]; | |
285 | HV_VirtAddr addr = (HV_VirtAddr) __va(pa); | |
286 | hv_store_mapping(addr, pages << PAGE_SHIFT, pa); | |
287 | } | |
288 | ||
289 | hv_store_mapping((HV_VirtAddr)_stext, | |
290 | (uint32_t)(_einittext - _stext), 0); | |
291 | } | |
292 | ||
293 | /* | |
294 | * Use hv_inquire_physical() to populate node_{start,end}_pfn[] | |
295 | * and node_online_map, doing suitable sanity-checking. | |
296 | * Also set min_low_pfn, max_low_pfn, and max_pfn. | |
297 | */ | |
298 | static void __init setup_memory(void) | |
299 | { | |
300 | int i, j; | |
301 | int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 }; | |
302 | #ifdef CONFIG_HIGHMEM | |
303 | long highmem_pages; | |
304 | #endif | |
305 | #ifndef __tilegx__ | |
306 | int cap; | |
307 | #endif | |
308 | #if defined(CONFIG_HIGHMEM) || defined(__tilegx__) | |
309 | long lowmem_pages; | |
310 | #endif | |
311 | ||
312 | /* We are using a char to hold the cpu_2_node[] mapping */ | |
e18105c1 | 313 | BUILD_BUG_ON(MAX_NUMNODES > 127); |
867e359b CM |
314 | |
315 | /* Discover the ranges of memory available to us */ | |
316 | for (i = 0; ; ++i) { | |
317 | unsigned long start, size, end, highbits; | |
318 | HV_PhysAddrRange range = hv_inquire_physical(i); | |
319 | if (range.size == 0) | |
320 | break; | |
321 | #ifdef CONFIG_FLATMEM | |
322 | if (i > 0) { | |
0707ad30 | 323 | pr_err("Can't use discontiguous PAs: %#llx..%#llx\n", |
867e359b CM |
324 | range.size, range.start + range.size); |
325 | continue; | |
326 | } | |
327 | #endif | |
328 | #ifndef __tilegx__ | |
329 | if ((unsigned long)range.start) { | |
0707ad30 | 330 | pr_err("Range not at 4GB multiple: %#llx..%#llx\n", |
867e359b CM |
331 | range.start, range.start + range.size); |
332 | continue; | |
333 | } | |
334 | #endif | |
335 | if ((range.start & (HPAGE_SIZE-1)) != 0 || | |
336 | (range.size & (HPAGE_SIZE-1)) != 0) { | |
337 | unsigned long long start_pa = range.start; | |
0707ad30 | 338 | unsigned long long orig_size = range.size; |
867e359b CM |
339 | range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK; |
340 | range.size -= (range.start - start_pa); | |
341 | range.size &= HPAGE_MASK; | |
0707ad30 | 342 | pr_err("Range not hugepage-aligned: %#llx..%#llx:" |
867e359b | 343 | " now %#llx-%#llx\n", |
0707ad30 | 344 | start_pa, start_pa + orig_size, |
867e359b CM |
345 | range.start, range.start + range.size); |
346 | } | |
347 | highbits = __pa_to_highbits(range.start); | |
348 | if (highbits >= NR_PA_HIGHBIT_VALUES) { | |
0707ad30 | 349 | pr_err("PA high bits too high: %#llx..%#llx\n", |
867e359b CM |
350 | range.start, range.start + range.size); |
351 | continue; | |
352 | } | |
353 | if (highbits_seen[highbits]) { | |
0707ad30 | 354 | pr_err("Range overlaps in high bits: %#llx..%#llx\n", |
867e359b CM |
355 | range.start, range.start + range.size); |
356 | continue; | |
357 | } | |
358 | highbits_seen[highbits] = 1; | |
359 | if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) { | |
0707ad30 CM |
360 | int max_size = maxnodemem_pfn[i]; |
361 | if (max_size > 0) { | |
362 | pr_err("Maxnodemem reduced node %d to" | |
363 | " %d pages\n", i, max_size); | |
364 | range.size = PFN_PHYS(max_size); | |
867e359b | 365 | } else { |
0707ad30 | 366 | pr_err("Maxnodemem disabled node %d\n", i); |
867e359b CM |
367 | continue; |
368 | } | |
369 | } | |
370 | if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) { | |
0707ad30 CM |
371 | int max_size = maxmem_pfn - num_physpages; |
372 | if (max_size > 0) { | |
373 | pr_err("Maxmem reduced node %d to %d pages\n", | |
374 | i, max_size); | |
375 | range.size = PFN_PHYS(max_size); | |
867e359b | 376 | } else { |
0707ad30 | 377 | pr_err("Maxmem disabled node %d\n", i); |
867e359b CM |
378 | continue; |
379 | } | |
380 | } | |
381 | if (i >= MAX_NUMNODES) { | |
0707ad30 | 382 | pr_err("Too many PA nodes (#%d): %#llx...%#llx\n", |
867e359b CM |
383 | i, range.size, range.size + range.start); |
384 | continue; | |
385 | } | |
386 | ||
387 | start = range.start >> PAGE_SHIFT; | |
388 | size = range.size >> PAGE_SHIFT; | |
389 | end = start + size; | |
390 | ||
391 | #ifndef __tilegx__ | |
392 | if (((HV_PhysAddr)end << PAGE_SHIFT) != | |
393 | (range.start + range.size)) { | |
0707ad30 | 394 | pr_err("PAs too high to represent: %#llx..%#llx\n", |
867e359b CM |
395 | range.start, range.start + range.size); |
396 | continue; | |
397 | } | |
398 | #endif | |
399 | #ifdef CONFIG_PCI | |
400 | /* | |
401 | * Blocks that overlap the pci reserved region must | |
402 | * have enough space to hold the maximum percpu data | |
403 | * region at the top of the range. If there isn't | |
404 | * enough space above the reserved region, just | |
405 | * truncate the node. | |
406 | */ | |
407 | if (start <= pci_reserve_start_pfn && | |
408 | end > pci_reserve_start_pfn) { | |
409 | unsigned int per_cpu_size = | |
410 | __per_cpu_end - __per_cpu_start; | |
411 | unsigned int percpu_pages = | |
412 | NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); | |
413 | if (end < pci_reserve_end_pfn + percpu_pages) { | |
414 | end = pci_reserve_start_pfn; | |
0707ad30 | 415 | pr_err("PCI mapping region reduced node %d to" |
867e359b CM |
416 | " %ld pages\n", i, end - start); |
417 | } | |
418 | } | |
419 | #endif | |
420 | ||
421 | for (j = __pfn_to_highbits(start); | |
422 | j <= __pfn_to_highbits(end - 1); j++) | |
423 | highbits_to_node[j] = i; | |
424 | ||
425 | node_start_pfn[i] = start; | |
426 | node_end_pfn[i] = end; | |
427 | node_controller[i] = range.controller; | |
428 | num_physpages += size; | |
429 | max_pfn = end; | |
430 | ||
431 | /* Mark node as online */ | |
432 | node_set(i, node_online_map); | |
433 | node_set(i, node_possible_map); | |
434 | } | |
435 | ||
436 | #ifndef __tilegx__ | |
437 | /* | |
438 | * For 4KB pages, mem_map "struct page" data is 1% of the size | |
439 | * of the physical memory, so can be quite big (640 MB for | |
440 | * four 16G zones). These structures must be mapped in | |
441 | * lowmem, and since we currently cap out at about 768 MB, | |
442 | * it's impractical to try to use this much address space. | |
443 | * For now, arbitrarily cap the amount of physical memory | |
444 | * we're willing to use at 8 million pages (32GB of 4KB pages). | |
445 | */ | |
446 | cap = 8 * 1024 * 1024; /* 8 million pages */ | |
447 | if (num_physpages > cap) { | |
448 | int num_nodes = num_online_nodes(); | |
449 | int cap_each = cap / num_nodes; | |
450 | unsigned long dropped_pages = 0; | |
451 | for (i = 0; i < num_nodes; ++i) { | |
452 | int size = node_end_pfn[i] - node_start_pfn[i]; | |
453 | if (size > cap_each) { | |
454 | dropped_pages += (size - cap_each); | |
455 | node_end_pfn[i] = node_start_pfn[i] + cap_each; | |
456 | } | |
457 | } | |
458 | num_physpages -= dropped_pages; | |
0707ad30 | 459 | pr_warning("Only using %ldMB memory;" |
867e359b CM |
460 | " ignoring %ldMB.\n", |
461 | num_physpages >> (20 - PAGE_SHIFT), | |
462 | dropped_pages >> (20 - PAGE_SHIFT)); | |
0707ad30 | 463 | pr_warning("Consider using a larger page size.\n"); |
867e359b CM |
464 | } |
465 | #endif | |
466 | ||
467 | /* Heap starts just above the last loaded address. */ | |
468 | min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); | |
469 | ||
470 | #ifdef CONFIG_HIGHMEM | |
471 | /* Find where we map lowmem from each controller. */ | |
472 | high_memory = setup_pa_va_mapping(); | |
473 | ||
474 | /* Set max_low_pfn based on what node 0 can directly address. */ | |
475 | max_low_pfn = node_lowmem_end_pfn[0]; | |
476 | ||
477 | lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? | |
478 | MAXMEM_PFN : mappable_physpages; | |
479 | highmem_pages = (long) (num_physpages - lowmem_pages); | |
480 | ||
0707ad30 | 481 | pr_notice("%ldMB HIGHMEM available.\n", |
867e359b | 482 | pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); |
0707ad30 | 483 | pr_notice("%ldMB LOWMEM available.\n", |
867e359b CM |
484 | pages_to_mb(lowmem_pages)); |
485 | #else | |
486 | /* Set max_low_pfn based on what node 0 can directly address. */ | |
487 | max_low_pfn = node_end_pfn[0]; | |
488 | ||
489 | #ifndef __tilegx__ | |
490 | if (node_end_pfn[0] > MAXMEM_PFN) { | |
0707ad30 | 491 | pr_warning("Only using %ldMB LOWMEM.\n", |
867e359b | 492 | MAXMEM>>20); |
0707ad30 | 493 | pr_warning("Use a HIGHMEM enabled kernel.\n"); |
867e359b CM |
494 | max_low_pfn = MAXMEM_PFN; |
495 | max_pfn = MAXMEM_PFN; | |
496 | num_physpages = MAXMEM_PFN; | |
497 | node_end_pfn[0] = MAXMEM_PFN; | |
498 | } else { | |
0707ad30 | 499 | pr_notice("%ldMB memory available.\n", |
867e359b CM |
500 | pages_to_mb(node_end_pfn[0])); |
501 | } | |
502 | for (i = 1; i < MAX_NUMNODES; ++i) { | |
503 | node_start_pfn[i] = 0; | |
504 | node_end_pfn[i] = 0; | |
505 | } | |
506 | high_memory = __va(node_end_pfn[0]); | |
507 | #else | |
508 | lowmem_pages = 0; | |
509 | for (i = 0; i < MAX_NUMNODES; ++i) { | |
510 | int pages = node_end_pfn[i] - node_start_pfn[i]; | |
511 | lowmem_pages += pages; | |
512 | if (pages) | |
513 | high_memory = pfn_to_kaddr(node_end_pfn[i]); | |
514 | } | |
0707ad30 | 515 | pr_notice("%ldMB memory available.\n", |
867e359b CM |
516 | pages_to_mb(lowmem_pages)); |
517 | #endif | |
518 | #endif | |
519 | } | |
520 | ||
521 | static void __init setup_bootmem_allocator(void) | |
522 | { | |
523 | unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; | |
524 | ||
525 | /* Provide a node 0 bdata. */ | |
526 | NODE_DATA(0)->bdata = &node0_bdata; | |
527 | ||
528 | #ifdef CONFIG_PCI | |
529 | /* Don't let boot memory alias the PCI region. */ | |
530 | last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); | |
531 | #else | |
532 | last_alloc_pfn = max_low_pfn; | |
533 | #endif | |
534 | ||
535 | /* | |
536 | * Initialize the boot-time allocator (with low memory only): | |
537 | * The first argument says where to put the bitmap, and the | |
538 | * second says where the end of allocatable memory is. | |
539 | */ | |
540 | bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); | |
541 | ||
542 | /* | |
543 | * Let the bootmem allocator use all the space we've given it | |
544 | * except for its own bitmap. | |
545 | */ | |
546 | first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); | |
547 | if (first_alloc_pfn >= last_alloc_pfn) | |
548 | early_panic("Not enough memory on controller 0 for bootmem\n"); | |
549 | ||
550 | free_bootmem(PFN_PHYS(first_alloc_pfn), | |
551 | PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); | |
552 | ||
553 | #ifdef CONFIG_KEXEC | |
554 | if (crashk_res.start != crashk_res.end) | |
28f65c11 | 555 | reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0); |
867e359b | 556 | #endif |
867e359b CM |
557 | } |
558 | ||
void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}

static int __init percpu_size(void)
{
	int size = __per_cpu_end - __per_cpu_start;
	size += PERCPU_MODULE_RESERVE;
	size += PERCPU_DYNAMIC_EARLY_SIZE;
	if (size < PCPU_MIN_UNIT_SIZE)
		size = PCPU_MIN_UNIT_SIZE;
	size = roundup(size, PAGE_SIZE);

	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}

static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
{
	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}
589 | ||
590 | static void __init zone_sizes_init(void) | |
591 | { | |
592 | unsigned long zones_size[MAX_NR_ZONES] = { 0 }; | |
867e359b CM |
593 | int size = percpu_size(); |
594 | int num_cpus = smp_height * smp_width; | |
595 | int i; | |
596 | ||
597 | for (i = 0; i < num_cpus; ++i) | |
598 | node_percpu[cpu_to_node(i)] += size; | |
599 | ||
600 | for_each_online_node(i) { | |
601 | unsigned long start = node_start_pfn[i]; | |
602 | unsigned long end = node_end_pfn[i]; | |
603 | #ifdef CONFIG_HIGHMEM | |
604 | unsigned long lowmem_end = node_lowmem_end_pfn[i]; | |
605 | #else | |
606 | unsigned long lowmem_end = end; | |
607 | #endif | |
608 | int memmap_size = (end - start) * sizeof(struct page); | |
609 | node_free_pfn[i] = start; | |
610 | ||
611 | /* | |
612 | * Set aside pages for per-cpu data and the mem_map array. | |
613 | * | |
614 | * Since the per-cpu data requires special homecaching, | |
615 | * if we are in kdata_huge mode, we put it at the end of | |
616 | * the lowmem region. If we're not in kdata_huge mode, | |
617 | * we take the per-cpu pages from the bottom of the | |
618 | * controller, since that avoids fragmenting a huge page | |
619 | * that users might want. We always take the memmap | |
620 | * from the bottom of the controller, since with | |
621 | * kdata_huge that lets it be under a huge TLB entry. | |
622 | * | |
623 | * If the user has requested isolnodes for a controller, | |
624 | * though, there'll be no lowmem, so we just alloc_bootmem | |
625 | * the memmap. There will be no percpu memory either. | |
626 | */ | |
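		/*
		 * A hedged worked example of the kdata_huge case: if a
		 * controller's cpus need 256KB of percpu space (64 4KB
		 * pages), those pages are carved from the very top of
		 * its lowmem, [lowmem_end - 64, lowmem_end), while the
		 * memmap stays at the bottom, leaving the middle of the
		 * controller covered by whole huge pages.
		 */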
		if (__pfn_to_highbits(start) == 0) {
			/* In low PAs, allocate via bootmem. */
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(memmap_size, goal);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(node_percpu[i], goal);
		} else if (node_isset(i, isolnodes)) {
			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else {
			/* In high PAs, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		/*
		 * Everyone shares node 0's bootmem allocator, but
		 * we use alloc_remap(), above, to put the actual
		 * struct page array on the individual controllers,
		 * which is most of the data that we actually care about.
		 * We can't place bootmem allocators on the other
		 * controllers since the bootmem allocator can only
		 * operate on 32-bit physical addresses.
		 */
		NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;

		free_area_init_node(i, zones_size, start, NULL);
		printk(KERN_DEBUG "  Normal zone: %ld per-cpu pages\n",
		       PFN_UP(node_percpu[i]));

		/* Track the type of memory on each node */
		if (zones_size[ZONE_NORMAL])
			node_set_state(i, N_NORMAL_MEMORY);
#ifdef CONFIG_HIGHMEM
		if (end != start)
			node_set_state(i, N_HIGH_MEMORY);
#endif

		node_set_online(i);
	}
}
691 | ||
692 | #ifdef CONFIG_NUMA | |
693 | ||
694 | /* which logical CPUs are on which nodes */ | |
695 | struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once; | |
696 | EXPORT_SYMBOL(node_2_cpu_mask); | |
697 | ||
698 | /* which node each logical CPU is on */ | |
699 | char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES))); | |
700 | EXPORT_SYMBOL(cpu_2_node); | |
701 | ||
702 | /* Return cpu_to_node() except for cpus not yet assigned, which return -1 */ | |
703 | static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus) | |
704 | { | |
705 | if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus)) | |
706 | return -1; | |
707 | else | |
708 | return cpu_to_node(cpu); | |
709 | } | |
710 | ||
711 | /* Return number of immediately-adjacent tiles sharing the same NUMA node. */ | |
712 | static int __init node_neighbors(int node, int cpu, | |
713 | struct cpumask *unbound_cpus) | |
714 | { | |
715 | int neighbors = 0; | |
716 | int w = smp_width; | |
717 | int h = smp_height; | |
718 | int x = cpu % w; | |
719 | int y = cpu / w; | |
720 | if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node) | |
721 | ++neighbors; | |
722 | if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node) | |
723 | ++neighbors; | |
724 | if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node) | |
725 | ++neighbors; | |
726 | if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node) | |
727 | ++neighbors; | |
728 | return neighbors; | |
729 | } | |
730 | ||
static void __init setup_numa_mapping(void)
{
	int distance[MAX_NUMNODES][NR_CPUS];
	HV_Coord coord;
	int cpu, node, cpus, i, x, y;
	int num_nodes = num_online_nodes();
	struct cpumask unbound_cpus;
	nodemask_t default_nodes;

	cpumask_clear(&unbound_cpus);

	/* Get set of nodes we will use for defaults */
	nodes_andnot(default_nodes, node_online_map, isolnodes);
	if (nodes_empty(default_nodes)) {
		BUG_ON(!node_isset(0, node_online_map));
		pr_err("Forcing NUMA node zero available as a default node\n");
		node_set(0, default_nodes);
	}

	/* Populate the distance[] array */
	memset(distance, -1, sizeof(distance));
	cpu = 0;
	for (coord.y = 0; coord.y < smp_height; ++coord.y) {
		for (coord.x = 0; coord.x < smp_width;
		     ++coord.x, ++cpu) {
			BUG_ON(cpu >= nr_cpu_ids);
			if (!cpu_possible(cpu)) {
				cpu_2_node[cpu] = -1;
				continue;
			}
			for_each_node_mask(node, default_nodes) {
				HV_MemoryControllerInfo info =
					hv_inquire_memory_controller(
						coord, node_controller[node]);
				distance[node][cpu] =
					ABS(info.coord.x) + ABS(info.coord.y);
			}
			cpumask_set_cpu(cpu, &unbound_cpus);
		}
	}
	cpus = cpu;

	/*
	 * Round-robin through the NUMA nodes until all the cpus are
	 * assigned.  We could be more clever here (e.g. create four
	 * sorted linked lists on the same set of cpu nodes, and pull
	 * off them in round-robin sequence, removing from all four
	 * lists each time) but given the relatively small numbers
	 * involved, O(n^2) seems OK for a one-time cost.
	 */
	node = first_node(default_nodes);
	while (!cpumask_empty(&unbound_cpus)) {
		int best_cpu = -1;
		int best_distance = INT_MAX;
		for (cpu = 0; cpu < cpus; ++cpu) {
			if (cpumask_test_cpu(cpu, &unbound_cpus)) {
				/*
				 * Compute metric, which is how much
				 * closer the cpu is to this memory
				 * controller than the others, shifted
				 * up, and then the number of
				 * neighbors already in the node as an
				 * epsilon adjustment to try to keep
				 * the nodes compact.
				 */
				int d = distance[node][cpu] * num_nodes;
				for_each_node_mask(i, default_nodes) {
					if (i != node)
						d -= distance[i][cpu];
				}
				d *= 8;  /* allow space for epsilon */
				d -= node_neighbors(node, cpu, &unbound_cpus);
				if (d < best_distance) {
					best_cpu = cpu;
					best_distance = d;
				}
			}
		}
		BUG_ON(best_cpu < 0);
		cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]);
		cpu_2_node[best_cpu] = node;
		cpumask_clear_cpu(best_cpu, &unbound_cpus);
		node = next_node(node, default_nodes);
		if (node == MAX_NUMNODES)
			node = first_node(default_nodes);
	}

	/* Print out node assignments and set defaults for disabled cpus */
	cpu = 0;
	for (y = 0; y < smp_height; ++y) {
		printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y);
		for (x = 0; x < smp_width; ++x, ++cpu) {
			if (cpu_to_node(cpu) < 0) {
				pr_cont(" -");
				cpu_2_node[cpu] = first_node(default_nodes);
			} else {
				pr_cont(" %d", cpu_to_node(cpu));
			}
		}
		pr_cont("\n");
	}
}
833 | ||
834 | static struct cpu cpu_devices[NR_CPUS]; | |
835 | ||
836 | static int __init topology_init(void) | |
837 | { | |
838 | int i; | |
839 | ||
840 | for_each_online_node(i) | |
841 | register_one_node(i); | |
842 | ||
4d658d13 | 843 | for (i = 0; i < smp_height * smp_width; ++i) |
867e359b CM |
844 | register_cpu(&cpu_devices[i], i); |
845 | ||
846 | return 0; | |
847 | } | |
848 | ||
849 | subsys_initcall(topology_init); | |
850 | ||
851 | #else /* !CONFIG_NUMA */ | |
852 | ||
853 | #define setup_numa_mapping() do { } while (0) | |
854 | ||
855 | #endif /* CONFIG_NUMA */ | |
856 | ||
/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	arch_local_irq_unmask(INT_DMATLB_MISS);
	arch_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	arch_local_irq_unmask(INT_SNITLB_MISS);
#endif
#ifdef __tilegx__
	arch_local_irq_unmask(INT_SINGLE_STEP_K);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 & 1 to the corresponding
	 * values.  This includes access to the SYSTEM_SAVE and EX_CONTEXT
	 * SPRs, as well as the interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);
	__insn_mtspr(SPR_MPL_INTCTRL_1_SET_1, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif
}
912 | ||
43d9ebba CM |
913 | #ifdef CONFIG_BLK_DEV_INITRD |
914 | ||
51bcdf88 CM |
915 | /* |
916 | * Note that the kernel can potentially support other compression | |
917 | * techniques than gz, though we don't do so by default. If we ever | |
918 | * decide to do so we can either look for other filename extensions, | |
919 | * or just allow a file with this name to be compressed with an | |
920 | * arbitrary compressor (somewhat counterintuitively). | |
921 | */ | |
867e359b CM |
922 | static int __initdata set_initramfs_file; |
923 | static char __initdata initramfs_file[128] = "initramfs.cpio.gz"; | |
924 | ||
925 | static int __init setup_initramfs_file(char *str) | |
926 | { | |
927 | if (str == NULL) | |
928 | return -EINVAL; | |
929 | strncpy(initramfs_file, str, sizeof(initramfs_file) - 1); | |
930 | set_initramfs_file = 1; | |
931 | ||
932 | return 0; | |
933 | } | |
934 | early_param("initramfs_file", setup_initramfs_file); | |
935 | ||
936 | /* | |
51bcdf88 | 937 | * We look for an "initramfs.cpio.gz" file in the hvfs. |
867e359b | 938 | * If there is one, we allocate some memory for it and it will be |
51bcdf88 | 939 | * unpacked to the initramfs. |
867e359b CM |
940 | */ |
941 | static void __init load_hv_initrd(void) | |
942 | { | |
943 | HV_FS_StatInfo stat; | |
944 | int fd, rc; | |
945 | void *initrd; | |
946 | ||
947 | fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); | |
948 | if (fd == HV_ENOENT) { | |
949 | if (set_initramfs_file) | |
0707ad30 CM |
950 | pr_warning("No such hvfs initramfs file '%s'\n", |
951 | initramfs_file); | |
867e359b CM |
952 | return; |
953 | } | |
954 | BUG_ON(fd < 0); | |
955 | stat = hv_fs_fstat(fd); | |
956 | BUG_ON(stat.size < 0); | |
957 | if (stat.flags & HV_FS_ISDIR) { | |
0707ad30 CM |
958 | pr_warning("Ignoring hvfs file '%s': it's a directory.\n", |
959 | initramfs_file); | |
867e359b CM |
960 | return; |
961 | } | |
962 | initrd = alloc_bootmem_pages(stat.size); | |
963 | rc = hv_fs_pread(fd, (HV_VirtAddr) initrd, stat.size, 0); | |
964 | if (rc != stat.size) { | |
0707ad30 | 965 | pr_err("Error reading %d bytes from hvfs file '%s': %d\n", |
867e359b | 966 | stat.size, initramfs_file, rc); |
bc63de7c | 967 | free_initrd_mem((unsigned long) initrd, stat.size); |
867e359b CM |
968 | return; |
969 | } | |
970 | initrd_start = (unsigned long) initrd; | |
971 | initrd_end = initrd_start + stat.size; | |
972 | } | |
973 | ||
974 | void __init free_initrd_mem(unsigned long begin, unsigned long end) | |
975 | { | |
bc63de7c | 976 | free_bootmem(__pa(begin), end - begin); |
867e359b CM |
977 | } |
978 | ||
43d9ebba CM |
979 | #else |
980 | static inline void load_hv_initrd(void) {} | |
981 | #endif /* CONFIG_BLK_DEV_INITRD */ | |
982 | ||
867e359b CM |
static void __init validate_hv(void)
{
	/*
	 * It may already be too late, but let's check our built-in
	 * configuration against what the hypervisor is providing.
	 */
	unsigned long glue_size = hv_sysconf(HV_SYSCONF_GLUE_SIZE);
	int hv_page_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL);
	int hv_hpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE);
	HV_ASIDRange asid_range;

#ifndef CONFIG_SMP
	HV_Topology topology = hv_inquire_topology();
	BUG_ON(topology.coord.x != 0 || topology.coord.y != 0);
	if (topology.width != 1 || topology.height != 1) {
		pr_warning("Warning: booting UP kernel on %dx%d grid;"
			   " will ignore all but first tile.\n",
			   topology.width, topology.height);
	}
#endif

	if (PAGE_OFFSET + HV_GLUE_START_CPA + glue_size > (unsigned long)_text)
		early_panic("Hypervisor glue size %ld is too big!\n",
			    glue_size);
	if (hv_page_size != PAGE_SIZE)
		early_panic("Hypervisor page size %#x != our %#lx\n",
			    hv_page_size, PAGE_SIZE);
	if (hv_hpage_size != HPAGE_SIZE)
		early_panic("Hypervisor huge page size %#x != our %#lx\n",
			    hv_hpage_size, HPAGE_SIZE);

#ifdef CONFIG_SMP
	/*
	 * Some hypervisor APIs take a pointer to a bitmap array
	 * whose size is at least the number of cpus on the chip.
	 * We use a struct cpumask for this, so it must be big enough.
	 */
	if ((smp_height * smp_width) > nr_cpu_ids)
		early_panic("Hypervisor %d x %d grid too big for Linux"
			    " NR_CPUS %d\n", smp_height, smp_width,
			    nr_cpu_ids);
#endif

	/*
	 * Check that we're using allowed ASIDs, and initialize the
	 * various asid variables to their appropriate initial states.
	 */
	asid_range = hv_inquire_asid(0);
	__get_cpu_var(current_asid) = min_asid = asid_range.start;
	max_asid = asid_range.start + asid_range.size - 1;

	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
		       sizeof(chip_model)) < 0) {
		pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n");
		strlcpy(chip_model, "unknown", sizeof(chip_model));
	}
}
1040 | ||
1041 | static void __init validate_va(void) | |
1042 | { | |
1043 | #ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */ | |
1044 | /* | |
1045 | * Similarly, make sure we're only using allowed VAs. | |
1046 | * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT, | |
1047 | * and 0 .. KERNEL_HIGH_VADDR. | |
1048 | * In addition, make sure we CAN'T use the end of memory, since | |
1049 | * we use the last chunk of each pgd for the pgd_list. | |
1050 | */ | |
a78c942d | 1051 | int i, user_kernel_ok = 0; |
867e359b CM |
1052 | unsigned long max_va = 0; |
1053 | unsigned long list_va = | |
1054 | ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); | |
1055 | ||
1056 | for (i = 0; ; ++i) { | |
1057 | HV_VirtAddrRange range = hv_inquire_virtual(i); | |
1058 | if (range.size == 0) | |
1059 | break; | |
1060 | if (range.start <= MEM_USER_INTRPT && | |
1061 | range.start + range.size >= MEM_HV_INTRPT) | |
a78c942d | 1062 | user_kernel_ok = 1; |
867e359b CM |
1063 | if (range.start == 0) |
1064 | max_va = range.size; | |
1065 | BUG_ON(range.start + range.size > list_va); | |
1066 | } | |
a78c942d CM |
1067 | if (!user_kernel_ok) |
1068 | early_panic("Hypervisor not configured for user/kernel VAs\n"); | |
867e359b CM |
1069 | if (max_va == 0) |
1070 | early_panic("Hypervisor not configured for low VAs\n"); | |
1071 | if (max_va < KERNEL_HIGH_VADDR) | |
1072 | early_panic("Hypervisor max VA %#lx smaller than %#lx\n", | |
1073 | max_va, KERNEL_HIGH_VADDR); | |
1074 | ||
1075 | /* Kernel PCs must have their high bit set; see intvec.S. */ | |
1076 | if ((long)VMALLOC_START >= 0) | |
1077 | early_panic( | |
1078 | "Linux VMALLOC region below the 2GB line (%#lx)!\n" | |
1079 | "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" | |
1080 | "or smaller VMALLOC_RESERVE.\n", | |
1081 | VMALLOC_START); | |
1082 | #endif | |
1083 | } | |
1084 | ||
1085 | /* | |
1086 | * cpu_lotar_map lists all the cpus that are valid for the supervisor | |
1087 | * to cache data on at a page level, i.e. what cpus can be placed in | |
1088 | * the LOTAR field of a PTE. It is equivalent to the set of possible | |
1089 | * cpus plus any other cpus that are willing to share their cache. | |
1090 | * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR). | |
1091 | */ | |
1092 | struct cpumask __write_once cpu_lotar_map; | |
1093 | EXPORT_SYMBOL(cpu_lotar_map); | |
1094 | ||
1095 | #if CHIP_HAS_CBOX_HOME_MAP() | |
1096 | /* | |
1097 | * hash_for_home_map lists all the tiles that hash-for-home data | |
1098 | * will be cached on. Note that this may includes tiles that are not | |
1099 | * valid for this supervisor to use otherwise (e.g. if a hypervisor | |
1100 | * device is being shared between multiple supervisors). | |
1101 | * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE). | |
1102 | */ | |
1103 | struct cpumask hash_for_home_map; | |
1104 | EXPORT_SYMBOL(hash_for_home_map); | |
1105 | #endif | |
1106 | ||
1107 | /* | |
1108 | * cpu_cacheable_map lists all the cpus whose caches the hypervisor can | |
5f054e31 | 1109 | * flush on our behalf. It is set to cpu_possible_mask OR'ed with |
867e359b CM |
1110 | * hash_for_home_map, and it is what should be passed to |
1111 | * hv_flush_remote() to flush all caches. Note that if there are | |
1112 | * dedicated hypervisor driver tiles that have authorized use of their | |
1113 | * cache, those tiles will only appear in cpu_lotar_map, NOT in | |
1114 | * cpu_cacheable_map, as they are a special case. | |
1115 | */ | |
1116 | struct cpumask __write_once cpu_cacheable_map; | |
1117 | EXPORT_SYMBOL(cpu_cacheable_map); | |
1118 | ||
1119 | static __initdata struct cpumask disabled_map; | |
1120 | ||
1121 | static int __init disabled_cpus(char *str) | |
1122 | { | |
1123 | int boot_cpu = smp_processor_id(); | |
1124 | ||
1125 | if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) | |
1126 | return -EINVAL; | |
1127 | if (cpumask_test_cpu(boot_cpu, &disabled_map)) { | |
0707ad30 | 1128 | pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); |
867e359b CM |
1129 | cpumask_clear_cpu(boot_cpu, &disabled_map); |
1130 | } | |
1131 | return 0; | |
1132 | } | |
1133 | ||
1134 | early_param("disabled_cpus", disabled_cpus); | |
1135 | ||
0707ad30 | 1136 | void __init print_disabled_cpus(void) |
867e359b CM |
1137 | { |
1138 | if (!cpumask_empty(&disabled_map)) { | |
1139 | char buf[100]; | |
1140 | cpulist_scnprintf(buf, sizeof(buf), &disabled_map); | |
0707ad30 | 1141 | pr_info("CPUs not available for Linux: %s\n", buf); |
867e359b CM |
1142 | } |
1143 | } | |
1144 | ||
1145 | static void __init setup_cpu_maps(void) | |
1146 | { | |
1147 | struct cpumask hv_disabled_map, cpu_possible_init; | |
1148 | int boot_cpu = smp_processor_id(); | |
1149 | int cpus, i, rc; | |
1150 | ||
1151 | /* Learn which cpus are allowed by the hypervisor. */ | |
1152 | rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL, | |
1153 | (HV_VirtAddr) cpumask_bits(&cpu_possible_init), | |
1154 | sizeof(cpu_cacheable_map)); | |
1155 | if (rc < 0) | |
1156 | early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc); | |
1157 | if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init)) | |
1158 | early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu); | |
1159 | ||
1160 | /* Compute the cpus disabled by the hvconfig file. */ | |
1161 | cpumask_complement(&hv_disabled_map, &cpu_possible_init); | |
1162 | ||
1163 | /* Include them with the cpus disabled by "disabled_cpus". */ | |
1164 | cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map); | |
1165 | ||
1166 | /* | |
1167 | * Disable every cpu after "setup_max_cpus". But don't mark | |
1168 | * as disabled the cpus that are outside of our initial rectangle, | |
1169 | * since that turns out to be confusing. | |
1170 | */ | |
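	/*
	 * A hedged worked example: on an 8x8 grid booted with
	 * "maxcpus=4", the code below counts the boot cpu plus the
	 * first three other non-disabled cpus, marks everything after
	 * that (up to cpu 63) disabled, and then clears any bits at or
	 * beyond 64 so cpus outside the rectangle stay unmarked.
	 */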
	cpus = 1;			  /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = *cpu_possible_mask;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
#else
	cpu_cacheable_map = *cpu_possible_mask;
#endif
}
1210 | ||
1211 | ||
1212 | static int __init dataplane(char *str) | |
1213 | { | |
0707ad30 | 1214 | pr_warning("WARNING: dataplane support disabled in this kernel\n"); |
867e359b CM |
1215 | return 0; |
1216 | } | |
1217 | ||
1218 | early_param("dataplane", dataplane); | |
1219 | ||
1220 | #ifdef CONFIG_CMDLINE_BOOL | |
1221 | static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE; | |
1222 | #endif | |
1223 | ||
1224 | void __init setup_arch(char **cmdline_p) | |
1225 | { | |
1226 | int len; | |
1227 | ||
1228 | #if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE) | |
1229 | len = hv_get_command_line((HV_VirtAddr) boot_command_line, | |
1230 | COMMAND_LINE_SIZE); | |
1231 | if (boot_command_line[0]) | |
0707ad30 CM |
1232 | pr_warning("WARNING: ignoring dynamic command line \"%s\"\n", |
1233 | boot_command_line); | |
867e359b CM |
1234 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
1235 | #else | |
1236 | char *hv_cmdline; | |
1237 | #if defined(CONFIG_CMDLINE_BOOL) | |
1238 | if (builtin_cmdline[0]) { | |
1239 | int builtin_len = strlcpy(boot_command_line, builtin_cmdline, | |
1240 | COMMAND_LINE_SIZE); | |
1241 | if (builtin_len < COMMAND_LINE_SIZE-1) | |
1242 | boot_command_line[builtin_len++] = ' '; | |
1243 | hv_cmdline = &boot_command_line[builtin_len]; | |
1244 | len = COMMAND_LINE_SIZE - builtin_len; | |
1245 | } else | |
1246 | #endif | |
1247 | { | |
1248 | hv_cmdline = boot_command_line; | |
1249 | len = COMMAND_LINE_SIZE; | |
1250 | } | |
1251 | len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len); | |
1252 | if (len < 0 || len > COMMAND_LINE_SIZE) | |
1253 | early_panic("hv_get_command_line failed: %d\n", len); | |
1254 | #endif | |
1255 | ||
1256 | *cmdline_p = boot_command_line; | |
1257 | ||
1258 | /* Set disabled_map and setup_max_cpus very early */ | |
1259 | parse_early_param(); | |
1260 | ||
1261 | /* Make sure the kernel is compatible with the hypervisor. */ | |
1262 | validate_hv(); | |
1263 | validate_va(); | |
1264 | ||
1265 | setup_cpu_maps(); | |
1266 | ||
1267 | ||
1268 | #ifdef CONFIG_PCI | |
1269 | /* | |
1270 | * Initialize the PCI structures. This is done before memory | |
1271 | * setup so that we know whether or not a pci_reserve region | |
1272 | * is necessary. | |
1273 | */ | |
1274 | if (tile_pci_init() == 0) | |
1275 | pci_reserve_mb = 0; | |
1276 | ||
1277 | /* PCI systems reserve a region just below 4GB for mapping iomem. */ | |
1278 | pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT)); | |
1279 | pci_reserve_start_pfn = pci_reserve_end_pfn - | |
1280 | (pci_reserve_mb << (20 - PAGE_SHIFT)); | |
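	/*
	 * A hedged worked example: with 4KB pages and the default 64MB
	 * reservation, pci_reserve_end_pfn is 0x100000 (the 4GB line)
	 * and pci_reserve_start_pfn is 0xfc000, so PAs
	 * 0xfc000000..0xffffffff stay free for PCI BAR mappings.
	 */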
#endif

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}
1305 | ||
1306 | ||
1307 | /* | |
1308 | * Set up per-cpu memory. | |
1309 | */ | |
1310 | ||
1311 | unsigned long __per_cpu_offset[NR_CPUS] __write_once; | |
1312 | EXPORT_SYMBOL(__per_cpu_offset); | |
1313 | ||
1314 | static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 }; | |
1315 | static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 }; | |
1316 | ||
1317 | /* | |
1318 | * As the percpu code allocates pages, we return the pages from the | |
1319 | * end of the node for the specified cpu. | |
1320 | */ | |
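/*
 * (These pages were set aside in zone_sizes_init(), which recorded
 * the base of each node's reservation in node_percpu_pfn[] and its
 * size in node_percpu[]; this allocator simply walks forward through
 * that reservation, node by node.)
 */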
static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
{
	int nid = cpu_to_node(cpu);
	unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid];

	BUG_ON(size % PAGE_SIZE != 0);
	pfn_offset[nid] += size / PAGE_SIZE;
	BUG_ON(node_percpu[nid] < size);
	node_percpu[nid] -= size;
	if (percpu_pfn[cpu] == 0)
		percpu_pfn[cpu] = pfn;
	return pfn_to_kaddr(pfn);
}

/*
 * Pages reserved for percpu memory are not freeable, and in any case we are
 * on a short path to panic() in setup_per_cpu_area() at this point anyway.
 */
static void __init pcpu_fc_free(void *ptr, size_t size)
{
}

/*
 * Set up vmalloc page tables using bootmem for the percpu code.
 */
static void __init pcpu_fc_populate_pte(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	BUG_ON(pgd_addr_invalid(addr));
	if (addr < VMALLOC_START || addr >= VMALLOC_END)
		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
		      " try increasing CONFIG_VMALLOC_RESERVE\n",
		      addr, VMALLOC_START, VMALLOC_END);

	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_present(*pmd)) {
		BUG_ON(pmd_huge_page(*pmd));
	} else {
		pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE,
				      HV_PAGE_TABLE_ALIGN, 0);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}
}

void __init setup_per_cpu_areas(void)
{
	struct page *pg;
	unsigned long delta, pfn, lowmem_va;
	unsigned long size = percpu_size();
	char *ptr;
	int rc, cpu, i;

	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc,
				   pcpu_fc_free, pcpu_fc_populate_pte);
	if (rc < 0)
		panic("Cannot initialize percpu area (err=%d)", rc);

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];

		/* finv the copy out of cache so we can change homecache */
		ptr = pcpu_base_addr + pcpu_unit_offsets[cpu];
		__finv_buffer(ptr, size);
		pfn = percpu_pfn[cpu];

		/* Rewrite the page tables to cache on that cpu */
		pg = pfn_to_page(pfn);
		for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {

			/* Update the vmalloc mapping and page home. */
			pte_t *ptep =
				virt_to_pte(NULL, (unsigned long)ptr + i);
			pte_t pte = *ptep;
			BUG_ON(pfn != pte_pfn(pte));
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
			pte = set_remote_cache_cpu(pte, cpu);
			set_pte(ptep, pte);

			/* Update the lowmem mapping for consistency. */
			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
			ptep = virt_to_pte(NULL, lowmem_va);
			if (pte_huge(*ptep)) {
				printk(KERN_DEBUG "early shatter of huge page"
				       " at %#lx\n", lowmem_va);
				shatter_pmd((pmd_t *)ptep);
				ptep = virt_to_pte(NULL, lowmem_va);
				BUG_ON(pte_huge(*ptep));
			}
			BUG_ON(pfn != pte_pfn(*ptep));
			set_pte(ptep, pte);
		}
	}

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]);

	/* Make sure the finv's have completed. */
	mb_incoherent();

	/* Flush the TLB so we reference it properly from here on out. */
	local_flush_tlb_all();
}
1431 | ||
1432 | static struct resource data_resource = { | |
1433 | .name = "Kernel data", | |
1434 | .start = 0, | |
1435 | .end = 0, | |
1436 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | |
1437 | }; | |
1438 | ||
1439 | static struct resource code_resource = { | |
1440 | .name = "Kernel code", | |
1441 | .start = 0, | |
1442 | .end = 0, | |
1443 | .flags = IORESOURCE_BUSY | IORESOURCE_MEM | |
1444 | }; | |
1445 | ||
1446 | /* | |
1447 | * We reserve all resources above 4GB so that PCI won't try to put | |
1448 | * mappings above 4GB; the standard allows that for some devices but | |
1449 | * the probing code trunates values to 32 bits. | |
1450 | */ | |
1451 | #ifdef CONFIG_PCI | |
1452 | static struct resource* __init | |
1453 | insert_non_bus_resource(void) | |
1454 | { | |
1455 | struct resource *res = | |
1456 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | |
1457 | res->name = "Non-Bus Physical Address Space"; | |
1458 | res->start = (1ULL << 32); | |
1459 | res->end = -1LL; | |
1460 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | |
1461 | if (insert_resource(&iomem_resource, res)) { | |
1462 | kfree(res); | |
1463 | return NULL; | |
1464 | } | |
1465 | return res; | |
1466 | } | |
1467 | #endif | |
1468 | ||
1469 | static struct resource* __init | |
1470 | insert_ram_resource(u64 start_pfn, u64 end_pfn) | |
1471 | { | |
1472 | struct resource *res = | |
1473 | kzalloc(sizeof(struct resource), GFP_ATOMIC); | |
1474 | res->name = "System RAM"; | |
1475 | res->start = start_pfn << PAGE_SHIFT; | |
1476 | res->end = (end_pfn << PAGE_SHIFT) - 1; | |
1477 | res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; | |
1478 | if (insert_resource(&iomem_resource, res)) { | |
1479 | kfree(res); | |
1480 | return NULL; | |
1481 | } | |
1482 | return res; | |
1483 | } | |
1484 | ||
1485 | /* | |
1486 | * Request address space for all standard resources | |
1487 | * | |
1488 | * If the system includes PCI root complex drivers, we need to create | |
1489 | * a window just below 4GB where PCI BARs can be mapped. | |
1490 | */ | |
1491 | static int __init request_standard_resources(void) | |
1492 | { | |
1493 | int i; | |
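	/*
	 * The kernel text is linked at MEM_SV_INTRPT but occupies the
	 * same physical pages as the PAGE_OFFSET mapping; CODE_DELTA
	 * converts a linked-text address back to its PAGE_OFFSET
	 * equivalent so that __pa() below yields the right range.
	 */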
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

	iomem_resource.end = -1LL;
#ifdef CONFIG_PCI
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#ifdef CONFIG_PCI
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);