Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * pSeries NUMA support | |
3 | * | |
4 | * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU General Public License | |
8 | * as published by the Free Software Foundation; either version | |
9 | * 2 of the License, or (at your option) any later version. | |
10 | */ | |
11 | #include <linux/threads.h> | |
12 | #include <linux/bootmem.h> | |
13 | #include <linux/init.h> | |
14 | #include <linux/mm.h> | |
15 | #include <linux/mmzone.h> | |
4b16f8e2 | 16 | #include <linux/export.h> |
1da177e4 LT |
17 | #include <linux/nodemask.h> |
18 | #include <linux/cpu.h> | |
19 | #include <linux/notifier.h> | |
95f72d1e | 20 | #include <linux/memblock.h> |
6df1646e | 21 | #include <linux/of.h> |
06eccea6 | 22 | #include <linux/pfn.h> |
9eff1a38 JL |
23 | #include <linux/cpuset.h> |
24 | #include <linux/node.h> | |
30c05350 | 25 | #include <linux/stop_machine.h> |
e04fa612 NF |
26 | #include <linux/proc_fs.h> |
27 | #include <linux/seq_file.h> | |
28 | #include <linux/uaccess.h> | |
191a7120 | 29 | #include <linux/slab.h> |
3be7db6a | 30 | #include <asm/cputhreads.h> |
45fb6cea | 31 | #include <asm/sparsemem.h> |
d9b2b2a2 | 32 | #include <asm/prom.h> |
2249ca9d | 33 | #include <asm/smp.h> |
d4edc5b6 SB |
34 | #include <asm/cputhreads.h> |
35 | #include <asm/topology.h> | |
9eff1a38 JL |
36 | #include <asm/firmware.h> |
37 | #include <asm/paca.h> | |
39bf990e | 38 | #include <asm/hvcall.h> |
ae3a197e | 39 | #include <asm/setup.h> |
176bbf14 | 40 | #include <asm/vdso.h> |
1da177e4 LT |
41 | |
42 | static int numa_enabled = 1; | |
43 | ||
1daa6d08 BS |
44 | static char *cmdline __initdata; |
45 | ||
1da177e4 LT |
46 | static int numa_debug; |
47 | #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); } | |
48 | ||
45fb6cea | 49 | int numa_cpu_lookup_table[NR_CPUS]; |
25863de0 | 50 | cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; |
1da177e4 | 51 | struct pglist_data *node_data[MAX_NUMNODES]; |
45fb6cea AB |
52 | |
53 | EXPORT_SYMBOL(numa_cpu_lookup_table); | |
25863de0 | 54 | EXPORT_SYMBOL(node_to_cpumask_map); |
45fb6cea AB |
55 | EXPORT_SYMBOL(node_data); |
56 | ||
1da177e4 | 57 | static int min_common_depth; |
237a0989 | 58 | static int n_mem_addr_cells, n_mem_size_cells; |
41eab6f8 AB |
59 | static int form1_affinity; |
60 | ||
61 | #define MAX_DISTANCE_REF_POINTS 4 | |
62 | static int distance_ref_points_depth; | |
b08a2a12 | 63 | static const __be32 *distance_ref_points; |
41eab6f8 | 64 | static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; |
1da177e4 | 65 | |
25863de0 AB |
66 | /* |
67 | * Allocate node_to_cpumask_map based on number of available nodes | |
68 | * Requires node_possible_map to be valid. | |
69 | * | |
9512938b | 70 | * Note: cpumask_of_node() is not valid until after this is done. |
25863de0 AB |
71 | */ |
72 | static void __init setup_node_to_cpumask_map(void) | |
73 | { | |
f9d531b8 | 74 | unsigned int node; |
25863de0 AB |
75 | |
76 | /* setup nr_node_ids if not done yet */ | |
f9d531b8 CS |
77 | if (nr_node_ids == MAX_NUMNODES) |
78 | setup_nr_node_ids(); | |
25863de0 AB |
79 | |
80 | /* allocate the map */ | |
81 | for (node = 0; node < nr_node_ids; node++) | |
82 | alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); | |
83 | ||
84 | /* cpumask_of_node() will now work */ | |
85 | dbg("Node to cpumask map for %d nodes\n", nr_node_ids); | |
86 | } | |
87 | ||
55671f3c | 88 | static int __init fake_numa_create_new_node(unsigned long end_pfn, |
1daa6d08 BS |
89 | unsigned int *nid) |
90 | { | |
91 | unsigned long long mem; | |
92 | char *p = cmdline; | |
93 | static unsigned int fake_nid; | |
94 | static unsigned long long curr_boundary; | |
95 | ||
96 | /* | |
97 | * Modify node id, iff we started creating NUMA nodes | |
98 | * We want to continue from where we left off the last time | |
99 | */ | |
100 | if (fake_nid) | |
101 | *nid = fake_nid; | |
102 | /* | |
103 | * In case there are no more arguments to parse, the | |
104 | * node_id should be the same as the last fake node id | |
105 | * (we've handled this above). | |
106 | */ | |
107 | if (!p) | |
108 | return 0; | |
109 | ||
110 | mem = memparse(p, &p); | |
111 | if (!mem) | |
112 | return 0; | |
113 | ||
114 | if (mem < curr_boundary) | |
115 | return 0; | |
116 | ||
117 | curr_boundary = mem; | |
118 | ||
119 | if ((end_pfn << PAGE_SHIFT) > mem) { | |
120 | /* | |
121 | * Skip commas and spaces | |
122 | */ | |
123 | while (*p == ',' || *p == ' ' || *p == '\t') | |
124 | p++; | |
125 | ||
126 | cmdline = p; | |
127 | fake_nid++; | |
128 | *nid = fake_nid; | |
129 | dbg("created new fake_node with id %d\n", fake_nid); | |
130 | return 1; | |
131 | } | |
132 | return 0; | |
133 | } | |
134 | ||
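For readers following the numa=fake= handling above, here is a short worked walk-through; the boundary values are purely hypothetical:

/*
 * Illustrative walk-through, assuming a hypothetical boot option
 * "numa=fake=1G,2G,4G":
 *
 *   - early_numa() leaves cmdline pointing at "1G,2G,4G".
 *   - Each value parsed by memparse() is an absolute boundary, so the
 *     list must be non-decreasing; a smaller value is simply ignored.
 *   - As the callers below scan memory upwards, the first range ending
 *     above 1G bumps fake_nid to 1 and advances cmdline to "2G,4G";
 *     crossing 2G bumps it to 2, and crossing 4G to 3.
 *   - Ranges scanned before a boundary is crossed keep the previously
 *     assigned fake node id, so memory ends up carved into at most
 *     four fake nodes.
 */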
8f64e1f2 | 135 | /* |
5dfe8660 | 136 | * get_node_active_region - Return active region containing pfn |
e8170372 | 137 | * Active range returned is empty if none found. |
5dfe8660 TH |
138 | * @pfn: The page to return the region for |
139 | * @node_ar: Filled in with the active region containing @pfn | |
8f64e1f2 | 140 | */ |
5dfe8660 TH |
141 | static void __init get_node_active_region(unsigned long pfn, |
142 | struct node_active_region *node_ar) | |
8f64e1f2 | 143 | { |
5dfe8660 TH |
144 | unsigned long start_pfn, end_pfn; |
145 | int i, nid; | |
146 | ||
147 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) { | |
148 | if (pfn >= start_pfn && pfn < end_pfn) { | |
149 | node_ar->nid = nid; | |
150 | node_ar->start_pfn = start_pfn; | |
151 | node_ar->end_pfn = end_pfn; | |
152 | break; | |
153 | } | |
154 | } | |
8f64e1f2 JT |
155 | } |
156 | ||
d4edc5b6 SB |
157 | static void reset_numa_cpu_lookup_table(void) |
158 | { | |
159 | unsigned int cpu; | |
160 | ||
161 | for_each_possible_cpu(cpu) | |
162 | numa_cpu_lookup_table[cpu] = -1; | |
163 | } | |
164 | ||
165 | static void update_numa_cpu_lookup_table(unsigned int cpu, int node) | |
1da177e4 LT |
166 | { |
167 | numa_cpu_lookup_table[cpu] = node; | |
d4edc5b6 SB |
168 | } |
169 | ||
170 | static void map_cpu_to_node(int cpu, int node) | |
171 | { | |
172 | update_numa_cpu_lookup_table(cpu, node); | |
45fb6cea | 173 | |
bf4b85b0 NL |
174 | dbg("adding cpu %d to node %d\n", cpu, node); |
175 | ||
25863de0 AB |
176 | if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node]))) |
177 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | |
1da177e4 LT |
178 | } |
179 | ||
39bf990e | 180 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR) |
1da177e4 LT |
181 | static void unmap_cpu_from_node(unsigned long cpu) |
182 | { | |
183 | int node = numa_cpu_lookup_table[cpu]; | |
184 | ||
185 | dbg("removing cpu %lu from node %d\n", cpu, node); | |
186 | ||
25863de0 | 187 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { |
429f4d8d | 188 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
1da177e4 LT |
189 | } else { |
190 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", | |
191 | cpu, node); | |
192 | } | |
193 | } | |
39bf990e | 194 | #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ |
1da177e4 | 195 | |
1da177e4 | 196 | /* must hold reference to node during call */ |
b08a2a12 | 197 | static const __be32 *of_get_associativity(struct device_node *dev) |
1da177e4 | 198 | { |
e2eb6392 | 199 | return of_get_property(dev, "ibm,associativity", NULL); |
1da177e4 LT |
200 | } |
201 | ||
cf00085d C |
202 | /* |
203 | * Returns the property linux,drconf-usable-memory if | |
204 | * it exists (the property exists only in kexec/kdump kernels, | |
205 | * added by kexec-tools) | |
206 | */ | |
b08a2a12 | 207 | static const __be32 *of_get_usable_memory(struct device_node *memory) |
cf00085d | 208 | { |
b08a2a12 | 209 | const __be32 *prop; |
cf00085d C |
210 | u32 len; |
211 | prop = of_get_property(memory, "linux,drconf-usable-memory", &len); | |
212 | if (!prop || len < sizeof(unsigned int)) | |
ec32dd66 | 213 | return NULL; |
cf00085d C |
214 | return prop; |
215 | } | |
216 | ||
41eab6f8 AB |
217 | int __node_distance(int a, int b) |
218 | { | |
219 | int i; | |
220 | int distance = LOCAL_DISTANCE; | |
221 | ||
222 | if (!form1_affinity) | |
7122beee | 223 | return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE); |
41eab6f8 AB |
224 | |
225 | for (i = 0; i < distance_ref_points_depth; i++) { | |
226 | if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) | |
227 | break; | |
228 | ||
229 | /* Double the distance for each NUMA level */ | |
230 | distance *= 2; | |
231 | } | |
232 | ||
233 | return distance; | |
234 | } | |
12c743eb | 235 | EXPORT_SYMBOL(__node_distance); |
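A small worked example of the distance calculation above; the lookup-table contents are hypothetical:

/*
 * Hypothetical form 1 example with distance_ref_points_depth = 2:
 *
 *   distance_lookup_table[0] = { 0, 0 }
 *   distance_lookup_table[1] = { 1, 0 }
 *   distance_lookup_table[2] = { 2, 1 }
 *
 *   __node_distance(0, 0) -> 10  (LOCAL_DISTANCE, level 0 matches)
 *   __node_distance(0, 1) -> 20  (level 0 differs, level 1 matches)
 *   __node_distance(0, 2) -> 40  (both levels differ: 10 * 2 * 2)
 *
 * Without form 1 affinity the result is simply LOCAL_DISTANCE (10) for
 * a == b and REMOTE_DISTANCE (20) otherwise.
 */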
41eab6f8 AB |
236 | |
237 | static void initialize_distance_lookup_table(int nid, | |
b08a2a12 | 238 | const __be32 *associativity) |
41eab6f8 AB |
239 | { |
240 | int i; | |
241 | ||
242 | if (!form1_affinity) | |
243 | return; | |
244 | ||
245 | for (i = 0; i < distance_ref_points_depth; i++) { | |
b08a2a12 AP |
246 | const __be32 *entry; |
247 | ||
248 | entry = &associativity[be32_to_cpu(distance_ref_points[i])]; | |
249 | distance_lookup_table[nid][i] = of_read_number(entry, 1); | |
41eab6f8 AB |
250 | } |
251 | } | |
252 | ||
482ec7c4 NL |
253 | /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa |
254 | * info is found. | |
255 | */ | |
b08a2a12 | 256 | static int associativity_to_nid(const __be32 *associativity) |
1da177e4 | 257 | { |
482ec7c4 | 258 | int nid = -1; |
1da177e4 LT |
259 | |
260 | if (min_common_depth == -1) | |
482ec7c4 | 261 | goto out; |
1da177e4 | 262 | |
b08a2a12 AP |
263 | if (of_read_number(associativity, 1) >= min_common_depth) |
264 | nid = of_read_number(&associativity[min_common_depth], 1); | |
bc16a759 NL |
265 | |
266 | /* POWER4 LPAR uses 0xffff as invalid node */ | |
482ec7c4 NL |
267 | if (nid == 0xffff || nid >= MAX_NUMNODES) |
268 | nid = -1; | |
41eab6f8 | 269 | |
b08a2a12 AP |
270 | if (nid > 0 && |
271 | of_read_number(associativity, 1) >= distance_ref_points_depth) | |
9eff1a38 | 272 | initialize_distance_lookup_table(nid, associativity); |
41eab6f8 | 273 | |
482ec7c4 | 274 | out: |
cf950b7a | 275 | return nid; |
1da177e4 LT |
276 | } |
277 | ||
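To make the min_common_depth indexing above concrete, a hypothetical example:

/*
 * Hypothetical example with min_common_depth = 4 and a device whose
 * associativity property reads:
 *
 *   ibm,associativity = <5 0 1 3 7 7>
 *
 * The first cell (5) is the number of domains that follow.  Since
 * 5 >= min_common_depth, associativity_to_nid() returns the cell at
 * index 4, i.e. nid 7.  A value of 0xffff, or anything >= MAX_NUMNODES,
 * would instead be treated as "no useful NUMA information" (-1).
 */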
9eff1a38 JL |
278 | /* Returns the nid associated with the given device tree node, |
279 | * or -1 if not found. | |
280 | */ | |
281 | static int of_node_to_nid_single(struct device_node *device) | |
282 | { | |
283 | int nid = -1; | |
b08a2a12 | 284 | const __be32 *tmp; |
9eff1a38 JL |
285 | |
286 | tmp = of_get_associativity(device); | |
287 | if (tmp) | |
288 | nid = associativity_to_nid(tmp); | |
289 | return nid; | |
290 | } | |
291 | ||
953039c8 JK |
292 | /* Walk the device tree upwards, looking for an associativity id */ |
293 | int of_node_to_nid(struct device_node *device) | |
294 | { | |
295 | struct device_node *tmp; | |
296 | int nid = -1; | |
297 | ||
298 | of_node_get(device); | |
299 | while (device) { | |
300 | nid = of_node_to_nid_single(device); | |
301 | if (nid != -1) | |
302 | break; | |
303 | ||
304 | tmp = device; | |
305 | device = of_get_parent(tmp); | |
306 | of_node_put(tmp); | |
307 | } | |
308 | of_node_put(device); | |
309 | ||
310 | return nid; | |
311 | } | |
312 | EXPORT_SYMBOL_GPL(of_node_to_nid); | |
313 | ||
1da177e4 LT |
314 | static int __init find_min_common_depth(void) |
315 | { | |
41eab6f8 | 316 | int depth; |
e70606eb | 317 | struct device_node *root; |
1da177e4 | 318 | |
1c8ee733 DS |
319 | if (firmware_has_feature(FW_FEATURE_OPAL)) |
320 | root = of_find_node_by_path("/ibm,opal"); | |
321 | else | |
322 | root = of_find_node_by_path("/rtas"); | |
e70606eb ME |
323 | if (!root) |
324 | root = of_find_node_by_path("/"); | |
1da177e4 LT |
325 | |
326 | /* | |
41eab6f8 AB |
327 | * This property is a set of 32-bit integers, each representing |
328 | * an index into the ibm,associativity nodes. | |
329 | * | |
330 | * With form 0 affinity the first integer is for an SMP configuration | |
331 | * (should be all 0's) and the second is for a normal NUMA | |
332 | * configuration. We have only one level of NUMA. | |
333 | * | |
334 | * With form 1 affinity the first integer is the most significant | |
335 | * NUMA boundary and the following are progressively less significant | |
336 | * boundaries. There can be more than one level of NUMA. | |
1da177e4 | 337 | */ |
e70606eb | 338 | distance_ref_points = of_get_property(root, |
41eab6f8 AB |
339 | "ibm,associativity-reference-points", |
340 | &distance_ref_points_depth); | |
341 | ||
342 | if (!distance_ref_points) { | |
343 | dbg("NUMA: ibm,associativity-reference-points not found.\n"); | |
344 | goto err; | |
345 | } | |
346 | ||
347 | distance_ref_points_depth /= sizeof(int); | |
1da177e4 | 348 | |
8002b0c5 NF |
349 | if (firmware_has_feature(FW_FEATURE_OPAL) || |
350 | firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) { | |
351 | dbg("Using form 1 affinity\n"); | |
1c8ee733 | 352 | form1_affinity = 1; |
4b83c330 AB |
353 | } |
354 | ||
41eab6f8 | 355 | if (form1_affinity) { |
b08a2a12 | 356 | depth = of_read_number(distance_ref_points, 1); |
1da177e4 | 357 | } else { |
41eab6f8 AB |
358 | if (distance_ref_points_depth < 2) { |
359 | printk(KERN_WARNING "NUMA: " | |
360 | "short ibm,associativity-reference-points\n"); | |
361 | goto err; | |
362 | } | |
363 | ||
b08a2a12 | 364 | depth = of_read_number(&distance_ref_points[1], 1); |
1da177e4 | 365 | } |
1da177e4 | 366 | |
41eab6f8 AB |
367 | /* |
368 | * Warn and cap if the hardware supports more than | |
369 | * MAX_DISTANCE_REF_POINTS domains. | |
370 | */ | |
371 | if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) { | |
372 | printk(KERN_WARNING "NUMA: distance array capped at " | |
373 | "%d entries\n", MAX_DISTANCE_REF_POINTS); | |
374 | distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; | |
375 | } | |
376 | ||
e70606eb | 377 | of_node_put(root); |
1da177e4 | 378 | return depth; |
41eab6f8 AB |
379 | |
380 | err: | |
e70606eb | 381 | of_node_put(root); |
41eab6f8 | 382 | return -1; |
1da177e4 LT |
383 | } |
384 | ||
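Two brief, hypothetical examples of how the reference-points property above is interpreted:

/*
 * Hypothetical ibm,associativity-reference-points contents:
 *
 *   Form 1 (FW_FEATURE_OPAL or FW_FEATURE_TYPE1_AFFINITY):
 *     reference-points = <0x4 0x2>
 *     -> distance_ref_points_depth = 2, min_common_depth = 4
 *        (the first entry is the most significant NUMA boundary)
 *
 *   Form 0:
 *     reference-points = <0x0 0x3>
 *     -> the second entry is used, so min_common_depth = 3
 *        (a single level of NUMA)
 */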
84c9fdd1 | 385 | static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) |
1da177e4 LT |
386 | { |
387 | struct device_node *memory = NULL; | |
1da177e4 LT |
388 | |
389 | memory = of_find_node_by_type(memory, "memory"); | |
54c23310 | 390 | if (!memory) |
84c9fdd1 | 391 | panic("numa.c: No memory nodes found!"); |
54c23310 | 392 | |
a8bda5dd | 393 | *n_addr_cells = of_n_addr_cells(memory); |
9213feea | 394 | *n_size_cells = of_n_size_cells(memory); |
84c9fdd1 | 395 | of_node_put(memory); |
1da177e4 LT |
396 | } |
397 | ||
b08a2a12 | 398 | static unsigned long read_n_cells(int n, const __be32 **buf) |
1da177e4 LT |
399 | { |
400 | unsigned long result = 0; | |
401 | ||
402 | while (n--) { | |
b08a2a12 | 403 | result = (result << 32) | of_read_number(*buf, 1); |
1da177e4 LT |
404 | (*buf)++; |
405 | } | |
406 | return result; | |
407 | } | |
408 | ||
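A quick worked example of the cell assembly above (cell values hypothetical):

/*
 * Example: n = 2 and *buf pointing at the cells { 0x1, 0x80000000 }:
 *
 *   pass 1: result = (0x0 << 32) | 0x1        = 0x1
 *   pass 2: result = (0x1 << 32) | 0x80000000 = 0x180000000
 *
 * i.e. a 6 GiB address assembled from two 32-bit cells, with *buf left
 * pointing just past the cells that were consumed.
 */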
8342681d | 409 | /* |
95f72d1e | 410 | * Read the next memblock list entry from the ibm,dynamic-memory property |
8342681d NF |
411 | * and return the information in the provided of_drconf_cell structure. |
412 | */ | |
b08a2a12 | 413 | static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp) |
8342681d | 414 | { |
b08a2a12 | 415 | const __be32 *cp; |
8342681d NF |
416 | |
417 | drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp); | |
418 | ||
419 | cp = *cellp; | |
b08a2a12 AP |
420 | drmem->drc_index = of_read_number(cp, 1); |
421 | drmem->reserved = of_read_number(&cp[1], 1); | |
422 | drmem->aa_index = of_read_number(&cp[2], 1); | |
423 | drmem->flags = of_read_number(&cp[3], 1); | |
8342681d NF |
424 | |
425 | *cellp = cp + 4; | |
426 | } | |
427 | ||
428 | /* | |
25985edc | 429 | * Retrieve and validate the ibm,dynamic-memory property of the device tree. |
8342681d | 430 | * |
95f72d1e YL |
431 | * The layout of the ibm,dynamic-memory property is a count N followed by | |
432 | * N memblock list entries. Each memblock list entry | |
25985edc | 433 | * contains information as laid out in the of_drconf_cell struct above. |
8342681d | 434 | */ |
b08a2a12 | 435 | static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm) |
8342681d | 436 | { |
b08a2a12 | 437 | const __be32 *prop; |
8342681d NF |
438 | u32 len, entries; |
439 | ||
440 | prop = of_get_property(memory, "ibm,dynamic-memory", &len); | |
441 | if (!prop || len < sizeof(unsigned int)) | |
442 | return 0; | |
443 | ||
b08a2a12 | 444 | entries = of_read_number(prop++, 1); |
8342681d NF |
445 | |
446 | /* Now that we know the number of entries, revalidate the size | |
447 | * of the property read in to ensure we have everything | |
448 | */ | |
449 | if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int)) | |
450 | return 0; | |
451 | ||
452 | *dm = prop; | |
453 | return entries; | |
454 | } | |
455 | ||
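For reference, a concrete (hypothetical) layout of the property parsed above, assuming n_mem_addr_cells = 2:

/*
 * Hypothetical ibm,dynamic-memory contents with n_mem_addr_cells = 2:
 *
 *   cell 0      : N = 2                  (number of LMB entries)
 *   cells 1-2   : base_addr of LMB 0     (read via read_n_cells)
 *   cells 3-6   : drc_index, reserved, aa_index, flags of LMB 0
 *   cells 7-8   : base_addr of LMB 1
 *   cells 9-12  : drc_index, reserved, aa_index, flags of LMB 1
 *
 * which is exactly the (entries * (n_mem_addr_cells + 4) + 1) cells
 * that the length check above revalidates.
 */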
456 | /* | |
25985edc | 457 | * Retrieve and validate the ibm,lmb-size property for drconf memory |
8342681d NF |
458 | * from the device tree. |
459 | */ | |
3fdfd990 | 460 | static u64 of_get_lmb_size(struct device_node *memory) |
8342681d | 461 | { |
b08a2a12 | 462 | const __be32 *prop; |
8342681d NF |
463 | u32 len; |
464 | ||
3fdfd990 | 465 | prop = of_get_property(memory, "ibm,lmb-size", &len); |
8342681d NF |
466 | if (!prop || len < sizeof(unsigned int)) |
467 | return 0; | |
468 | ||
469 | return read_n_cells(n_mem_size_cells, &prop); | |
470 | } | |
471 | ||
472 | struct assoc_arrays { | |
473 | u32 n_arrays; | |
474 | u32 array_sz; | |
b08a2a12 | 475 | const __be32 *arrays; |
8342681d NF |
476 | }; |
477 | ||
478 | /* | |
25985edc | 479 | * Retrieve and validate the list of associativity arrays for drconf |
8342681d NF |
480 | * memory from the ibm,associativity-lookup-arrays property of the |
481 | * device tree. | |
482 | * | |
483 | * The layout of the ibm,associativity-lookup-arrays property is a number N | |
484 | * indicating the number of associativity arrays, followed by a number M | |
485 | * indicating the size of each associativity array, followed by a list | |
486 | * of N associativity arrays. | |
487 | */ | |
488 | static int of_get_assoc_arrays(struct device_node *memory, | |
489 | struct assoc_arrays *aa) | |
490 | { | |
b08a2a12 | 491 | const __be32 *prop; |
8342681d NF |
492 | u32 len; |
493 | ||
494 | prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); | |
495 | if (!prop || len < 2 * sizeof(unsigned int)) | |
496 | return -1; | |
497 | ||
b08a2a12 AP |
498 | aa->n_arrays = of_read_number(prop++, 1); |
499 | aa->array_sz = of_read_number(prop++, 1); | |
8342681d | 500 | |
42b2aa86 | 501 | /* Now that we know the number of arrays and size of each array, |
8342681d NF |
502 | * revalidate the size of the property read in. |
503 | */ | |
504 | if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int)) | |
505 | return -1; | |
506 | ||
507 | aa->arrays = prop; | |
508 | return 0; | |
509 | } | |
510 | ||
511 | /* | |
512 | * This is like of_node_to_nid_single() for memory represented in the | |
513 | * ibm,dynamic-reconfiguration-memory node. | |
514 | */ | |
515 | static int of_drconf_to_nid_single(struct of_drconf_cell *drmem, | |
516 | struct assoc_arrays *aa) | |
517 | { | |
518 | int default_nid = 0; | |
519 | int nid = default_nid; | |
520 | int index; | |
521 | ||
522 | if (min_common_depth > 0 && min_common_depth <= aa->array_sz && | |
523 | !(drmem->flags & DRCONF_MEM_AI_INVALID) && | |
524 | drmem->aa_index < aa->n_arrays) { | |
525 | index = drmem->aa_index * aa->array_sz + min_common_depth - 1; | |
b08a2a12 | 526 | nid = of_read_number(&aa->arrays[index], 1); |
8342681d NF |
527 | |
528 | if (nid == 0xffff || nid >= MAX_NUMNODES) | |
529 | nid = default_nid; | |
530 | } | |
531 | ||
532 | return nid; | |
533 | } | |
534 | ||
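A short worked example of the aa_index lookup above; all values are hypothetical:

/*
 * Hypothetical example with min_common_depth = 4 and
 *
 *   ibm,associativity-lookup-arrays = <2 4  0 1 3 5  0 2 4 9>
 *
 * i.e. n_arrays = 2 and array_sz = 4, followed by the two arrays.
 * For an LMB with aa_index = 1 (and DRCONF_MEM_AI_INVALID clear):
 *
 *   index = 1 * 4 + 4 - 1 = 7   ->   aa->arrays[7] = 9   ->   nid 9
 *
 * An out-of-range aa_index, or a looked-up value of 0xffff or
 * >= MAX_NUMNODES, falls back to the default node 0.
 */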
1da177e4 LT |
535 | /* |
536 | * Figure out to which domain a cpu belongs and stick it there. | |
537 | * Return the id of the domain used. | |
538 | */ | |
061d19f2 | 539 | static int numa_setup_cpu(unsigned long lcpu) |
1da177e4 | 540 | { |
d4edc5b6 SB |
541 | int nid; |
542 | struct device_node *cpu; | |
543 | ||
544 | /* | |
545 | * If a valid cpu-to-node mapping is already available, use it | |
546 | * directly instead of querying the firmware, since it represents | |
547 | * the most recent mapping notified to us by the platform (eg: VPHN). | |
548 | */ | |
549 | if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) { | |
550 | map_cpu_to_node(lcpu, nid); | |
551 | return nid; | |
552 | } | |
553 | ||
554 | cpu = of_get_cpu_node(lcpu, NULL); | |
1da177e4 LT |
555 | |
556 | if (!cpu) { | |
557 | WARN_ON(1); | |
d4edc5b6 | 558 | nid = 0; |
1da177e4 LT |
559 | goto out; |
560 | } | |
561 | ||
953039c8 | 562 | nid = of_node_to_nid_single(cpu); |
1da177e4 | 563 | |
482ec7c4 | 564 | if (nid < 0 || !node_online(nid)) |
72c33688 | 565 | nid = first_online_node; |
1da177e4 | 566 | out: |
cf950b7a | 567 | map_cpu_to_node(lcpu, nid); |
1da177e4 LT |
568 | |
569 | of_node_put(cpu); | |
570 | ||
cf950b7a | 571 | return nid; |
1da177e4 LT |
572 | } |
573 | ||
68fb18aa SB |
574 | static void verify_cpu_node_mapping(int cpu, int node) |
575 | { | |
576 | int base, sibling, i; | |
577 | ||
578 | /* Verify that all the threads in the core belong to the same node */ | |
579 | base = cpu_first_thread_sibling(cpu); | |
580 | ||
581 | for (i = 0; i < threads_per_core; i++) { | |
582 | sibling = base + i; | |
583 | ||
584 | if (sibling == cpu || cpu_is_offline(sibling)) | |
585 | continue; | |
586 | ||
587 | if (cpu_to_node(sibling) != node) { | |
588 | WARN(1, "CPU thread siblings %d and %d don't belong" | |
589 | " to the same node!\n", cpu, sibling); | |
590 | break; | |
591 | } | |
592 | } | |
593 | } | |
594 | ||
061d19f2 | 595 | static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, |
1da177e4 LT |
596 | void *hcpu) |
597 | { | |
598 | unsigned long lcpu = (unsigned long)hcpu; | |
68fb18aa | 599 | int ret = NOTIFY_DONE, nid; |
1da177e4 LT |
600 | |
601 | switch (action) { | |
602 | case CPU_UP_PREPARE: | |
8bb78442 | 603 | case CPU_UP_PREPARE_FROZEN: |
68fb18aa SB |
604 | nid = numa_setup_cpu(lcpu); |
605 | verify_cpu_node_mapping((int)lcpu, nid); | |
1da177e4 LT |
606 | ret = NOTIFY_OK; |
607 | break; | |
608 | #ifdef CONFIG_HOTPLUG_CPU | |
609 | case CPU_DEAD: | |
8bb78442 | 610 | case CPU_DEAD_FROZEN: |
1da177e4 | 611 | case CPU_UP_CANCELED: |
8bb78442 | 612 | case CPU_UP_CANCELED_FROZEN: |
1da177e4 | 613 | unmap_cpu_from_node(lcpu); |
1da177e4 | 614 | ret = NOTIFY_OK; |
b00fc6ec | 615 | break; |
1da177e4 LT |
616 | #endif |
617 | } | |
618 | return ret; | |
619 | } | |
620 | ||
621 | /* | |
622 | * Check and possibly modify a memory region to enforce the memory limit. | |
623 | * | |
624 | * Returns the size the region should have to enforce the memory limit. | |
625 | * This will either be the original value of size, a truncated value, | |
626 | * or zero. If the returned value of size is 0 the region should be | |
25985edc | 627 | * discarded as it lies wholly above the memory limit. |
1da177e4 | 628 | */ |
45fb6cea AB |
629 | static unsigned long __init numa_enforce_memory_limit(unsigned long start, |
630 | unsigned long size) | |
1da177e4 LT |
631 | { |
632 | /* | |
95f72d1e | 633 | * We use memblock_end_of_DRAM() in here instead of memory_limit because |
1da177e4 | 634 | * we've already adjusted it for the limit and it takes care of |
fe55249d MM |
635 | * having memory holes below the limit. Also, in the case of |
636 | * iommu_is_off, memory_limit is not set but is implicitly enforced. | |
1da177e4 | 637 | */ |
1da177e4 | 638 | |
95f72d1e | 639 | if (start + size <= memblock_end_of_DRAM()) |
1da177e4 LT |
640 | return size; |
641 | ||
95f72d1e | 642 | if (start >= memblock_end_of_DRAM()) |
1da177e4 LT |
643 | return 0; |
644 | ||
95f72d1e | 645 | return memblock_end_of_DRAM() - start; |
1da177e4 LT |
646 | } |
647 | ||
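For instance (addresses hypothetical), with the end of DRAM at 4 GiB:

/*
 * Hypothetical example with memblock_end_of_DRAM() == 4 GiB:
 *
 *   numa_enforce_memory_limit(3.5 GiB, 1 GiB) -> 0.5 GiB  (truncated)
 *   numa_enforce_memory_limit(1 GiB,   1 GiB) -> 1 GiB    (unchanged)
 *   numa_enforce_memory_limit(5 GiB,   1 GiB) -> 0        (discarded)
 */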
cf00085d C |
648 | /* |
649 | * Reads the counter for a given entry in | |
650 | * linux,drconf-usable-memory property | |
651 | */ | |
b08a2a12 | 652 | static inline int __init read_usm_ranges(const __be32 **usm) |
cf00085d C |
653 | { |
654 | /* | |
3fdfd990 | 655 | * For each lmb in ibm,dynamic-memory a corresponding |
cf00085d C |
656 | * entry in linux,drconf-usable-memory property contains |
657 | * a counter followed by that many (base, size) pairs. | |
658 | * Read the counter from linux,drconf-usable-memory. | |
659 | */ | |
660 | return read_n_cells(n_mem_size_cells, usm); | |
661 | } | |
662 | ||
0204568a PM |
663 | /* |
664 | * Extract NUMA information from the ibm,dynamic-reconfiguration-memory | |
665 | * node. This assumes n_mem_{addr,size}_cells have been set. | |
666 | */ | |
667 | static void __init parse_drconf_memory(struct device_node *memory) | |
668 | { | |
b08a2a12 | 669 | const __be32 *uninitialized_var(dm), *usm; |
cf00085d | 670 | unsigned int n, rc, ranges, is_kexec_kdump = 0; |
3fdfd990 | 671 | unsigned long lmb_size, base, size, sz; |
8342681d | 672 | int nid; |
aa709f3b | 673 | struct assoc_arrays aa = { .arrays = NULL }; |
8342681d NF |
674 | |
675 | n = of_get_drconf_memory(memory, &dm); | |
676 | if (!n) | |
0204568a PM |
677 | return; |
678 | ||
3fdfd990 BH |
679 | lmb_size = of_get_lmb_size(memory); |
680 | if (!lmb_size) | |
8342681d NF |
681 | return; |
682 | ||
683 | rc = of_get_assoc_arrays(memory, &aa); | |
684 | if (rc) | |
0204568a PM |
685 | return; |
686 | ||
cf00085d C |
687 | /* check if this is a kexec/kdump kernel */ |
688 | usm = of_get_usable_memory(memory); | |
689 | if (usm != NULL) | |
690 | is_kexec_kdump = 1; | |
691 | ||
0204568a | 692 | for (; n != 0; --n) { |
8342681d NF |
693 | struct of_drconf_cell drmem; |
694 | ||
695 | read_drconf_cell(&drmem, &dm); | |
696 | ||
697 | /* skip this block if the reserved bit is set in flags (0x80) | |
698 | or if the block is not assigned to this partition (0x8) */ | |
699 | if ((drmem.flags & DRCONF_MEM_RESERVED) | |
700 | || !(drmem.flags & DRCONF_MEM_ASSIGNED)) | |
0204568a | 701 | continue; |
1daa6d08 | 702 | |
cf00085d | 703 | base = drmem.base_addr; |
3fdfd990 | 704 | size = lmb_size; |
cf00085d | 705 | ranges = 1; |
8342681d | 706 | |
cf00085d C |
707 | if (is_kexec_kdump) { |
708 | ranges = read_usm_ranges(&usm); | |
709 | if (!ranges) /* there are no (base, size) pairs */ | |
710 | continue; | |
711 | } | |
712 | do { | |
713 | if (is_kexec_kdump) { | |
714 | base = read_n_cells(n_mem_addr_cells, &usm); | |
715 | size = read_n_cells(n_mem_size_cells, &usm); | |
716 | } | |
717 | nid = of_drconf_to_nid_single(&drmem, &aa); | |
718 | fake_numa_create_new_node( | |
719 | ((base + size) >> PAGE_SHIFT), | |
8342681d | 720 | &nid); |
cf00085d C |
721 | node_set_online(nid); |
722 | sz = numa_enforce_memory_limit(base, size); | |
723 | if (sz) | |
e7e8de59 TC |
724 | memblock_set_node(base, sz, |
725 | &memblock.memory, nid); | |
cf00085d | 726 | } while (--ranges); |
0204568a PM |
727 | } |
728 | } | |
729 | ||
1da177e4 LT |
730 | static int __init parse_numa_properties(void) |
731 | { | |
94db7c5e | 732 | struct device_node *memory; |
482ec7c4 | 733 | int default_nid = 0; |
1da177e4 LT |
734 | unsigned long i; |
735 | ||
736 | if (numa_enabled == 0) { | |
737 | printk(KERN_WARNING "NUMA disabled by user\n"); | |
738 | return -1; | |
739 | } | |
740 | ||
1da177e4 LT |
741 | min_common_depth = find_min_common_depth(); |
742 | ||
1da177e4 LT |
743 | if (min_common_depth < 0) |
744 | return min_common_depth; | |
745 | ||
bf4b85b0 NL |
746 | dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth); |
747 | ||
1da177e4 | 748 | /* |
482ec7c4 NL |
749 | * Even though we connect cpus to numa domains later in SMP |
750 | * init, we need to know the node ids now. This is because | |
751 | * each node to be onlined must have NODE_DATA etc backing it. | |
1da177e4 | 752 | */ |
482ec7c4 | 753 | for_each_present_cpu(i) { |
dfbe93a2 | 754 | struct device_node *cpu; |
cf950b7a | 755 | int nid; |
1da177e4 | 756 | |
8b16cd23 | 757 | cpu = of_get_cpu_node(i, NULL); |
482ec7c4 | 758 | BUG_ON(!cpu); |
953039c8 | 759 | nid = of_node_to_nid_single(cpu); |
482ec7c4 | 760 | of_node_put(cpu); |
1da177e4 | 761 | |
482ec7c4 NL |
762 | /* |
763 | * Don't fall back to default_nid yet -- we will plug | |
764 | * cpus into nodes once the memory scan has discovered | |
765 | * the topology. | |
766 | */ | |
767 | if (nid < 0) | |
768 | continue; | |
769 | node_set_online(nid); | |
1da177e4 LT |
770 | } |
771 | ||
237a0989 | 772 | get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); |
94db7c5e AB |
773 | |
774 | for_each_node_by_type(memory, "memory") { | |
1da177e4 LT |
775 | unsigned long start; |
776 | unsigned long size; | |
cf950b7a | 777 | int nid; |
1da177e4 | 778 | int ranges; |
b08a2a12 | 779 | const __be32 *memcell_buf; |
1da177e4 LT |
780 | unsigned int len; |
781 | ||
e2eb6392 | 782 | memcell_buf = of_get_property(memory, |
ba759485 ME |
783 | "linux,usable-memory", &len); |
784 | if (!memcell_buf || len <= 0) | |
e2eb6392 | 785 | memcell_buf = of_get_property(memory, "reg", &len); |
1da177e4 LT |
786 | if (!memcell_buf || len <= 0) |
787 | continue; | |
788 | ||
cc5d0189 BH |
789 | /* ranges in cell */ |
790 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | |
1da177e4 LT |
791 | new_range: |
792 | /* these are order-sensitive, and modify the buffer pointer */ | |
237a0989 MK |
793 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); |
794 | size = read_n_cells(n_mem_size_cells, &memcell_buf); | |
1da177e4 | 795 | |
482ec7c4 NL |
796 | /* |
797 | * Assumption: either all memory nodes or none will | |
798 | * have associativity properties. If none, then | |
799 | * everything goes to default_nid. | |
800 | */ | |
953039c8 | 801 | nid = of_node_to_nid_single(memory); |
482ec7c4 NL |
802 | if (nid < 0) |
803 | nid = default_nid; | |
1daa6d08 BS |
804 | |
805 | fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid); | |
482ec7c4 | 806 | node_set_online(nid); |
1da177e4 | 807 | |
45fb6cea | 808 | if (!(size = numa_enforce_memory_limit(start, size))) { |
1da177e4 LT |
809 | if (--ranges) |
810 | goto new_range; | |
811 | else | |
812 | continue; | |
813 | } | |
814 | ||
e7e8de59 | 815 | memblock_set_node(start, size, &memblock.memory, nid); |
1da177e4 LT |
816 | |
817 | if (--ranges) | |
818 | goto new_range; | |
819 | } | |
820 | ||
0204568a | 821 | /* |
dfbe93a2 AB |
822 | * Now do the same thing for each MEMBLOCK listed in the |
823 | * ibm,dynamic-memory property in the | |
824 | * ibm,dynamic-reconfiguration-memory node. | |
0204568a PM |
825 | */ |
826 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | |
827 | if (memory) | |
828 | parse_drconf_memory(memory); | |
829 | ||
1da177e4 LT |
830 | return 0; |
831 | } | |
832 | ||
833 | static void __init setup_nonnuma(void) | |
834 | { | |
95f72d1e YL |
835 | unsigned long top_of_ram = memblock_end_of_DRAM(); |
836 | unsigned long total_ram = memblock_phys_mem_size(); | |
c67c3cb4 | 837 | unsigned long start_pfn, end_pfn; |
28be7072 BH |
838 | unsigned int nid = 0; |
839 | struct memblock_region *reg; | |
1da177e4 | 840 | |
e110b281 | 841 | printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", |
1da177e4 | 842 | top_of_ram, total_ram); |
e110b281 | 843 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", |
1da177e4 LT |
844 | (top_of_ram - total_ram) >> 20); |
845 | ||
28be7072 | 846 | for_each_memblock(memory, reg) { |
c7fc2de0 YL |
847 | start_pfn = memblock_region_memory_base_pfn(reg); |
848 | end_pfn = memblock_region_memory_end_pfn(reg); | |
1daa6d08 BS |
849 | |
850 | fake_numa_create_new_node(end_pfn, &nid); | |
1d7cfe18 | 851 | memblock_set_node(PFN_PHYS(start_pfn), |
e7e8de59 TC |
852 | PFN_PHYS(end_pfn - start_pfn), |
853 | &memblock.memory, nid); | |
1daa6d08 | 854 | node_set_online(nid); |
c67c3cb4 | 855 | } |
1da177e4 LT |
856 | } |
857 | ||
4b703a23 AB |
858 | void __init dump_numa_cpu_topology(void) |
859 | { | |
860 | unsigned int node; | |
861 | unsigned int cpu, count; | |
862 | ||
863 | if (min_common_depth == -1 || !numa_enabled) | |
864 | return; | |
865 | ||
866 | for_each_online_node(node) { | |
e110b281 | 867 | printk(KERN_DEBUG "Node %d CPUs:", node); |
4b703a23 AB |
868 | |
869 | count = 0; | |
870 | /* | |
871 | * If we used a CPU iterator here we would miss printing | |
872 | * the holes in the cpumap. | |
873 | */ | |
25863de0 AB |
874 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) { |
875 | if (cpumask_test_cpu(cpu, | |
876 | node_to_cpumask_map[node])) { | |
4b703a23 AB |
877 | if (count == 0) |
878 | printk(" %u", cpu); | |
879 | ++count; | |
880 | } else { | |
881 | if (count > 1) | |
882 | printk("-%u", cpu - 1); | |
883 | count = 0; | |
884 | } | |
885 | } | |
886 | ||
887 | if (count > 1) | |
25863de0 | 888 | printk("-%u", nr_cpu_ids - 1); |
4b703a23 AB |
889 | printk("\n"); |
890 | } | |
891 | } | |
892 | ||
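The loop above compresses consecutive CPUs into ranges; illustrative output for a node with a hole in its cpumask:

/*
 * Illustrative (hypothetical) output for a node whose cpumask covers
 * CPUs 0-3 and 8-11:
 *
 *   Node 0 CPUs: 0-3 8-11
 */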
893 | static void __init dump_numa_memory_topology(void) | |
1da177e4 LT |
894 | { |
895 | unsigned int node; | |
896 | unsigned int count; | |
897 | ||
898 | if (min_common_depth == -1 || !numa_enabled) | |
899 | return; | |
900 | ||
901 | for_each_online_node(node) { | |
902 | unsigned long i; | |
903 | ||
e110b281 | 904 | printk(KERN_DEBUG "Node %d Memory:", node); |
1da177e4 LT |
905 | |
906 | count = 0; | |
907 | ||
95f72d1e | 908 | for (i = 0; i < memblock_end_of_DRAM(); |
45fb6cea AB |
909 | i += (1 << SECTION_SIZE_BITS)) { |
910 | if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) { | |
1da177e4 LT |
911 | if (count == 0) |
912 | printk(" 0x%lx", i); | |
913 | ++count; | |
914 | } else { | |
915 | if (count > 0) | |
916 | printk("-0x%lx", i); | |
917 | count = 0; | |
918 | } | |
919 | } | |
920 | ||
921 | if (count > 0) | |
922 | printk("-0x%lx", i); | |
923 | printk("\n"); | |
924 | } | |
1da177e4 LT |
925 | } |
926 | ||
927 | /* | |
95f72d1e | 928 | * Allocate some memory, satisfying the memblock or bootmem allocator where |
1da177e4 LT |
929 | * required. nid is the preferred node and end_pfn is the highest page | |
930 | * frame number in that node. | |
931 | * | |
0be210fd | 932 | * Returns the virtual address of the memory. |
1da177e4 | 933 | */ |
893473df | 934 | static void __init *careful_zallocation(int nid, unsigned long size, |
45fb6cea AB |
935 | unsigned long align, |
936 | unsigned long end_pfn) | |
1da177e4 | 937 | { |
0be210fd | 938 | void *ret; |
45fb6cea | 939 | int new_nid; |
0be210fd DH |
940 | unsigned long ret_paddr; |
941 | ||
95f72d1e | 942 | ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT); |
1da177e4 LT |
943 | |
944 | /* retry over all memory */ | |
0be210fd | 945 | if (!ret_paddr) |
95f72d1e | 946 | ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM()); |
1da177e4 | 947 | |
0be210fd | 948 | if (!ret_paddr) |
5d21ea2b | 949 | panic("numa.c: cannot allocate %lu bytes for node %d", |
1da177e4 LT |
950 | size, nid); |
951 | ||
0be210fd DH |
952 | ret = __va(ret_paddr); |
953 | ||
1da177e4 | 954 | /* |
c555e520 | 955 | * We initialize the nodes in numeric order: 0, 1, 2... |
95f72d1e | 956 | * and hand over control from the MEMBLOCK allocator to the |
c555e520 DH |
957 | * bootmem allocator. If this function is called for |
958 | * node 5, then we know that all nodes <5 are using the | |
95f72d1e | 959 | * bootmem allocator instead of the MEMBLOCK allocator. |
c555e520 DH |
960 | * |
961 | * So, check the nid from which this allocation came | |
962 | * and double check to see if we need to use bootmem | |
95f72d1e | 963 | * instead of the MEMBLOCK. We don't free the MEMBLOCK memory |
c555e520 | 964 | * since it would be useless. |
1da177e4 | 965 | */ |
0be210fd | 966 | new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT); |
45fb6cea | 967 | if (new_nid < nid) { |
0be210fd | 968 | ret = __alloc_bootmem_node(NODE_DATA(new_nid), |
1da177e4 LT |
969 | size, align, 0); |
970 | ||
0be210fd | 971 | dbg("alloc_bootmem %p %lx\n", ret, size); |
1da177e4 LT |
972 | } |
973 | ||
893473df | 974 | memset(ret, 0, size); |
0be210fd | 975 | return ret; |
1da177e4 LT |
976 | } |
977 | ||
061d19f2 | 978 | static struct notifier_block ppc64_numa_nb = { |
74b85f37 CS |
979 | .notifier_call = cpu_numa_callback, |
980 | .priority = 1 /* Must run before sched domains notifier. */ | |
981 | }; | |
982 | ||
28e86bdb | 983 | static void __init mark_reserved_regions_for_nid(int nid) |
4a618669 DH |
984 | { |
985 | struct pglist_data *node = NODE_DATA(nid); | |
28be7072 | 986 | struct memblock_region *reg; |
4a618669 | 987 | |
28be7072 BH |
988 | for_each_memblock(reserved, reg) { |
989 | unsigned long physbase = reg->base; | |
990 | unsigned long size = reg->size; | |
4a618669 | 991 | unsigned long start_pfn = physbase >> PAGE_SHIFT; |
06eccea6 | 992 | unsigned long end_pfn = PFN_UP(physbase + size); |
4a618669 | 993 | struct node_active_region node_ar; |
6408068e | 994 | unsigned long node_end_pfn = pgdat_end_pfn(node); |
4a618669 DH |
995 | |
996 | /* | |
95f72d1e | 997 | * Check to make sure that this memblock.reserved area is |
4a618669 DH |
998 | * within the bounds of the node that we care about. |
999 | * Checking the nid of the start and end points is not | |
1000 | * sufficient because the reserved area could span the | |
1001 | * entire node. | |
1002 | */ | |
1003 | if (end_pfn <= node->node_start_pfn || | |
1004 | start_pfn >= node_end_pfn) | |
1005 | continue; | |
1006 | ||
1007 | get_node_active_region(start_pfn, &node_ar); | |
1008 | while (start_pfn < end_pfn && | |
1009 | node_ar.start_pfn < node_ar.end_pfn) { | |
1010 | unsigned long reserve_size = size; | |
1011 | /* | |
1012 | * if reserved region extends past active region | |
1013 | * then trim size to active region | |
1014 | */ | |
1015 | if (end_pfn > node_ar.end_pfn) | |
1016 | reserve_size = (node_ar.end_pfn << PAGE_SHIFT) | |
06eccea6 | 1017 | - physbase; |
a4c74ddd DH |
1018 | /* |
1019 | * Only worry about *this* node, others may not | |
1020 | * yet have valid NODE_DATA(). | |
1021 | */ | |
1022 | if (node_ar.nid == nid) { | |
1023 | dbg("reserve_bootmem %lx %lx nid=%d\n", | |
1024 | physbase, reserve_size, node_ar.nid); | |
1025 | reserve_bootmem_node(NODE_DATA(node_ar.nid), | |
1026 | physbase, reserve_size, | |
1027 | BOOTMEM_DEFAULT); | |
1028 | } | |
4a618669 DH |
1029 | /* |
1030 | * if reserved region is contained in the active region | |
1031 | * then done. | |
1032 | */ | |
1033 | if (end_pfn <= node_ar.end_pfn) | |
1034 | break; | |
1035 | ||
1036 | /* | |
1037 | * reserved region extends past the active region | |
1038 | * get next active region that contains this | |
1039 | * reserved region | |
1040 | */ | |
1041 | start_pfn = node_ar.end_pfn; | |
1042 | physbase = start_pfn << PAGE_SHIFT; | |
1043 | size = size - reserve_size; | |
1044 | get_node_active_region(start_pfn, &node_ar); | |
1045 | } | |
1046 | } | |
1047 | } | |
1048 | ||
1049 | ||
1da177e4 LT |
1050 | void __init do_init_bootmem(void) |
1051 | { | |
2fabf084 | 1052 | int nid, cpu; |
1da177e4 LT |
1053 | |
1054 | min_low_pfn = 0; | |
95f72d1e | 1055 | max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; |
1da177e4 LT |
1056 | max_pfn = max_low_pfn; |
1057 | ||
1058 | if (parse_numa_properties()) | |
1059 | setup_nonnuma(); | |
1060 | else | |
4b703a23 | 1061 | dump_numa_memory_topology(); |
1da177e4 | 1062 | |
1da177e4 | 1063 | for_each_online_node(nid) { |
c67c3cb4 | 1064 | unsigned long start_pfn, end_pfn; |
0be210fd | 1065 | void *bootmem_vaddr; |
1da177e4 LT |
1066 | unsigned long bootmap_pages; |
1067 | ||
c67c3cb4 | 1068 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); |
1da177e4 | 1069 | |
4a618669 DH |
1070 | /* |
1071 | * Allocate the node structure node local if possible | |
1072 | * | |
1073 | * Be careful moving this around, as it relies on all | |
1074 | * previous nodes' bootmem to be initialized and have | |
1075 | * all reserved areas marked. | |
1076 | */ | |
893473df | 1077 | NODE_DATA(nid) = careful_zallocation(nid, |
1da177e4 | 1078 | sizeof(struct pglist_data), |
45fb6cea | 1079 | SMP_CACHE_BYTES, end_pfn); |
1da177e4 LT |
1080 | |
1081 | dbg("node %d\n", nid); | |
1082 | dbg("NODE_DATA() = %p\n", NODE_DATA(nid)); | |
1083 | ||
b61bfa3c | 1084 | NODE_DATA(nid)->bdata = &bootmem_node_data[nid]; |
45fb6cea AB |
1085 | NODE_DATA(nid)->node_start_pfn = start_pfn; |
1086 | NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; | |
1da177e4 LT |
1087 | |
1088 | if (NODE_DATA(nid)->node_spanned_pages == 0) | |
1089 | continue; | |
1090 | ||
45fb6cea AB |
1091 | dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT); |
1092 | dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT); | |
1da177e4 | 1093 | |
45fb6cea | 1094 | bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn); |
893473df | 1095 | bootmem_vaddr = careful_zallocation(nid, |
45fb6cea AB |
1096 | bootmap_pages << PAGE_SHIFT, |
1097 | PAGE_SIZE, end_pfn); | |
1da177e4 | 1098 | |
0be210fd | 1099 | dbg("bootmap_vaddr = %p\n", bootmem_vaddr); |
1da177e4 | 1100 | |
0be210fd DH |
1101 | init_bootmem_node(NODE_DATA(nid), |
1102 | __pa(bootmem_vaddr) >> PAGE_SHIFT, | |
45fb6cea | 1103 | start_pfn, end_pfn); |
1da177e4 | 1104 | |
c67c3cb4 | 1105 | free_bootmem_with_active_regions(nid, end_pfn); |
4a618669 DH |
1106 | /* |
1107 | * Be very careful about moving this around. Future | |
893473df | 1108 | * calls to careful_zallocation() depend on this getting |
4a618669 DH |
1109 | * done correctly. |
1110 | */ | |
1111 | mark_reserved_regions_for_nid(nid); | |
8f64e1f2 | 1112 | sparse_memory_present_with_active_regions(nid); |
4a618669 | 1113 | } |
d3f6204a BH |
1114 | |
1115 | init_bootmem_done = 1; | |
25863de0 AB |
1116 | |
1117 | /* | |
1118 | * Now bootmem is initialised we can create the node to cpumask | |
1119 | * lookup tables and setup the cpu callback to populate them. | |
1120 | */ | |
1121 | setup_node_to_cpumask_map(); | |
1122 | ||
d4edc5b6 | 1123 | reset_numa_cpu_lookup_table(); |
25863de0 | 1124 | register_cpu_notifier(&ppc64_numa_nb); |
2fabf084 NA |
1125 | /* |
1126 | * We need the numa_cpu_lookup_table to be accurate for all CPUs, | |
1127 | * even before we online them, so that we can use cpu_to_{node,mem} | |
1128 | * early in boot, cf. smp_prepare_cpus(). | |
1129 | */ | |
bc3c4327 | 1130 | for_each_present_cpu(cpu) { |
70ad2375 | 1131 | numa_setup_cpu((unsigned long)cpu); |
2fabf084 | 1132 | } |
1da177e4 LT |
1133 | } |
1134 | ||
1135 | void __init paging_init(void) | |
1136 | { | |
6391af17 MG |
1137 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
1138 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | |
95f72d1e | 1139 | max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT; |
c67c3cb4 | 1140 | free_area_init_nodes(max_zone_pfns); |
1da177e4 LT |
1141 | } |
1142 | ||
1143 | static int __init early_numa(char *p) | |
1144 | { | |
1145 | if (!p) | |
1146 | return 0; | |
1147 | ||
1148 | if (strstr(p, "off")) | |
1149 | numa_enabled = 0; | |
1150 | ||
1151 | if (strstr(p, "debug")) | |
1152 | numa_debug = 1; | |
1153 | ||
1daa6d08 BS |
1154 | p = strstr(p, "fake="); |
1155 | if (p) | |
1156 | cmdline = p + strlen("fake="); | |
1157 | ||
1da177e4 LT |
1158 | return 0; |
1159 | } | |
1160 | early_param("numa", early_numa); | |
237a0989 MK |
1161 | |
1162 | #ifdef CONFIG_MEMORY_HOTPLUG | |
0db9360a | 1163 | /* |
0f16ef7f NF |
1164 | * Find the node associated with a hot added memory section for |
1165 | * memory represented in the device tree by the property | |
1166 | * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory. | |
0db9360a NF |
1167 | */ |
1168 | static int hot_add_drconf_scn_to_nid(struct device_node *memory, | |
1169 | unsigned long scn_addr) | |
1170 | { | |
b08a2a12 | 1171 | const __be32 *dm; |
0f16ef7f | 1172 | unsigned int drconf_cell_cnt, rc; |
3fdfd990 | 1173 | unsigned long lmb_size; |
0db9360a | 1174 | struct assoc_arrays aa; |
0f16ef7f | 1175 | int nid = -1; |
0db9360a | 1176 | |
0f16ef7f NF |
1177 | drconf_cell_cnt = of_get_drconf_memory(memory, &dm); |
1178 | if (!drconf_cell_cnt) | |
1179 | return -1; | |
0db9360a | 1180 | |
3fdfd990 BH |
1181 | lmb_size = of_get_lmb_size(memory); |
1182 | if (!lmb_size) | |
0f16ef7f | 1183 | return -1; |
0db9360a NF |
1184 | |
1185 | rc = of_get_assoc_arrays(memory, &aa); | |
1186 | if (rc) | |
0f16ef7f | 1187 | return -1; |
0db9360a | 1188 | |
0f16ef7f | 1189 | for (; drconf_cell_cnt != 0; --drconf_cell_cnt) { |
0db9360a NF |
1190 | struct of_drconf_cell drmem; |
1191 | ||
1192 | read_drconf_cell(&drmem, &dm); | |
1193 | ||
1194 | /* skip this block if it is reserved or not assigned to | |
1195 | * this partition */ | |
1196 | if ((drmem.flags & DRCONF_MEM_RESERVED) | |
1197 | || !(drmem.flags & DRCONF_MEM_ASSIGNED)) | |
1198 | continue; | |
1199 | ||
0f16ef7f | 1200 | if ((scn_addr < drmem.base_addr) |
3fdfd990 | 1201 | || (scn_addr >= (drmem.base_addr + lmb_size))) |
0f16ef7f NF |
1202 | continue; |
1203 | ||
0db9360a | 1204 | nid = of_drconf_to_nid_single(&drmem, &aa); |
0f16ef7f NF |
1205 | break; |
1206 | } | |
1207 | ||
1208 | return nid; | |
1209 | } | |
1210 | ||
1211 | /* | |
1212 | * Find the node associated with a hot added memory section for memory | |
1213 | * represented in the device tree as a node (i.e. memory@XXXX) for | |
95f72d1e | 1214 | * each memblock. |
0f16ef7f | 1215 | */ |
ec32dd66 | 1216 | static int hot_add_node_scn_to_nid(unsigned long scn_addr) |
0f16ef7f | 1217 | { |
94db7c5e | 1218 | struct device_node *memory; |
0f16ef7f NF |
1219 | int nid = -1; |
1220 | ||
94db7c5e | 1221 | for_each_node_by_type(memory, "memory") { |
0f16ef7f NF |
1222 | unsigned long start, size; |
1223 | int ranges; | |
b08a2a12 | 1224 | const __be32 *memcell_buf; |
0f16ef7f NF |
1225 | unsigned int len; |
1226 | ||
1227 | memcell_buf = of_get_property(memory, "reg", &len); | |
1228 | if (!memcell_buf || len <= 0) | |
1229 | continue; | |
1230 | ||
1231 | /* ranges in cell */ | |
1232 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | |
1233 | ||
1234 | while (ranges--) { | |
1235 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); | |
1236 | size = read_n_cells(n_mem_size_cells, &memcell_buf); | |
1237 | ||
1238 | if ((scn_addr < start) || (scn_addr >= (start + size))) | |
1239 | continue; | |
1240 | ||
1241 | nid = of_node_to_nid_single(memory); | |
1242 | break; | |
1243 | } | |
0db9360a | 1244 | |
0f16ef7f NF |
1245 | if (nid >= 0) |
1246 | break; | |
0db9360a NF |
1247 | } |
1248 | ||
60831842 AB |
1249 | of_node_put(memory); |
1250 | ||
0f16ef7f | 1251 | return nid; |
0db9360a NF |
1252 | } |
1253 | ||
237a0989 MK |
1254 | /* |
1255 | * Find the node associated with a hot added memory section. Section | |
95f72d1e YL |
1256 | * corresponds to a SPARSEMEM section, not a MEMBLOCK. It is assumed that |
1257 | * sections are fully contained within a single MEMBLOCK. | |
237a0989 MK |
1258 | */ |
1259 | int hot_add_scn_to_nid(unsigned long scn_addr) | |
1260 | { | |
1261 | struct device_node *memory = NULL; | |
0f16ef7f | 1262 | int nid, found = 0; |
237a0989 MK |
1263 | |
1264 | if (!numa_enabled || (min_common_depth < 0)) | |
72c33688 | 1265 | return first_online_node; |
0db9360a NF |
1266 | |
1267 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | |
1268 | if (memory) { | |
1269 | nid = hot_add_drconf_scn_to_nid(memory, scn_addr); | |
1270 | of_node_put(memory); | |
0f16ef7f NF |
1271 | } else { |
1272 | nid = hot_add_node_scn_to_nid(scn_addr); | |
0db9360a | 1273 | } |
237a0989 | 1274 | |
0f16ef7f | 1275 | if (nid < 0 || !node_online(nid)) |
72c33688 | 1276 | nid = first_online_node; |
237a0989 | 1277 | |
0f16ef7f NF |
1278 | if (NODE_DATA(nid)->node_spanned_pages) |
1279 | return nid; | |
237a0989 | 1280 | |
0f16ef7f NF |
1281 | for_each_online_node(nid) { |
1282 | if (NODE_DATA(nid)->node_spanned_pages) { | |
1283 | found = 1; | |
1284 | break; | |
237a0989 | 1285 | } |
237a0989 | 1286 | } |
0f16ef7f NF |
1287 | |
1288 | BUG_ON(!found); | |
1289 | return nid; | |
237a0989 | 1290 | } |
0f16ef7f | 1291 | |
cd34206e NA |
1292 | static u64 hot_add_drconf_memory_max(void) |
1293 | { | |
1294 | struct device_node *memory = NULL; | |
1295 | unsigned int drconf_cell_cnt = 0; | |
1296 | u64 lmb_size = 0; | |
ec32dd66 | 1297 | const __be32 *dm = NULL; |
cd34206e NA |
1298 | |
1299 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | |
1300 | if (memory) { | |
1301 | drconf_cell_cnt = of_get_drconf_memory(memory, &dm); | |
1302 | lmb_size = of_get_lmb_size(memory); | |
1303 | of_node_put(memory); | |
1304 | } | |
1305 | return lmb_size * drconf_cell_cnt; | |
1306 | } | |
1307 | ||
1308 | /* | |
1309 | * memory_hotplug_max - return max address of memory that may be added | |
1310 | * | |
1311 | * This is currently only used on systems that support drconfig memory | |
1312 | * hotplug. | |
1313 | */ | |
1314 | u64 memory_hotplug_max(void) | |
1315 | { | |
1316 | return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM()); | |
1317 | } | |
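As a quick illustration of the two helpers above (sizes hypothetical):

/*
 * Hypothetical example: ibm,dynamic-memory advertises 64 LMB entries
 * of 256 MB each, while memblock_end_of_DRAM() is 8 GB:
 *
 *   hot_add_drconf_memory_max() = 64 * 256 MB      = 16 GB
 *   memory_hotplug_max()        = max(16 GB, 8 GB) = 16 GB
 */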
237a0989 | 1318 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
9eff1a38 | 1319 | |
bd03403a | 1320 | /* Virtual Processor Home Node (VPHN) support */ |
39bf990e | 1321 | #ifdef CONFIG_PPC_SPLPAR |
30c05350 NF |
1322 | struct topology_update_data { |
1323 | struct topology_update_data *next; | |
1324 | unsigned int cpu; | |
1325 | int old_nid; | |
1326 | int new_nid; | |
1327 | }; | |
1328 | ||
5de16699 | 1329 | static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; |
9eff1a38 JL |
1330 | static cpumask_t cpu_associativity_changes_mask; |
1331 | static int vphn_enabled; | |
5d88aa85 JL |
1332 | static int prrn_enabled; |
1333 | static void reset_topology_timer(void); | |
9eff1a38 JL |
1334 | |
1335 | /* | |
1336 | * Store the current values of the associativity change counters | |
1337 | * maintained by the hypervisor. | |
1338 | */ | |
1339 | static void setup_cpu_associativity_change_counters(void) | |
1340 | { | |
cd9d6cc7 | 1341 | int cpu; |
9eff1a38 | 1342 | |
5de16699 AB |
1343 | /* The VPHN feature supports a maximum of 8 reference points */ |
1344 | BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); | |
1345 | ||
9eff1a38 | 1346 | for_each_possible_cpu(cpu) { |
cd9d6cc7 | 1347 | int i; |
9eff1a38 JL |
1348 | u8 *counts = vphn_cpu_change_counts[cpu]; |
1349 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | |
1350 | ||
5de16699 | 1351 | for (i = 0; i < distance_ref_points_depth; i++) |
9eff1a38 | 1352 | counts[i] = hypervisor_counts[i]; |
9eff1a38 JL |
1353 | } |
1354 | } | |
1355 | ||
1356 | /* | |
1357 | * The hypervisor maintains a set of 8 associativity change counters in | |
1358 | * the VPA of each cpu that correspond to the associativity levels in the | |
1359 | * ibm,associativity-reference-points property. When an associativity | |
1360 | * level changes, the corresponding counter is incremented. | |
1361 | * | |
1362 | * Set a bit in cpu_associativity_changes_mask for each cpu whose home | |
1363 | * node associativity levels have changed. | |
1364 | * | |
1365 | * Returns the number of cpus with unhandled associativity changes. | |
1366 | */ | |
1367 | static int update_cpu_associativity_changes_mask(void) | |
1368 | { | |
5d88aa85 | 1369 | int cpu; |
9eff1a38 JL |
1370 | cpumask_t *changes = &cpu_associativity_changes_mask; |
1371 | ||
9eff1a38 JL |
1372 | for_each_possible_cpu(cpu) { |
1373 | int i, changed = 0; | |
1374 | u8 *counts = vphn_cpu_change_counts[cpu]; | |
1375 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | |
1376 | ||
5de16699 | 1377 | for (i = 0; i < distance_ref_points_depth; i++) { |
d69043e8 | 1378 | if (hypervisor_counts[i] != counts[i]) { |
9eff1a38 JL |
1379 | counts[i] = hypervisor_counts[i]; |
1380 | changed = 1; | |
1381 | } | |
1382 | } | |
1383 | if (changed) { | |
3be7db6a RJ |
1384 | cpumask_or(changes, changes, cpu_sibling_mask(cpu)); |
1385 | cpu = cpu_last_thread_sibling(cpu); | |
9eff1a38 JL |
1386 | } |
1387 | } | |
1388 | ||
5d88aa85 | 1389 | return cpumask_weight(changes); |
9eff1a38 JL |
1390 | } |
1391 | ||
c0e5e46f AB |
1392 | /* |
1393 | * 6 64-bit registers unpacked into 12 32-bit associativity values. To form | |
1394 | * the complete property we have to add the length in the first cell. | |
1395 | */ | |
1396 | #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) | |
9eff1a38 JL |
1397 | |
1398 | /* | |
1399 | * Convert the associativity domain numbers returned from the hypervisor | |
1400 | * to the sequence they would appear in the ibm,associativity property. | |
1401 | */ | |
b08a2a12 | 1402 | static int vphn_unpack_associativity(const long *packed, __be32 *unpacked) |
9eff1a38 | 1403 | { |
cd9d6cc7 | 1404 | int i, nr_assoc_doms = 0; |
b08a2a12 | 1405 | const __be16 *field = (const __be16 *) packed; |
9eff1a38 JL |
1406 | |
1407 | #define VPHN_FIELD_UNUSED (0xffff) | |
1408 | #define VPHN_FIELD_MSB (0x8000) | |
1409 | #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) | |
1410 | ||
c0e5e46f | 1411 | for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { |
b08a2a12 | 1412 | if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) { |
9eff1a38 JL |
1413 | /* All significant fields processed, and remaining |
1414 | * fields contain the reserved value of all 1's. | |
1415 | * Just store them. | |
1416 | */ | |
b08a2a12 | 1417 | unpacked[i] = *((__be32 *)field); |
9eff1a38 | 1418 | field += 2; |
b08a2a12 | 1419 | } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) { |
9eff1a38 | 1420 | /* Data is in the lower 15 bits of this field */ |
b08a2a12 AP |
1421 | unpacked[i] = cpu_to_be32( |
1422 | be16_to_cpup(field) & VPHN_FIELD_MASK); | |
9eff1a38 JL |
1423 | field++; |
1424 | nr_assoc_doms++; | |
7639adaa | 1425 | } else { |
9eff1a38 JL |
1426 | /* Data is in the lower 15 bits of this field |
1427 | * concatenated with the next 16 bit field | |
1428 | */ | |
b08a2a12 | 1429 | unpacked[i] = *((__be32 *)field); |
9eff1a38 JL |
1430 | field += 2; |
1431 | nr_assoc_doms++; | |
1432 | } | |
1433 | } | |
1434 | ||
c0e5e46f | 1435 | /* The first cell contains the length of the property */ |
b08a2a12 | 1436 | unpacked[0] = cpu_to_be32(nr_assoc_doms); |
c0e5e46f | 1437 | |
9eff1a38 JL |
1438 | return nr_assoc_doms; |
1439 | } | |
1440 | ||
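A worked example of the unpacking rules above; the register contents are hypothetical:

/*
 * Hypothetical example.  Treating the returned registers as a stream
 * of big-endian 16-bit fields:
 *
 *   0x8001  0x8011  0x8002  0xffff  0xffff  ...
 *
 *   - 0x8001, 0x8011 and 0x8002 have VPHN_FIELD_MSB set, so each one
 *     yields a single domain value: 1, 0x11 and 2 (nr_assoc_doms = 3).
 *   - the 0xffff fields are unused and are stored through as-is.
 *
 * The result (as big-endian cells) is effectively
 *   unpacked[] = { 3, 1, 0x11, 2, 0xffffffff, ... }
 * i.e. a length cell followed by the domain values, matching the
 * layout of an ibm,associativity property.
 */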
1441 | /* | |
1442 | * Retrieve the new associativity information for a virtual processor's | |
1443 | * home node. | |
1444 | */ | |
b08a2a12 | 1445 | static long hcall_vphn(unsigned long cpu, __be32 *associativity) |
9eff1a38 | 1446 | { |
cd9d6cc7 | 1447 | long rc; |
9eff1a38 JL |
1448 | long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; |
1449 | u64 flags = 1; | |
1450 | int hwcpu = get_hard_smp_processor_id(cpu); | |
1451 | ||
1452 | rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); | |
1453 | vphn_unpack_associativity(retbuf, associativity); | |
1454 | ||
1455 | return rc; | |
1456 | } | |
1457 | ||
1458 | static long vphn_get_associativity(unsigned long cpu, | |
b08a2a12 | 1459 | __be32 *associativity) |
9eff1a38 | 1460 | { |
cd9d6cc7 | 1461 | long rc; |
9eff1a38 JL |
1462 | |
1463 | rc = hcall_vphn(cpu, associativity); | |
1464 | ||
1465 | switch (rc) { | |
1466 | case H_FUNCTION: | |
1467 | printk(KERN_INFO | |
1468 | "VPHN is not supported. Disabling polling...\n"); | |
1469 | stop_topology_update(); | |
1470 | break; | |
1471 | case H_HARDWARE: | |
1472 | printk(KERN_ERR | |
1473 | "hcall_vphn() experienced a hardware fault " | |
1474 | "preventing VPHN. Disabling polling...\n"); | |
1475 | stop_topology_update(); | |
1476 | } | |
1477 | ||
1478 | return rc; | |
1479 | } | |
1480 | ||
30c05350 NF |
1481 | /* |
1482 | * Update the CPU maps and sysfs entries for a single CPU when its NUMA | |
1483 | * characteristics change. This function doesn't perform any locking and is | |
1484 | * only safe to call from stop_machine(). | |
1485 | */ | |
1486 | static int update_cpu_topology(void *data) | |
1487 | { | |
1488 | struct topology_update_data *update; | |
1489 | unsigned long cpu; | |
1490 | ||
1491 | if (!data) | |
1492 | return -EINVAL; | |
1493 | ||
3be7db6a | 1494 | cpu = smp_processor_id(); |
30c05350 NF |
1495 | |
1496 | for (update = data; update; update = update->next) { | |
1497 | if (cpu != update->cpu) | |
1498 | continue; | |
1499 | ||
30c05350 NF |
1500 | unmap_cpu_from_node(update->cpu); |
1501 | map_cpu_to_node(update->cpu, update->new_nid); | |
176bbf14 | 1502 | vdso_getcpu_init(); |
30c05350 NF |
1503 | } |
1504 | ||
1505 | return 0; | |
1506 | } | |
1507 | ||
d4edc5b6 SB |
1508 | static int update_lookup_table(void *data) |
1509 | { | |
1510 | struct topology_update_data *update; | |
1511 | ||
1512 | if (!data) | |
1513 | return -EINVAL; | |
1514 | ||
1515 | /* | |
1516 | * Upon topology update, the numa-cpu lookup table needs to be updated | |
1517 | * for all threads in the core, including offline CPUs, to ensure that | |
1518 | * future hotplug operations respect the cpu-to-node associativity | |
1519 | * properly. | |
1520 | */ | |
1521 | for (update = data; update; update = update->next) { | |
1522 | int nid, base, j; | |
1523 | ||
1524 | nid = update->new_nid; | |
1525 | base = cpu_first_thread_sibling(update->cpu); | |
1526 | ||
1527 | for (j = 0; j < threads_per_core; j++) { | |
1528 | update_numa_cpu_lookup_table(base + j, nid); | |
1529 | } | |
1530 | } | |
1531 | ||
1532 | return 0; | |
1533 | } | |
1534 | ||
9eff1a38 JL |
1535 | /* |
1536 | * Update the node maps and sysfs entries for each cpu whose home node | |
79c5fceb | 1537 | * has changed. Returns 1 when the topology has changed, and 0 otherwise. |
9eff1a38 JL |
1538 | */ |
1539 | int arch_update_cpu_topology(void) | |
1540 | { | |
3be7db6a | 1541 | unsigned int cpu, sibling, changed = 0; |
30c05350 | 1542 | struct topology_update_data *updates, *ud; |
b08a2a12 | 1543 | __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0}; |
176bbf14 | 1544 | cpumask_t updated_cpus; |
8a25a2fd | 1545 | struct device *dev; |
3be7db6a | 1546 | int weight, new_nid, i = 0; |
9eff1a38 | 1547 | |
30c05350 NF |
1548 | weight = cpumask_weight(&cpu_associativity_changes_mask); |
1549 | if (!weight) | |
1550 | return 0; | |
1551 | ||
1552 | updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL); | |
1553 | if (!updates) | |
1554 | return 0; | |
9eff1a38 | 1555 | |
176bbf14 JL |
1556 | cpumask_clear(&updated_cpus); |
1557 | ||
5d88aa85 | 1558 | for_each_cpu(cpu, &cpu_associativity_changes_mask) { |
3be7db6a RJ |
1559 | /* |
1560 | * If siblings aren't flagged for changes, updates list | |
1561 | * will be too short. Skip this cpu during the current update and |
1562 | * flag its siblings so that the next update picks them up. |
1563 | */ | |
1564 | if (!cpumask_subset(cpu_sibling_mask(cpu), | |
1565 | &cpu_associativity_changes_mask)) { | |
1566 | pr_info("Sibling bits not set for associativity " | |
1567 | "change, cpu%d\n", cpu); | |
1568 | cpumask_or(&cpu_associativity_changes_mask, | |
1569 | &cpu_associativity_changes_mask, | |
1570 | cpu_sibling_mask(cpu)); | |
1571 | cpu = cpu_last_thread_sibling(cpu); | |
1572 | continue; | |
1573 | } | |
9eff1a38 | 1574 | |
3be7db6a RJ |
1575 | /* Use associativity from first thread for all siblings */ |
1576 | vphn_get_associativity(cpu, associativity); | |
1577 | new_nid = associativity_to_nid(associativity); | |
1578 | if (new_nid < 0 || !node_online(new_nid)) | |
1579 | new_nid = first_online_node; | |
1580 | ||
1581 | if (new_nid == numa_cpu_lookup_table[cpu]) { | |
1582 | cpumask_andnot(&cpu_associativity_changes_mask, | |
1583 | &cpu_associativity_changes_mask, | |
1584 | cpu_sibling_mask(cpu)); | |
1585 | cpu = cpu_last_thread_sibling(cpu); | |
1586 | continue; | |
1587 | } | |
9eff1a38 | 1588 | |
3be7db6a RJ |
1589 | for_each_cpu(sibling, cpu_sibling_mask(cpu)) { |
1590 | ud = &updates[i++]; | |
1591 | ud->cpu = sibling; | |
1592 | ud->new_nid = new_nid; | |
1593 | ud->old_nid = numa_cpu_lookup_table[sibling]; | |
1594 | cpumask_set_cpu(sibling, &updated_cpus); | |
1595 | if (i < weight) | |
1596 | ud->next = &updates[i]; | |
1597 | } | |
1598 | cpu = cpu_last_thread_sibling(cpu); | |
30c05350 NF |
1599 | } |
1600 | ||
9a013361 MW |
1601 | /* |
1602 | * In cases where we have nothing to update (because the updates list | |
1603 | * is too short or because the new topology is the same as the old one), |
1604 | * skip invoking update_cpu_topology() via stop_machine(). This is |
1605 | * necessary (and not just a fast-path optimization) since stop_machine() |
1606 | * can end up electing a random CPU to run update_cpu_topology(), and | |
1607 | * thus trick us into setting up incorrect cpu-node mappings (since | |
1608 | * 'updates' is kzalloc()'ed). | |
1609 | * | |
1610 | * For the same reason, skip the rest of the update work below as well. |
1611 | */ | |
1612 | if (!cpumask_weight(&updated_cpus)) | |
1613 | goto out; | |
1614 | ||
176bbf14 | 1615 | stop_machine(update_cpu_topology, &updates[0], &updated_cpus); |
30c05350 | 1616 | |
d4edc5b6 SB |
1617 | /* |
1618 | * Update the numa-cpu lookup table with the new mappings, even for | |
1619 | * offline CPUs. It is best to perform this update from the stop- | |
1620 | * machine context. | |
1621 | */ | |
1622 | stop_machine(update_lookup_table, &updates[0], | |
1623 | cpumask_of(raw_smp_processor_id())); | |
1624 | ||
30c05350 | 1625 | for (ud = &updates[0]; ud; ud = ud->next) { |
dd023217 NF |
1626 | unregister_cpu_under_node(ud->cpu, ud->old_nid); |
1627 | register_cpu_under_node(ud->cpu, ud->new_nid); | |
1628 | ||
30c05350 | 1629 | dev = get_cpu_device(ud->cpu); |
8a25a2fd KS |
1630 | if (dev) |
1631 | kobject_uevent(&dev->kobj, KOBJ_CHANGE); | |
30c05350 | 1632 | cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask); |
79c5fceb | 1633 | changed = 1; |
9eff1a38 JL |
1634 | } |
1635 | ||
9a013361 | 1636 | out: |
30c05350 | 1637 | kfree(updates); |
79c5fceb | 1638 | return changed; |
9eff1a38 JL |
1639 | } |
1640 | ||
1641 | static void topology_work_fn(struct work_struct *work) | |
1642 | { | |
1643 | rebuild_sched_domains(); | |
1644 | } | |
1645 | static DECLARE_WORK(topology_work, topology_work_fn); | |
1646 | ||
ec32dd66 | 1647 | static void topology_schedule_update(void) |
9eff1a38 JL |
1648 | { |
1649 | schedule_work(&topology_work); | |
1650 | } | |
1651 | ||
1652 | static void topology_timer_fn(unsigned long ignored) | |
1653 | { | |
5d88aa85 | 1654 | if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask)) |
9eff1a38 | 1655 | topology_schedule_update(); |
5d88aa85 JL |
1656 | else if (vphn_enabled) { |
1657 | if (update_cpu_associativity_changes_mask() > 0) | |
1658 | topology_schedule_update(); | |
1659 | reset_topology_timer(); | |
1660 | } | |
9eff1a38 JL |
1661 | } |
1662 | static struct timer_list topology_timer = | |
1663 | TIMER_INITIALIZER(topology_timer_fn, 0, 0); | |
1664 | ||
5d88aa85 | 1665 | static void reset_topology_timer(void) |
9eff1a38 JL |
1666 | { |
1667 | topology_timer.data = 0; | |
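/* Re-arm the deferrable polling timer to check for changes again in 60 seconds. */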
1668 | topology_timer.expires = jiffies + 60 * HZ; | |
5d88aa85 | 1669 | mod_timer(&topology_timer, topology_timer.expires); |
9eff1a38 JL |
1670 | } |
1671 | ||
601abdc3 NF |
1672 | #ifdef CONFIG_SMP |
1673 | ||
5d88aa85 JL |
1674 | static void stage_topology_update(int core_id) |
1675 | { | |
1676 | cpumask_or(&cpu_associativity_changes_mask, | |
1677 | &cpu_associativity_changes_mask, cpu_sibling_mask(core_id)); | |
1678 | reset_topology_timer(); | |
1679 | } | |
1680 | ||
1681 | static int dt_update_callback(struct notifier_block *nb, | |
1682 | unsigned long action, void *data) | |
1683 | { | |
1684 | struct of_prop_reconfig *update; | |
1685 | int rc = NOTIFY_DONE; | |
1686 | ||
1687 | switch (action) { | |
5d88aa85 JL |
1688 | case OF_RECONFIG_UPDATE_PROPERTY: |
1689 | update = (struct of_prop_reconfig *)data; | |
30c05350 NF |
1690 | if (!of_prop_cmp(update->dn->type, "cpu") && |
1691 | !of_prop_cmp(update->prop->name, "ibm,associativity")) { | |
5d88aa85 JL |
1692 | u32 core_id; |
1693 | of_property_read_u32(update->dn, "reg", &core_id); | |
1694 | stage_topology_update(core_id); | |
1695 | rc = NOTIFY_OK; | |
1696 | } | |
1697 | break; | |
1698 | } | |
1699 | ||
1700 | return rc; | |
9eff1a38 JL |
1701 | } |
1702 | ||
5d88aa85 JL |
1703 | static struct notifier_block dt_update_nb = { |
1704 | .notifier_call = dt_update_callback, | |
1705 | }; | |
1706 | ||
601abdc3 NF |
1707 | #endif |
1708 | ||
9eff1a38 | 1709 | /* |
5d88aa85 | 1710 | * Start watching for associativity changes (PRRN notifications or VPHN polling). |
9eff1a38 JL |
1711 | */ |
1712 | int start_topology_update(void) | |
1713 | { | |
1714 | int rc = 0; | |
1715 | ||
5d88aa85 JL |
1716 | if (firmware_has_feature(FW_FEATURE_PRRN)) { |
1717 | if (!prrn_enabled) { | |
1718 | prrn_enabled = 1; | |
1719 | vphn_enabled = 0; | |
601abdc3 | 1720 | #ifdef CONFIG_SMP |
5d88aa85 | 1721 | rc = of_reconfig_notifier_register(&dt_update_nb); |
601abdc3 | 1722 | #endif |
5d88aa85 | 1723 | } |
b7abef04 | 1724 | } else if (firmware_has_feature(FW_FEATURE_VPHN) && |
f13c13a0 | 1725 | lppaca_shared_proc(get_lppaca())) { |
5d88aa85 JL |
1726 | if (!vphn_enabled) { |
1727 | prrn_enabled = 0; | |
1728 | vphn_enabled = 1; | |
1729 | setup_cpu_associativity_change_counters(); | |
1730 | init_timer_deferrable(&topology_timer); | |
1731 | reset_topology_timer(); | |
1732 | } | |
9eff1a38 JL |
1733 | } |
1734 | ||
1735 | return rc; | |
1736 | } | |
9eff1a38 JL |
1737 | |
1738 | /* | |
1739 | * Disable polling for VPHN associativity changes. | |
1740 | */ | |
1741 | int stop_topology_update(void) | |
1742 | { | |
5d88aa85 JL |
1743 | int rc = 0; |
1744 | ||
1745 | if (prrn_enabled) { | |
1746 | prrn_enabled = 0; | |
601abdc3 | 1747 | #ifdef CONFIG_SMP |
5d88aa85 | 1748 | rc = of_reconfig_notifier_unregister(&dt_update_nb); |
601abdc3 | 1749 | #endif |
5d88aa85 JL |
1750 | } else if (vphn_enabled) { |
1751 | vphn_enabled = 0; | |
1752 | rc = del_timer_sync(&topology_timer); | |
1753 | } | |
1754 | ||
1755 | return rc; | |
9eff1a38 | 1756 | } |
e04fa612 NF |
1757 | |
1758 | int prrn_is_enabled(void) | |
1759 | { | |
1760 | return prrn_enabled; | |
1761 | } | |
1762 | ||
1763 | static int topology_read(struct seq_file *file, void *v) | |
1764 | { | |
1765 | if (vphn_enabled || prrn_enabled) | |
1766 | seq_puts(file, "on\n"); | |
1767 | else | |
1768 | seq_puts(file, "off\n"); | |
1769 | ||
1770 | return 0; | |
1771 | } | |
1772 | ||
1773 | static int topology_open(struct inode *inode, struct file *file) | |
1774 | { | |
1775 | return single_open(file, topology_read, NULL); | |
1776 | } | |
1777 | ||
1778 | static ssize_t topology_write(struct file *file, const char __user *buf, | |
1779 | size_t count, loff_t *off) | |
1780 | { | |
1781 | char kbuf[4]; /* "on" or "off" plus null. */ | |
1782 | int read_len; | |
1783 | ||
1784 | read_len = count < 3 ? count : 3; | |
1785 | if (copy_from_user(kbuf, buf, read_len)) | |
1786 | return -EFAULT; |
1787 | ||
1788 | kbuf[read_len] = '\0'; | |
1789 | ||
1790 | if (!strncmp(kbuf, "on", 2)) | |
1791 | start_topology_update(); | |
1792 | else if (!strncmp(kbuf, "off", 3)) | |
1793 | stop_topology_update(); | |
1794 | else | |
1795 | return -EINVAL; | |
1796 | ||
1797 | return count; | |
1798 | } | |
1799 | ||
1800 | static const struct file_operations topology_ops = { | |
1801 | .read = seq_read, | |
1802 | .write = topology_write, | |
1803 | .open = topology_open, | |
1804 | .release = single_release | |
1805 | }; | |
1806 | ||
1807 | static int topology_update_init(void) | |
1808 | { | |
1809 | start_topology_update(); | |
316d7188 | 1810 | proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops); |
e04fa612 NF |
1811 | |
1812 | return 0; | |
9eff1a38 | 1813 | } |
e04fa612 | 1814 | device_initcall(topology_update_init); |
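/*
 * The updates can also be toggled from user space through the procfs file
 * created above (a usage sketch, assuming procfs is mounted at /proc):
 *
 *   echo off > /proc/powerpc/topology_updates
 *   cat /proc/powerpc/topology_updates	# prints "on" or "off"
 */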
39bf990e | 1815 | #endif /* CONFIG_PPC_SPLPAR */ |