/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/sparsemem.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;
#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const unsigned int *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
static int __init fake_numa_create_new_node(unsigned long end_pfn,
					    unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
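/*
 * Illustrative note (assumed example, not from the original source): the
 * fake NUMA command line is a comma-separated list of memparse()d upper
 * boundaries, e.g. "numa=fake=512M,1G", so memory below 512M would be
 * assigned to fake node 0 and memory between 512M and 1G to fake node 1.
 */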
/*
 * get_node_active_region - Return active region containing pfn
 * Active range returned is empty if none found.
 * @pfn: The page to return the region for
 * @node_ar: Returned set to the active region containing @pfn
 */
static void __init get_node_active_region(unsigned long pfn,
					  struct node_active_region *node_ar)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (pfn >= start_pfn && pfn < end_pfn) {
			node_ar->nid = nid;
			node_ar->start_pfn = start_pfn;
			node_ar->end_pfn = end_pfn;
			break;
		}
	}
}
static void map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}
/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	unsigned int len;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}
int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
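/*
 * Worked example (illustrative only): with a reference-points depth of 2
 * and LOCAL_DISTANCE of 10, two nodes whose lookup-table entries differ at
 * both levels report 10 * 2 * 2 = 40, while two nodes that already match
 * at the first reference point stop doubling and report 10.
 */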
static void initialize_distance_lookup_table(int nid,
		const unsigned int *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		distance_lookup_table[nid][i] =
			associativity[distance_ref_points[i]];
	}
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const unsigned int *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (associativity[0] >= min_common_depth)
		nid = associativity[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 && associativity[0] >= distance_ref_points_depth)
		initialize_distance_lookup_table(nid, associativity);

out:
	return nid;
}
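/*
 * Illustrative example (assumed values): an ibm,associativity value of
 * { 4, 0, 0, 0, 2 } with min_common_depth == 4 yields nid 2, because
 * associativity[0] (the number of levels) is >= min_common_depth and
 * associativity[min_common_depth] holds the domain id.
 */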
/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = distance_ref_points[0];
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = distance_ref_points[1];
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}
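/*
 * Illustrative example (assumed values): a reference-points property of
 * { 0x4, 0x2 } under form 1 affinity makes index 4 of ibm,associativity
 * the most significant NUMA boundary, so this function returns 4; under
 * form 0 affinity the second entry (2) would be returned instead.
 */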
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
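/*
 * Illustrative example (assumed values): with n == 2 and a buffer holding
 * the 32-bit cells { 0x00000001, 0x00000000 }, the result is 0x100000000
 * and *buf is advanced past both cells; this is how 64-bit addresses and
 * sizes are pulled out of device tree properties.
 */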
/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of memblock
 * list entries followed by N memblock list entries.  Each memblock list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	unsigned int len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
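/*
 * Illustrative layout (assuming n_mem_addr_cells == 2): the property reads
 * as { N, then N entries of <base_addr:2 cells> <drc_index> <reserved>
 * <aa_index> <flags> }, i.e. six cells per entry, which is exactly what
 * the size check above revalidates against.
 */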
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	unsigned int len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	unsigned int len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
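/*
 * Illustrative example (assumed values): with array_sz == 4,
 * min_common_depth == 4 and aa_index == 1, the lookup reads
 * aa->arrays[1 * 4 + 4 - 1] == aa->arrays[7], i.e. the last entry of the
 * second associativity array, and uses that value as the node id.
 */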
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}
static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */
	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duples.
	 * read the counter from linux,drconf-usable-memory
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) duples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
						((base + size) >> PAGE_SHIFT),
						&nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz, nid);
		} while (--ranges);
	}
}
static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		memblock_set_node(start, size, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int nid = 0;
	struct memblock_region *reg;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for_each_memblock(memory, reg) {
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);

		fake_numa_create_new_node(end_pfn, &nid);
		memblock_set_node(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), nid);
		node_set_online(nid);
	}
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < memblock_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the memblock or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
					unsigned long align,
					unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __memblock_alloc_base(size, align,
						  memblock_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the MEMBLOCK allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the MEMBLOCK allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the MEMBLOCK.  We don't free the MEMBLOCK memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}
static struct notifier_block ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};
static void __init mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	struct memblock_region *reg;

	for_each_memblock(reserved, reg) {
		unsigned long physbase = reg->base;
		unsigned long size = reg->size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this memblock.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}
void __init do_init_bootmem(void)
{
	int nid;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = memblock_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
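/*
 * Usage note (illustrative): this early parameter accepts "numa=off" to
 * disable NUMA, "numa=debug" to enable the dbg() output above, and
 * "numa=fake=..." to hand the remainder of the string to the fake NUMA
 * code via the cmdline pointer.
 */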
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}
/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}
static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const u32 *dm = NULL;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);
		of_node_put(memory);
	}
	return lmb_size * drconf_cell_cnt;
}
/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
/*
 * Store the current values of the associativity change counters in the
 * hypervisor.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}
/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed)
			cpumask_set_cpu(cpu, changes);
	}

	return cpumask_weight(changes);
}
/*
 * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
 * the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

/*
 * Convert the associativity domain numbers returned from the hypervisor
 * to the sequence they would appear in the ibm,associativity property.
 */
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16 *) packed;

#define VPHN_FIELD_UNUSED	(0xffff)
#define VPHN_FIELD_MSB		(0x8000)
#define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)

	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
			 * Just store them.
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
		} else if (*field & VPHN_FIELD_MSB) {
			/* Data is in the lower 15 bits of this field */
			unpacked[i] = *field & VPHN_FIELD_MASK;
			field++;
			nr_assoc_doms++;
		} else {
			/* Data is in the lower 15 bits of this field
			 * concatenated with the next 16 bit field
			 */
			unpacked[i] = *((u32 *)field);
			field += 2;
			nr_assoc_doms++;
		}
	}

	/* The first cell contains the length of the property */
	unpacked[0] = nr_assoc_doms;

	return nr_assoc_doms;
}
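/*
 * Illustrative example (assumed values): a 16-bit field of 0x8002 has the
 * MSB set and unpacks to the single domain number 0x0002; a field of
 * 0x0001 followed by 0x0003 has the MSB clear and unpacks to the 32-bit
 * domain number 0x00010003; a field of 0xffff marks the remaining fields
 * as unused.
 */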
/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}
static long vphn_get_associativity(unsigned long cpu,
				   unsigned int *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
	}

	return rc;
}
/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = get_cpu();

	for (update = data; update; update = update->next) {
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(update->cpu);
		map_cpu_to_node(update->cpu, update->new_nid);
	}

	return 0;
}
/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
	unsigned int cpu, changed = 0;
	struct topology_update_data *updates, *ud;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, i = 0;

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		ud = &updates[i++];
		ud->cpu = cpu;
		vphn_get_associativity(cpu, associativity);
		ud->new_nid = associativity_to_nid(associativity);

		if (ud->new_nid < 0 || !node_online(ud->new_nid))
			ud->new_nid = first_online_node;

		ud->old_nid = numa_cpu_lookup_table[cpu];
		cpumask_set_cpu(cpu, &updated_cpus);

		if (i < weight)
			ud->next = &updates[i];
	}

	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

	kfree(updates);
	return changed;
}
static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}
static void topology_timer_fn(unsigned long ignored)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer =
	TIMER_INITIALIZER(topology_timer_fn, 0, 0);

static void reset_topology_timer(void)
{
	topology_timer.data = 0;
	topology_timer.expires = jiffies + 60 * HZ;
	mod_timer(&topology_timer, topology_timer.expires);
}
static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}
static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_prop_reconfig *update;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		update = (struct of_prop_reconfig *)data;
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};
/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
			vphn_enabled = 0;
			rc = of_reconfig_notifier_register(&dt_update_nb);
		}
	} else if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   get_lppaca()->shared_proc) {
		if (!vphn_enabled) {
			prrn_enabled = 0;
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			init_timer_deferrable(&topology_timer);
			reset_topology_timer();
		}
	}

	return rc;
}
/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
	} else if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}
int prrn_is_enabled(void)
{
	return prrn_enabled;
}
static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}
static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}
static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}
static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};
static int topology_update_init(void)
{
	start_topology_update();
	proc_create("powerpc/topology_updates", 644, NULL, &topology_ops);

	return 0;
}
device_initcall(topology_update_init);
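/*
 * Usage note (illustrative): the proc file created above can be toggled
 * from userspace, e.g. "echo on > /proc/powerpc/topology_updates" to start
 * polling for associativity changes and "echo off" to stop it; reading the
 * file reports the current state.
 */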
#endif /* CONFIG_PPC_SPLPAR */