if (!memcell_buf || len <= 0)
continue;
- ranges = memory->n_addrs;
+ /* ranges in cell */
+ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
/* these are order-sensitive, and modify the buffer pointer */
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
unsigned long end_pfn)
{
int new_nid;
- unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+ unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
/* retry over all memory */
if (!ret)
- ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+ ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
if (!ret)
panic("numa.c: cannot allocate %lu bytes on node %d",
/*
 * NOTE(review): this span is a patch fragment (interleaved '-'/'+' diff
 * lines with hunk context prefixes stripped), not compilable C.  The
 * comments below describe the post-patch behavior.
 *
 * hot_add_scn_to_nid() - map a hot-added memory section's physical
 * address to the NUMA node containing it.
 *
 * Walks every device-tree node of type "memory", decodes each
 * (start, size) range from the reg-style cell buffer, and matches the
 * range covering scn_addr.  The patch changes the found-node path from
 * a direct return to a jump to got_numa_domain, where an explicitly
 * temporary fallback replaces a node with no spanned pages by some
 * online node, so callers never receive an empty node.
 *
 * Returns: the NUMA node id for scn_addr; numa_domain (0) when NUMA is
 * disabled.  BUG()s if no memory range covers scn_addr.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
struct device_node *memory = NULL;
+ nodemask_t nodes;
+ int numa_domain = 0; /* default node when NUMA is disabled */
if (!numa_enabled || (min_common_depth < 0))
- return 0;
+ return numa_domain;
while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
unsigned long start, size;
- int numa_domain, ranges;
+ int ranges;
unsigned int *memcell_buf;
unsigned int len;
/*
 * NOTE(review): the context lines that fill memcell_buf/len (and
 * presumably assign numa_domain for the matched node) were stripped
 * from this fragment — verify against the full patch.
 */
if (!memcell_buf || len <= 0)
continue;
/* len is in bytes; >>2 yields 32-bit cells, and each range consumes
 * one address tuple plus one size tuple of cells. */
- ranges = memory->n_addrs; /* ranges in cell */
+ /* ranges in cell */
+ ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
/* these reads are order-sensitive: each advances memcell_buf */
start = read_n_cells(n_mem_addr_cells, &memcell_buf);
size = read_n_cells(n_mem_size_cells, &memcell_buf);
if ((scn_addr >= start) && (scn_addr < (start + size))) {
of_node_put(memory); /* drop the ref taken by of_find_node_by_type() */
- return numa_domain;
+ goto got_numa_domain;
}
if (--ranges) /* process all ranges in cell */
goto ha_new_range;
}
-
BUG(); /* section address should be found above */
- return 0;
+
+ /* Temporary code to ensure that returned node is not empty */
+got_numa_domain:
+ /* NOTE(review): assumes some online node in the mask has memory —
+  * TODO confirm; otherwise this loop would not terminate. */
+ nodes_setall(nodes);
+ while (NODE_DATA(numa_domain)->node_spanned_pages == 0) {
+ node_clear(numa_domain, nodes);
+ numa_domain = any_online_node(nodes);
+ }
+ return numa_domain;
}
#endif /* CONFIG_MEMORY_HOTPLUG */