diff --git a/mm/bootmem.c b/mm/bootmem.c
index f468185b3b28a517aaceb79b80e7f799df0dfc6f..b93376c39b61308fe2ef7de2466e83306c865cf9 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -147,21 +147,21 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 
 /*
  * free_bootmem_late - free bootmem pages directly to page allocator
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * This is only useful when the bootmem allocator has already been torn
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
        unsigned long cursor, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part(__va(physaddr), size);
 
-       cursor = PFN_UP(addr);
-       end = PFN_DOWN(addr + size);
+       cursor = PFN_UP(physaddr);
+       end = PFN_DOWN(physaddr + size);
 
        for (; cursor < end; cursor++) {
                __free_pages_bootmem(pfn_to_page(cursor), 0);
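
Note: the rename makes it explicit that free_bootmem_late() takes a physical
address. The PFN_UP()/PFN_DOWN() pair rounds the range inward, so a partial
page at either end is never handed to the page allocator. A minimal
user-space sketch of that rounding, assuming 4 KiB pages; pfn_up() and
pfn_down() are stand-ins for the kernel macros:

    #include <stdio.h>

    #define PAGE_SHIFT 12UL                 /* assumed: 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Stand-ins for the kernel's PFN_UP()/PFN_DOWN() macros. */
    static unsigned long pfn_up(unsigned long physaddr)
    {
            return (physaddr + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    static unsigned long pfn_down(unsigned long physaddr)
    {
            return physaddr >> PAGE_SHIFT;
    }

    int main(void)
    {
            /* A range that starts and ends mid-page: 0x1800..0x4800. */
            unsigned long physaddr = 0x1800, size = 0x3000;
            unsigned long cursor = pfn_up(physaddr);       /* first whole pfn: 2 */
            unsigned long end = pfn_down(physaddr + size); /* past last whole pfn: 4 */

            /* Only pfns 2 and 3 would be freed; both partial ends stay put. */
            for (; cursor < end; cursor++)
                    printf("would free pfn %lu\n", cursor);
            return 0;
    }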
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 
        while (start < end) {
                unsigned long *map, idx, vec;
+               unsigned shift;
 
                map = bdata->node_bootmem_map;
                idx = start - bdata->node_min_pfn;
+               shift = idx & (BITS_PER_LONG - 1);
+               /*
+                * vec holds at most BITS_PER_LONG map bits,
+                * bit 0 corresponds to start.
+                */
                vec = ~map[idx / BITS_PER_LONG];
+
+               if (shift) {
+                       vec >>= shift;
+                       if (end - start >= BITS_PER_LONG)
+                               vec |= ~map[idx / BITS_PER_LONG + 1] <<
+                                       (BITS_PER_LONG - shift);
+               }
                /*
                 * If we have a properly aligned and fully unreserved
                 * BITS_PER_LONG block of pages in front of us, free
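
Note: the added shift handling lets this loop start at a pfn that is not
BITS_PER_LONG-aligned: the working word vec is stitched together from two
adjacent bitmap words so that bit 0 always corresponds to start. A user-space
sketch of the stitching, assuming 64-bit longs; read_window() is a
hypothetical helper mirroring the added lines:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    /*
     * Return BITS_PER_LONG bits of ~map starting at bit idx, so that bit 0
     * of the result corresponds to bit idx (mirrors the hunk above).
     */
    static unsigned long read_window(const unsigned long *map,
                                     unsigned long idx, unsigned long nbits)
    {
            unsigned shift = idx & (BITS_PER_LONG - 1);
            unsigned long vec = ~map[idx / BITS_PER_LONG];

            if (shift) {
                    vec >>= shift;
                    /* Borrow the low bits of the next word if the range goes on. */
                    if (nbits >= BITS_PER_LONG)
                            vec |= ~map[idx / BITS_PER_LONG + 1] <<
                                    (BITS_PER_LONG - shift);
            }
            return vec;
    }

    int main(void)
    {
            /* Bitmap: set bit = reserved. Only bits 60..63 and 64..67 are free. */
            unsigned long map[2] = { ~(0xfUL << 60), ~0xfUL };

            /* Window at bit 60 spans both words: prints 0xff. */
            printf("0x%lx\n", read_window(map, 60, 128 - 60));
            return 0;
    }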
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
                        count += BITS_PER_LONG;
                        start += BITS_PER_LONG;
                } else {
-                       unsigned long off = 0;
+                       unsigned long cur = start;
 
-                       vec >>= start & (BITS_PER_LONG - 1);
-                       while (vec) {
+                       start = ALIGN(start + 1, BITS_PER_LONG);
+                       while (vec && cur != start) {
                                if (vec & 1) {
-                                       page = pfn_to_page(start + off);
+                                       page = pfn_to_page(cur);
                                        __free_pages_bootmem(page, 0);
                                        count++;
                                }
                                vec >>= 1;
-                               off++;
+                               ++cur;
                        }
-                       start = ALIGN(start + 1, BITS_PER_LONG);
                }
        }
 
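Note: because vec can now carry bits borrowed from the next bitmap word, the
per-bit loop must stop at the next BITS_PER_LONG boundary (the cur != start
test); otherwise the borrowed pages would be freed here and then again on the
next outer iteration. A toy run of the bounded loop; free_page_sim() is a
hypothetical stand-in for __free_pages_bootmem():

    #include <stdio.h>

    #define BITS_PER_LONG 64UL	/* assumed */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    /* Stand-in for __free_pages_bootmem(pfn_to_page(cur), 0). */
    static void free_page_sim(unsigned long pfn)
    {
            printf("free pfn %lu\n", pfn);
    }

    int main(void)
    {
            unsigned long start = 60;
            unsigned long vec = 0xff;	/* window from the previous sketch */
            unsigned long cur = start;

            /* Bits for pfns 64..67 belong to the next window; stop at 64. */
            start = ALIGN(start + 1, BITS_PER_LONG);
            while (vec && cur != start) {
                    if (vec & 1)
                            free_page_sim(cur);
                    vec >>= 1;
                    ++cur;
            }
            return 0;	/* freed pfns 60..63 only */
    }
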
@@ -229,6 +241,22 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
        return count;
 }
 
+static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+{
+       struct zone *z;
+
+       /*
+        * In free_area_init_core(), highmem zone's managed_pages is set to
+        * present_pages, and bootmem allocator doesn't allocate from highmem
+        * zones. So there's no need to recalculate managed_pages because all
+        * highmem pages will be managed by the buddy system. Here highmem
+        * zone also includes highmem movable zone.
+        */
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+               if (!is_highmem(z))
+                       z->managed_pages = 0;
+}
+
 /**
  * free_all_bootmem_node - release a node's free pages to the buddy allocator
  * @pgdat: node to be released
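
Note: zeroing managed_pages for lowmem zones lets the counts be rebuilt from
the pages actually released to the buddy allocator, so pages kept reserved by
bootmem are never counted as managed. Highmem zones are skipped because
bootmem never allocates from them; their managed_pages == present_pages is
already accurate. A toy model of this reset-then-recount accounting (struct
zone_sim and all names below are illustrative, not kernel API):

    #include <stdio.h>
    #include <stdbool.h>

    /* Illustrative model, not the kernel's struct zone. */
    struct zone_sim {
            const char *name;
            bool highmem;
            unsigned long present_pages;
            unsigned long managed_pages;
    };

    int main(void)
    {
            struct zone_sim zones[] = {
                    { "DMA",     false, 4096,  4096 },
                    { "Normal",  false, 65536, 65536 },
                    { "HighMem", true,  32768, 32768 },
            };
            int i;

            /* Reset: lowmem counts get recomputed; highmem is already right. */
            for (i = 0; i < 3; i++)
                    if (!zones[i].highmem)
                            zones[i].managed_pages = 0;

            /* Later, each free page released to the buddy allocator bumps the
             * owning zone's count; say 1000 pages in Normal stay reserved. */
            zones[0].managed_pages += 4096;
            zones[1].managed_pages += 65536 - 1000;

            for (i = 0; i < 3; i++)
                    printf("%-8s present=%lu managed=%lu\n", zones[i].name,
                           zones[i].present_pages, zones[i].managed_pages);
            return 0;
    }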
@@ -238,6 +266,7 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
        register_page_bootmem_info_node(pgdat);
+       reset_node_lowmem_managed_pages(pgdat);
        return free_all_bootmem_core(pgdat->bdata);
 }
 
@@ -250,6 +279,10 @@ unsigned long __init free_all_bootmem(void)
 {
        unsigned long total_pages = 0;
        bootmem_data_t *bdata;
+       struct pglist_data *pgdat;
+
+       for_each_online_pgdat(pgdat)
+               reset_node_lowmem_managed_pages(pgdat);
 
        list_for_each_entry(bdata, &bdata_list, list)
                total_pages += free_all_bootmem_core(bdata);
@@ -377,21 +410,21 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 
 /**
  * free_bootmem - mark a page range as usable
- * @addr: starting address of the range
+ * @physaddr: starting physical address of the range
  * @size: size of the range in bytes
  *
  * Partial pages will be considered reserved and left as they are.
  *
  * The range must be contiguous but may span node boundaries.
  */
-void __init free_bootmem(unsigned long addr, unsigned long size)
+void __init free_bootmem(unsigned long physaddr, unsigned long size)
 {
        unsigned long start, end;
 
-       kmemleak_free_part(__va(addr), size);
+       kmemleak_free_part(__va(physaddr), size);
 
-       start = PFN_UP(addr);
-       end = PFN_DOWN(addr + size);
+       start = PFN_UP(physaddr);
+       end = PFN_DOWN(physaddr + size);
 
        mark_bootmem(start, end, 0, 0);
 }
@@ -439,12 +472,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
        return mark_bootmem(start, end, 1, flags);
 }
 
-int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
-                                  int flags)
-{
-       return reserve_bootmem(phys, len, flags);
-}
-
 static unsigned long __init align_idx(struct bootmem_data *bdata,
                                      unsigned long idx, unsigned long step)
 {
@@ -575,27 +602,6 @@ find_block:
        return NULL;
 }
 
-static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
-                                       unsigned long size, unsigned long align,
-                                       unsigned long goal, unsigned long limit)
-{
-       if (WARN_ON_ONCE(slab_is_available()))
-               return kzalloc(size, GFP_NOWAIT);
-
-#ifdef CONFIG_HAVE_ARCH_BOOTMEM
-       {
-               bootmem_data_t *p_bdata;
-
-               p_bdata = bootmem_arch_preferred_node(bdata, size, align,
-                                                       goal, limit);
-               if (p_bdata)
-                       return alloc_bootmem_bdata(p_bdata, size, align,
-                                                       goal, limit);
-       }
-#endif
-       return NULL;
-}
-
 static void * __init alloc_bootmem_core(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
@@ -604,9 +610,8 @@ static void * __init alloc_bootmem_core(unsigned long size,
        bootmem_data_t *bdata;
        void *region;
 
-       region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
-       if (region)
-               return region;
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
 
        list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
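
Note: with alloc_arch_preferred_bootmem() gone (see the removal above), its
slab_is_available() escape hatch moves directly into the allocator core: once
slab is up, a late bootmem request is simply served by kzalloc(). A minimal
sketch of that dispatch, where early_alloc(), slab_up and bootmem_alloc_sim()
are all hypothetical:

    #include <stdlib.h>
    #include <string.h>
    #include <stdbool.h>

    static bool slab_up;	/* stand-in for slab_is_available() */

    /* Crude bump allocator standing in for the real bootmem path. */
    static void *bootmem_alloc_sim(size_t size)
    {
            static char pool[4096];
            static size_t off;

            if (off + size > sizeof(pool))
                    return NULL;
            off += size;
            return memset(pool + off - size, 0, size);
    }

    /* Mirrors the new guard: prefer the real allocator once it exists. */
    static void *early_alloc(size_t size)
    {
            if (slab_up)
                    return calloc(1, size);	/* kzalloc(GFP_NOWAIT) analogue */
            return bootmem_alloc_sim(size);
    }

    int main(void)
    {
            void *a = early_alloc(64);	/* served from the boot pool */
            slab_up = true;
            void *b = early_alloc(64);	/* served from the heap */
            free(b);
            return !(a && b);
    }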
@@ -704,11 +709,9 @@ void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 {
        void *ptr;
 
+       if (WARN_ON_ONCE(slab_is_available()))
+               return kzalloc(size, GFP_NOWAIT);
 again:
-       ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
-                                          align, goal, limit);
-       if (ptr)
-               return ptr;
 
        /* do not panic in alloc_bootmem_bdata() */
        if (limit && goal + size > limit)