#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
/*
- * MAX_ACTIVE_REGIONS determines the maxmimum number of distinct
+ * MAX_ACTIVE_REGIONS determines the maximum number of distinct
* ranges of memory (RAM) that may be registered with add_active_range().
* Ranges passed to add_active_range() will be merged if possible
* so the number of times add_active_range() can be called is
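The merging the comment describes is the interesting part of add_active_range(). A much-simplified, hypothetical sketch of that merge step (the real function also distinguishes forward and backward overlap and checks MAX_ACTIVE_REGIONS before appending):

/* Hypothetical simplified sketch: fold a new PFN range into an
 * overlapping early_node_map[] entry for the same node, else append.
 * The real add_active_range() also guards against overflowing
 * MAX_ACTIVE_REGIONS. */
void __init add_active_range(unsigned int nid, unsigned long start_pfn,
				unsigned long end_pfn)
{
	int i;

	for (i = 0; i < nr_nodemap_entries; i++) {
		if (early_node_map[i].nid != nid)
			continue;
		/* Overlapping range: widen the existing entry */
		if (start_pfn <= early_node_map[i].end_pfn &&
		    end_pfn >= early_node_map[i].start_pfn) {
			early_node_map[i].start_pfn =
				min(start_pfn, early_node_map[i].start_pfn);
			early_node_map[i].end_pfn =
				max(end_pfn, early_node_map[i].end_pfn);
			return;
		}
	}

	/* No mergeable region found: record a new one */
	early_node_map[nr_nodemap_entries].nid = nid;
	early_node_map[nr_nodemap_entries].start_pfn = start_pfn;
	early_node_map[nr_nodemap_entries].end_pfn = end_pfn;
	nr_nodemap_entries++;
}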
struct list_head *list, int order)
{
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
+ zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;
while (count--) {
struct page *page;
static void free_one_page(struct zone *zone, struct page *page, int order)
{
spin_lock(&zone->lock);
- zone->all_unreclaimable = 0;
+ zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
zone->pages_scanned = 0;
__free_one_page(page, zone, order);
spin_unlock(&zone->lock);
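Both hunks above replace the old zone->all_unreclaimable integer with a bit in the new zone->flags word. A sketch of the flag helpers these call sites assume, in the style of include/linux/mmzone.h (zone->flags is taken to be an unsigned long; the exact enum contents are an assumption):

/* Sketch of the zone flag interface assumed by the hunks above;
 * zone->flags is assumed to be an unsigned long used as a bitfield. */
typedef enum {
	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}

A single flags word keeps the atomic bitops (set_bit/clear_bit/test_bit) on one field instead of scattering separate integers and atomics through struct zone.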
* skip over zones that are not allowed by the cpuset, or that have
* been recently (in last second) found to be nearly full. See further
* comments in mmzone.h. Reduces cache footprint of zonelist scans
- * that have to skip over alot of full or unallowed zones.
+ * that have to skip over a lot of full or unallowed zones.
*
* If the zonelist cache is present in the passed in zonelist, then
* returns a pointer to the allowed node mask (either the current
if (page)
goto got_pg;
} else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+ if (!try_set_zone_oom(zonelist)) {
+ schedule_timeout_uninterruptible(1);
+ goto restart;
+ }
+
/*
 * Go through the zonelist yet one more time, keep
 * very high watermark here, this is only to catch
 * a parallel oom killing, we must fail if we're still
 * under heavy pressure.
 */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
- if (page)
+ if (page) {
+ clear_zonelist_oom(zonelist);
goto got_pg;
+ }
/* The OOM killer will not help higher order allocs so fail */
- if (order > PAGE_ALLOC_COSTLY_ORDER)
+ if (order > PAGE_ALLOC_COSTLY_ORDER) {
+ clear_zonelist_oom(zonelist);
goto nopage;
+ }
out_of_memory(zonelist, gfp_mask, order);
+ clear_zonelist_oom(zonelist);
goto restart;
}
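The new try_set_zone_oom()/clear_zonelist_oom() calls serialize OOM killing: only one task may run the OOM killer against a given set of zones, and a task that loses the race sleeps a tick and retries the allocation rather than firing a second, parallel kill. A sketch of how the pair could be built on the ZONE_OOM_LOCKED bit (zone_scan_lock is a hypothetical global lock for the scan; zonelist->zones is the NULL-terminated zone array of this era):

/* Sketch: take ZONE_OOM_LOCKED on every zone in the zonelist, or
 * fail if any zone is already locked by a concurrent OOM kill.
 * zone_scan_lock is a hypothetical lock making scan+set atomic. */
static DEFINE_SPINLOCK(zone_scan_lock);

int try_set_zone_oom(struct zonelist *zonelist)
{
	struct zone **z;
	int ret = 1;

	spin_lock(&zone_scan_lock);
	for (z = zonelist->zones; *z; z++) {
		if (zone_is_oom_locked(*z)) {
			/* someone else is already OOM killing here */
			ret = 0;
			goto out;
		}
	}
	for (z = zonelist->zones; *z; z++)
		zone_set_flag(*z, ZONE_OOM_LOCKED);
out:
	spin_unlock(&zone_scan_lock);
	return ret;
}

void clear_zonelist_oom(struct zonelist *zonelist)
{
	struct zone **z;

	for (z = zonelist->zones; *z; z++)
		zone_clear_flag(*z, ZONE_OOM_LOCKED);
}

Note that every exit path in the hunk above (success, the costly-order failure, and the retry after out_of_memory()) must drop the lock via clear_zonelist_oom(), otherwise the zones would stay OOM-locked forever.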
K(zone_page_state(zone, NR_INACTIVE)),
K(zone->present_pages),
zone->pages_scanned,
- (zone->all_unreclaimable ? "yes" : "no")
+ (zone_is_all_unreclaimable(zone) ? "yes" : "no")
);
printk("lowmem_reserve[]:");
for (i = 0; i < MAX_NR_ZONES; i++)
__build_all_zonelists(NULL);
cpuset_init_current_mems_allowed();
} else {
- /* we have to stop all cpus to guaranntee there is no user
+ /* we have to stop all cpus to guarantee there is no user
of zonelist */
stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
/* cpuset refresh routine should be here */
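stop_machine_run() executes its callback on one CPU while all others spin with interrupts disabled, which is what guarantees no CPU is mid-walk through a zonelist while it is rebuilt. The callback must match the int (*fn)(void *) signature stop_machine_run() takes; a plausible sketch of __build_all_zonelists():

/* Plausible sketch of the rebuild callback handed to
 * stop_machine_run(); the signature is dictated by that API. */
static int __build_all_zonelists(void *dummy)
{
	int nid;

	for_each_online_node(nid)
		build_zonelists(NODE_DATA(nid));

	return 0;
}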
/*
* Basic iterator support. Return the next active range of PFNs for a node
- * Note: nid == MAX_NUMNODES returns next region regardles of node
+ * Note: nid == MAX_NUMNODES returns next region regardless of node
*/
static int __meminit next_active_region_index_in_nid(int index, int nid)
{
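The body of this iterator falls outside the excerpt; given the comment, it scans the registered ranges forward from index for the next one belonging to nid (or any node when nid == MAX_NUMNODES). A sketch of the complete function, assuming the early_node_map[]/nr_nodemap_entries bookkeeping used elsewhere in this file:

/* Sketch of the full iterator: return the index of the next active
 * range after 'index' on node 'nid', or -1 when none remain.
 * nid == MAX_NUMNODES matches ranges on any node. */
static int __meminit next_active_region_index_in_nid(int index, int nid)
{
	for (index = index + 1; index < nr_nodemap_entries; index++)
		if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
			return index;

	return -1;
}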
zone->nr_scan_active = 0;
zone->nr_scan_inactive = 0;
zap_zone_vm_stats(zone);
- atomic_set(&zone->reclaim_in_progress, 0);
+ zone->flags = 0;
if (!size)
continue;