NR_CHARGE_TYPE,
};
-/* only for here (for easy reading.) */
-#define PCGF_CACHE (1UL << PCG_CACHE)
-#define PCGF_USED (1UL << PCG_USED)
-#define PCGF_LOCK (1UL << PCG_LOCK)
-/* Not used, but added here for completeness */
-#define PCGF_ACCT (1UL << PCG_ACCT)
-
/* for encoding cft->private value on file */
#define _MEM (0)
#define _MEMSWAP (1)
}
static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
+page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
- struct mem_cgroup *mem = pc->mem_cgroup;
- int nid = page_cgroup_nid(pc);
- int zid = page_cgroup_zid(pc);
-
- if (!mem)
- return NULL;
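+ /* Node and zone are taken from the page itself; callers pass a valid memcg. */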
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
return mem_cgroup_zoneinfo(mem, nid, zid);
}
}
}
-static inline unsigned long mem_cgroup_get_excess(struct mem_cgroup *mem)
-{
- return res_counter_soft_limit_excess(&mem->res) >> PAGE_SHIFT;
-}
-
static struct mem_cgroup_per_zone *
__mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
{
* We don't check PCG_USED bit. It's cleared when the "page" is finally
* removed from global LRU.
*/
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
/* huge page split is done under lru_lock. so, we have no races. */
MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
if (mem_cgroup_is_root(pc->mem_cgroup))
mem_cgroup_del_lru_list(page, page_lru(page));
}
+/*
+ * Writeback is about to end against a page which has been marked for immediate
+ * reclaim. If it still appears to be reclaimable, move it to the tail of the
+ * inactive list.
+ */
+void mem_cgroup_rotate_reclaimable_page(struct page *page)
+{
+ struct mem_cgroup_per_zone *mz;
+ struct page_cgroup *pc;
+ enum lru_list lru = page_lru(page);
+
+ if (mem_cgroup_disabled())
+ return;
+
+ pc = lookup_page_cgroup(page);
+ /* unused or root page is not rotated. */
+ if (!PageCgroupUsed(pc))
+ return;
+ /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
+ smp_rmb();
+ if (mem_cgroup_is_root(pc->mem_cgroup))
+ return;
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
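+ /* Reclaim takes pages from the list tail, so this page will be found soon. */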
+ list_move_tail(&pc->lru, &mz->lists[lru]);
+}
+
void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
struct mem_cgroup_per_zone *mz;
smp_rmb();
if (mem_cgroup_is_root(pc->mem_cgroup))
return;
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
list_move(&pc->lru, &mz->lists[lru]);
}
return;
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
smp_rmb();
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
/* huge page split is done under lru_lock. so, we have no races. */
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
SetPageCgroupAcctLRU(pc);
return NULL;
/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
smp_rmb();
- mz = page_cgroup_zoneinfo(pc);
+ mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
if (!mz)
return NULL;
#define mem_cgroup_from_res_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
-static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
-{
- if (do_swap_account) {
- if (res_counter_check_under_limit(&mem->res) &&
- res_counter_check_under_limit(&mem->memsw))
- return true;
- } else
- if (res_counter_check_under_limit(&mem->res))
- return true;
- return false;
-}
-
/**
- * mem_cgroup_check_margin - check if the memory cgroup allows charging
- * @mem: memory cgroup to check
- * @bytes: the number of bytes the caller intends to charge
+ * mem_cgroup_margin - calculate chargeable space of a memory cgroup
+ * @mem: the memory cgroup
*
- * Returns a boolean value on whether @mem can be charged @bytes or
- * whether this would exceed the limit.
+ * Returns the maximum amount of memory @mem can be charged with, in
+ * bytes.
*/
-static bool mem_cgroup_check_margin(struct mem_cgroup *mem, unsigned long bytes)
+static unsigned long long mem_cgroup_margin(struct mem_cgroup *mem)
{
- if (!res_counter_check_margin(&mem->res, bytes))
- return false;
- if (do_swap_account && !res_counter_check_margin(&mem->memsw, bytes))
- return false;
- return true;
+ unsigned long long margin;
+
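+ /* With swap accounting enabled, whichever counter is tighter limits new charges. */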
+ margin = res_counter_margin(&mem->res);
+ if (do_swap_account)
+ margin = min(margin, res_counter_margin(&mem->memsw));
+ return margin;
}
static unsigned int get_swappiness(struct mem_cgroup *memcg)
bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
- unsigned long excess = mem_cgroup_get_excess(root_mem);
+ unsigned long excess;
+
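+ /* The soft limit excess is reported in bytes; convert it to pages. */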
+ excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
/* If memsw_is_minimum==1, swap-out is of-no-use. */
if (root_mem->memsw_is_minimum)
return ret;
total += ret;
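+ /*
+  * Soft limit reclaim stops once the excess is gone; otherwise stop as
+  * soon as the group has room to charge into again.
+  */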
if (check_soft) {
- if (res_counter_check_under_soft_limit(&root_mem->res))
+ if (!res_counter_soft_limit_excess(&root_mem->res))
return total;
- } else if (mem_cgroup_check_under_limit(root_mem))
+ } else if (mem_cgroup_margin(root_mem))
return 1 + total;
}
return total;
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
gfp_mask, flags);
- if (mem_cgroup_check_margin(mem_over_limit, csize))
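+ /* There is now enough room for this charge, so it is worth retrying. */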
+ if (mem_cgroup_margin(mem_over_limit) >= csize)
return CHARGE_RETRY;
/*
* Even though the limit is exceeded at this point, reclaim
{
int nr_pages = page_size >> PAGE_SHIFT;
- /* try_charge() can return NULL to *memcg, taking care of it. */
- if (!mem)
- return;
-
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
* We hold lru_lock, then, reduce counter directly.
*/
lru = page_lru(head);
- mz = page_cgroup_zoneinfo(head_pc);
+ mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
MEM_CGROUP_ZSTAT(mz, lru) -= 1;
}
tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
#endif
/**
- * __mem_cgroup_move_account - move account of the page
+ * mem_cgroup_move_account - move account of the page
* @pc: page_cgroup of the page.
* @from: mem_cgroup which the page is moved from.
* @to: mem_cgroup which the page is moved to. @from != @to.
* @uncharge: whether we should call uncharge and css_put against @from.
+ * @charge_size: number of bytes being moved (PAGE_SIZE or a huge page size)
*
* The caller must confirm following.
* - page is not on LRU (isolate_page() is useful.)
- * - the pc is locked, used, and ->mem_cgroup points to @from.
+ * - compound_lock is held when charge_size > PAGE_SIZE
*
* This function doesn't do "charge" nor css_get to new cgroup. It should be
* done by a caller (__mem_cgroup_try_charge would be useful). If @uncharge is
* true, this function does "uncharge" from old cgroup, but it doesn't if
* @uncharge is false, so a caller should do "uncharge".
*/
-
-static void __mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to, bool uncharge,
- int charge_size)
+static int mem_cgroup_move_account(struct page_cgroup *pc,
+ struct mem_cgroup *from, struct mem_cgroup *to,
+ bool uncharge, int charge_size)
{
int nr_pages = charge_size >> PAGE_SHIFT;
+ unsigned long flags;
+ int ret;
VM_BUG_ON(from == to);
VM_BUG_ON(PageLRU(pc->page));
- VM_BUG_ON(!page_is_cgroup_locked(pc));
- VM_BUG_ON(!PageCgroupUsed(pc));
- VM_BUG_ON(pc->mem_cgroup != from);
+ /*
+ * The page is isolated from LRU. So, collapse function
+ * will not handle this page. But page splitting can happen.
+ * Do this check under compound_page_lock(). The caller should
+ * hold it.
+ */
+ ret = -EBUSY;
+ if (charge_size > PAGE_SIZE && !PageTransHuge(pc->page))
+ goto out;
+
+ lock_page_cgroup(pc);
+
+ ret = -EINVAL;
+ if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
+ goto unlock;
+
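+ /* Hold off concurrent page stat updates while pc->mem_cgroup is changed. */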
+ move_lock_page_cgroup(pc, &flags);
if (PageCgroupFileMapped(pc)) {
/* Update mapped_file data for mem_cgroup */
* guaranteed that "to" is never removed. So, we don't check rmdir
* status here.
*/
-}
-
-/*
- * check whether the @pc is valid for moving account and call
- * __mem_cgroup_move_account()
- */
-static int mem_cgroup_move_account(struct page_cgroup *pc,
- struct mem_cgroup *from, struct mem_cgroup *to,
- bool uncharge, int charge_size)
-{
- int ret = -EINVAL;
- unsigned long flags;
- /*
- * The page is isolated from LRU. So, collapse function
- * will not handle this page. But page splitting can happen.
- * Do this check under compound_page_lock(). The caller should
- * hold it.
- */
- if ((charge_size > PAGE_SIZE) && !PageTransHuge(pc->page))
- return -EBUSY;
-
- lock_page_cgroup(pc);
- if (PageCgroupUsed(pc) && pc->mem_cgroup == from) {
- move_lock_page_cgroup(pc, &flags);
- __mem_cgroup_move_account(pc, from, to, uncharge, charge_size);
- move_unlock_page_cgroup(pc, &flags);
- ret = 0;
- }
+ move_unlock_page_cgroup(pc, &flags);
+ ret = 0;
+unlock:
unlock_page_cgroup(pc);
/*
* check events
*/
memcg_check_events(to, pc->page);
memcg_check_events(from, pc->page);
+out:
return ret;
}
}
pc = lookup_page_cgroup(page);
- /* can happen at boot */
- if (unlikely(!pc))
- return 0;
- prefetchw(pc);
+ BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, oom, page_size);
if (ret || !mem)
/* shmem */
if (PageSwapCache(page)) {
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *mem;
ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
if (!ret)
struct mem_cgroup *mem;
int ret;
+ *ptr = NULL;
+
if (mem_cgroup_disabled())
return 0;
* page belongs to.
*/
int mem_cgroup_prepare_migration(struct page *page,
- struct page *newpage, struct mem_cgroup **ptr)
+ struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
{
struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
enum charge_type ctype;
int ret = 0;
+ *ptr = NULL;
+
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
return 0;
return 0;
*ptr = mem;
- ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, ptr, false, PAGE_SIZE);
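+ /* Use the caller's gfp_mask so the charge obeys the migration context. */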
+ ret = __mem_cgroup_try_charge(NULL, gfp_mask, ptr, false, PAGE_SIZE);
css_put(&mem->css);/* drop extra refcnt */
if (ret || *ptr == NULL) {
if (PageAnon(page)) {
struct mm_struct *mm,
gfp_t gfp_mask)
{
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *mem;
int ret;
if (mem_cgroup_disabled())
return ret;
}
+#ifdef CONFIG_DEBUG_VM
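+/* Debug helpers: detect and report a page that still has a used page_cgroup. */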
+static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
+{
+ struct page_cgroup *pc;
+
+ pc = lookup_page_cgroup(page);
+ if (likely(pc) && PageCgroupUsed(pc))
+ return pc;
+ return NULL;
+}
+
+bool mem_cgroup_bad_page_check(struct page *page)
+{
+ if (mem_cgroup_disabled())
+ return false;
+
+ return lookup_page_cgroup_used(page) != NULL;
+}
+
+void mem_cgroup_print_bad_page(struct page *page)
+{
+ struct page_cgroup *pc;
+
+ pc = lookup_page_cgroup_used(page);
+ if (pc) {
+ int ret = -1;
+ char *path;
+
+ printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
+ pc, pc->flags, pc->mem_cgroup);
+
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (path) {
+ rcu_read_lock();
+ ret = cgroup_path(pc->mem_cgroup->css.cgroup,
+ path, PATH_MAX);
+ rcu_read_unlock();
+ }
+
+ printk(KERN_CONT "(%s)\n",
+ (ret < 0) ? "cannot get the path" : path);
+ kfree(path);
+ }
+}
+#endif
+
static DEFINE_MUTEX(set_limit_mutex);
static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
pte_t *pte;
spinlock_t *ptl;
- VM_BUG_ON(pmd_trans_huge(*pmd));
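+ /* A huge pmd cannot be walked pte by pte; split it first. */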
+ split_huge_page_pmd(walk->mm, pmd);
+
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
if (is_target_pte_for_mc(vma, addr, *pte, NULL))
pte_t *pte;
spinlock_t *ptl;
+ split_huge_page_pmd(walk->mm, pmd);
retry:
- VM_BUG_ON(pmd_trans_huge(*pmd));
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) {
pte_t ptent = *(pte++);