/* memcontrol.c - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/res_counter.h>
#include <linux/memcontrol.h>
#include <linux/cgroup.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/page-flags.h>
#include <linux/backing-dev.h>
#include <linux/bit_spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

struct cgroup_subsys mem_cgroup_subsys;
static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
static struct kmem_cache *page_cgroup_cache;

/*
 * Statistics for memory cgroup.
 */
enum mem_cgroup_stat_index {
	/*
	 * For all accounted memory: usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,	/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,	/* # of pages charged as rss */

	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_stat_cpu {
	s64 count[MEM_CGROUP_STAT_NSTATS];
} ____cacheline_aligned_in_smp;

struct mem_cgroup_stat {
	struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
};

/*
 * Accounting is done with irqs disabled, so there is no need to
 * increment the preempt count.
 */
static void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx, int val)
{
	int cpu = smp_processor_id();
	stat->cpustat[cpu].count[idx] += val;
}

static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
		enum mem_cgroup_stat_index idx)
{
	int cpu;
	s64 ret = 0;
	for_each_possible_cpu(cpu)
		ret += stat->cpustat[cpu].count[idx];
	return ret;
}
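
/*
 * Illustrative example: if CPU0 has charged three cache pages and CPU1
 * has uncharged one, the per-cpu counts are {3, -1} and
 * mem_cgroup_read_stat() returns 2.  An individual per-cpu slot may go
 * negative; only the sum across all possible cpus is meaningful.
 */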

/*
 * per-zone information in memory controller.
 */

enum mem_cgroup_zstat_index {
	MEM_CGROUP_ZSTAT_ACTIVE,
	MEM_CGROUP_ZSTAT_INACTIVE,

	NR_MEM_CGROUP_ZSTAT,
};

struct mem_cgroup_per_zone {
	/*
	 * spin_lock to protect the per cgroup LRU
	 */
	spinlock_t		lru_lock;
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		count[NR_MEM_CGROUP_ZSTAT];
};
/* Macro for accessing counter */
#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])

struct mem_cgroup_per_node {
	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
};

struct mem_cgroup_lru_info {
	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
};

/*
 * The memory controller data structure.  The memory controller controls
 * both page cache and RSS per cgroup.  We would eventually like to
 * provide statistics based on the statistics developed by Rik van Riel
 * for clock-pro, to help the administrator determine what knobs to tune.
 *
 * TODO: Add a water mark for the memory controller.  Reclaim will begin
 * when we hit the water mark.  Maybe even add a low water mark, such
 * that no reclaim occurs from a cgroup at its low water mark; this is
 * a feature that will be implemented much later in the future.
 */
struct mem_cgroup {
	struct cgroup_subsys_state css;
	/*
	 * the counter to account for memory usage
	 */
	struct res_counter res;
	/*
	 * Per cgroup active and inactive list, similar to the
	 * per zone LRU lists.
	 */
	struct mem_cgroup_lru_info info;

	int	prev_priority;	/* for recording reclaim priority */
	/*
	 * statistics.
	 */
	struct mem_cgroup_stat stat;
};
static struct mem_cgroup init_mem_cgroup;

/*
 * We use the lower bit of the page->page_cgroup pointer as a bit spin
 * lock.  We need to ensure that page->page_cgroup is at least two-byte
 * aligned (based on comments from Nick Piggin).  But since
 * bit_spin_lock doesn't actually set that lock bit in a non-debug
 * uniprocessor kernel, we should avoid setting it here too.
 */
#define PAGE_CGROUP_LOCK_BIT	0x0
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define PAGE_CGROUP_LOCK	(1 << PAGE_CGROUP_LOCK_BIT)
#else
#define PAGE_CGROUP_LOCK	0x0
#endif
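
/*
 * Illustrative example of the encoding: for a struct page_cgroup at
 * address 0x...c0de0, an SMP kernel stores 0x...c0de1 in
 * page->page_cgroup while the bit lock is held, and 0x...c0de0 once it
 * is released.  The two-byte alignment of the structure guarantees that
 * bit 0 is never part of the real pointer value.
 */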

/*
 * A page_cgroup is associated with every page descriptor.  It helps us
 * identify information about the cgroup the page belongs to.
 */
struct page_cgroup {
	struct list_head lru;		/* per cgroup LRU list */
	struct page *page;
	struct mem_cgroup *mem_cgroup;
	int ref_cnt;			/* cached, mapped, migrating */
	int flags;
};
#define PAGE_CGROUP_FLAG_CACHE	(0x1)	/* charged as cache */
#define PAGE_CGROUP_FLAG_ACTIVE	(0x2)	/* page is active in this cgroup */

static int page_cgroup_nid(struct page_cgroup *pc)
{
	return page_to_nid(pc->page);
}

static enum zone_type page_cgroup_zid(struct page_cgroup *pc)
{
	return page_zonenum(pc->page);
}

enum charge_type {
	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
	MEM_CGROUP_CHARGE_TYPE_MAPPED,
};

/*
 * Always modified under the LRU lock, so there is no need for
 * preempt_disable().
 */
static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
					bool charge)
{
	int val = (charge) ? 1 : -1;
	struct mem_cgroup_stat *stat = &mem->stat;

	VM_BUG_ON(!irqs_disabled());
	if (flags & PAGE_CGROUP_FLAG_CACHE)
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
	else
		__mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
}

static struct mem_cgroup_per_zone *
mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
{
	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
}

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct page_cgroup *pc)
{
	struct mem_cgroup *mem = pc->mem_cgroup;
	int nid = page_cgroup_nid(pc);
	int zid = page_cgroup_zid(pc);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
					enum mem_cgroup_zstat_index idx)
{
	int nid, zid;
	struct mem_cgroup_per_zone *mz;
	u64 total = 0;

	for_each_online_node(nid)
		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
			mz = mem_cgroup_zoneinfo(mem, nid, zid);
			total += MEM_CGROUP_ZSTAT(mz, idx);
		}
	return total;
}

static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
{
	return container_of(cgroup_subsys_state(cont,
				mem_cgroup_subsys_id), struct mem_cgroup,
				css);
}

struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
				struct mem_cgroup, css);
}

static inline int page_cgroup_locked(struct page *page)
{
	return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void page_assign_page_cgroup(struct page *page, struct page_cgroup *pc)
{
	VM_BUG_ON(!page_cgroup_locked(page));
	page->page_cgroup = ((unsigned long)pc | PAGE_CGROUP_LOCK);
}

struct page_cgroup *page_get_page_cgroup(struct page *page)
{
	return (struct page_cgroup *) (page->page_cgroup & ~PAGE_CGROUP_LOCK);
}

static void lock_page_cgroup(struct page *page)
{
	bit_spin_lock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static int try_lock_page_cgroup(struct page *page)
{
	return bit_spin_trylock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}

static void unlock_page_cgroup(struct page *page)
{
	bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
}
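
/*
 * A sketch of the access pattern these helpers imply (the lock must be
 * held around both the lookup and any use of the result, since the
 * page_cgroup may otherwise be freed or reassigned concurrently):
 *
 *	lock_page_cgroup(page);
 *	pc = page_get_page_cgroup(page);
 *	if (pc)
 *		... use pc ...
 *	unlock_page_cgroup(page);
 */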

static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
			struct page_cgroup *pc)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, false);
	list_del_init(&pc->lru);
}

static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
				struct page_cgroup *pc)
{
	int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;

	if (!to) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		list_add(&pc->lru, &mz->inactive_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		list_add(&pc->lru, &mz->active_list);
	}
	mem_cgroup_charge_statistics(pc->mem_cgroup, pc->flags, true);
}

static void __mem_cgroup_move_lists(struct page_cgroup *pc, bool active)
{
	int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
	struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);

	if (from)
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
	else
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) -= 1;

	if (active) {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) += 1;
		pc->flags |= PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->active_list);
	} else {
		MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
		pc->flags &= ~PAGE_CGROUP_FLAG_ACTIVE;
		list_move(&pc->lru, &mz->inactive_list);
	}
}

int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
{
	int ret;

	task_lock(task);
	ret = task->mm && mm_match_cgroup(task->mm, mem);
	task_unlock(task);
	return ret;
}

/*
 * This routine assumes that the appropriate zone's lru lock is already held.
 */
void mem_cgroup_move_lists(struct page *page, bool active)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	/*
	 * We cannot lock_page_cgroup while holding the zone's lru_lock,
	 * because other holders of lock_page_cgroup can be interrupted
	 * with an attempt to rotate_reclaimable_page.  But we cannot
	 * safely get to the page_cgroup without it, so just try_lock it:
	 * mem_cgroup_isolate_pages allows for a page left on the wrong list.
	 */
	if (!try_lock_page_cgroup(page))
		return;

	pc = page_get_page_cgroup(page);
	if (pc) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_move_lists(pc, active);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
	}
	unlock_page_cgroup(page);
}

/*
 * Calculate the mapped_ratio under the memory controller.  This will be
 * used in vmscan.c for determining whether we have to reclaim mapped
 * pages.
 */
int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	long total, rss;

	/*
	 * usage is recorded in bytes.  But, here, we assume the number of
	 * physical pages can be represented by "long" on any arch.
	 */
	total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
	rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
	return (int)((rss * 100L) / total);
}
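
/*
 * Worked example: a cgroup using 400 pages in total, 100 of them RSS,
 * yields (100 * 100) / 401 = 24, i.e. roughly a 25% mapped ratio.  The
 * "+ 1" on total only guards against division by zero and skews the
 * result slightly downward for small cgroups.
 */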

/*
 * This function is called from vmscan.c's page reclaiming loop, where
 * the balance between the active and inactive lists is calculated.  For
 * memory controller page reclaiming, we should use the mem_cgroup's
 * imbalance rather than the zone's global LRU imbalance.
 */
long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	unsigned long active, inactive;
	/* active and inactive are numbers of pages. 'long' is ok. */
	active = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_ACTIVE);
	inactive = mem_cgroup_get_all_zonestat(mem, MEM_CGROUP_ZSTAT_INACTIVE);
	return (long) (active / (inactive + 1));
}

/*
 * prev_priority control... this will be used in the memory reclaim path.
 */
int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return mem->prev_priority;
}

void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	if (priority < mem->prev_priority)
		mem->prev_priority = priority;
}

void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
{
	mem->prev_priority = priority;
}

/*
 * Calculate the number of pages to be scanned in this priority/zone.
 * See also vmscan.c
 *
 * priority starts from "DEF_PRIORITY" and is decremented in each loop.
 * (see include/linux/mmzone.h)
 */

long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem,
				   struct zone *zone, int priority)
{
	long nr_active;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_active = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE);
	return (nr_active >> priority);
}

long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
					struct zone *zone, int priority)
{
	long nr_inactive;
	int nid = zone->zone_pgdat->node_id;
	int zid = zone_idx(zone);
	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);

	nr_inactive = MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE);
	return (nr_inactive >> priority);
}
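
/*
 * Worked example of the priority shift: with DEF_PRIORITY (12, see
 * include/linux/mmzone.h) and 65536 inactive pages in a zone, the first
 * reclaim pass scans 65536 >> 12 = 16 pages; the scan window doubles
 * each time the priority drops, so pressure rises as reclaim gets more
 * desperate.
 */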

unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active)
{
	unsigned long nr_taken = 0;
	struct page *page;
	unsigned long scan;
	LIST_HEAD(pc_list);
	struct list_head *src;
	struct page_cgroup *pc, *tmp;
	int nid = z->zone_pgdat->node_id;
	int zid = zone_idx(z);
	struct mem_cgroup_per_zone *mz;

	BUG_ON(!mem_cont);
	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
	if (active)
		src = &mz->active_list;
	else
		src = &mz->inactive_list;

	spin_lock(&mz->lru_lock);
	scan = 0;
	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
		if (scan >= nr_to_scan)
			break;
		page = pc->page;

		if (unlikely(!PageLRU(page)))
			continue;

		if (PageActive(page) && !active) {
			__mem_cgroup_move_lists(pc, true);
			continue;
		}
		if (!PageActive(page) && active) {
			__mem_cgroup_move_lists(pc, false);
			continue;
		}

		scan++;
		list_move(&pc->lru, &pc_list);

		if (__isolate_lru_page(page, mode) == 0) {
			list_move(&page->lru, dst);
			nr_taken++;
		}
	}

	list_splice(&pc_list, src);
	spin_unlock(&mz->lru_lock);

	*scanned = scan;
	return nr_taken;
}
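
/*
 * This appears to be the cgroup-aware counterpart of isolate_lru_pages()
 * in vmscan.c: generic reclaim invokes it when scanning on behalf of a
 * cgroup, which is why a page found on the wrong per-cgroup list (see
 * mem_cgroup_move_lists above) is simply re-sorted here rather than
 * treated as an error.
 */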

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem;
	struct page_cgroup *pc;
	unsigned long flags;
	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup_per_zone *mz;

	if (mem_cgroup_subsys.disabled)
		return 0;

	/*
	 * Should page_cgroups go in their own slab?
	 * One could optimize the performance of the charging routine
	 * by saving a bit in the page_flags and using it as a lock
	 * to see if the page already has a page_cgroup associated
	 * with it.
	 */
retry:
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	/*
	 * The page_cgroup exists and
	 * the page has already been accounted.
	 */
	if (pc) {
		VM_BUG_ON(pc->page != page);
		VM_BUG_ON(pc->ref_cnt <= 0);

		pc->ref_cnt++;
		unlock_page_cgroup(page);
		goto done;
	}
	unlock_page_cgroup(page);

	pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask);
	if (pc == NULL)
		goto err;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates.  It's possible that mm is not
	 * set; if so, charge the init_mm (happens for pagecache usage).
	 */
	if (!mm)
		mm = &init_mm;

	rcu_read_lock();
	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
	/*
	 * For every charge from the cgroup, increment the reference count.
	 */
	css_get(&mem->css);
	rcu_read_unlock();

	while (res_counter_charge(&mem->res, PAGE_SIZE)) {
		if (!(gfp_mask & __GFP_WAIT))
			goto out;

		if (try_to_free_mem_cgroup_pages(mem, gfp_mask))
			continue;

		/*
		 * try_to_free_mem_cgroup_pages() might not give us a full
		 * picture of reclaim.  Some pages are reclaimed and might be
		 * moved to swap cache or just unmapped from the cgroup.
		 * Check the limit again to see if the reclaim reduced the
		 * current usage of the cgroup before giving up.
		 */
		if (res_counter_check_under_limit(&mem->res))
			continue;

		if (!nr_retries--) {
			mem_cgroup_out_of_memory(mem, gfp_mask);
			goto out;
		}
	}

	pc->ref_cnt = 1;
	pc->mem_cgroup = mem;
	pc->page = page;
	pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
	if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
		pc->flags = PAGE_CGROUP_FLAG_CACHE;

	lock_page_cgroup(page);
	if (page_get_page_cgroup(page)) {
		unlock_page_cgroup(page);
		/*
		 * Another charge has been added to this page already.
		 * We take lock_page_cgroup(page) again and read
		 * page->page_cgroup, incrementing the refcnt... simply
		 * retrying is OK.
		 */
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);
		kmem_cache_free(page_cgroup_cache, pc);
		goto retry;
	}
	page_assign_page_cgroup(page, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(page);
done:
	return 0;
out:
	css_put(&mem->css);
	kmem_cache_free(page_cgroup_cache, pc);
err:
	return -ENOMEM;
}

int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
{
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	if (!mm)
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_CACHE);
}
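
/*
 * Both wrappers funnel into mem_cgroup_charge_common(); the only
 * difference is the charge_type, which selects whether the page is
 * accounted under the "cache" or the "rss" statistic via
 * PAGE_CGROUP_FLAG_CACHE.
 */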

/*
 * Uncharging is always a welcome operation; we never complain, we
 * simply uncharge.
 */
void mem_cgroup_uncharge_page(struct page *page)
{
	struct page_cgroup *pc;
	struct mem_cgroup *mem;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	if (mem_cgroup_subsys.disabled)
		return;

	/*
	 * Check if our page_cgroup is valid.
	 */
	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc)
		goto unlock;

	VM_BUG_ON(pc->page != page);
	VM_BUG_ON(pc->ref_cnt <= 0);

	if (--(pc->ref_cnt) == 0) {
		mz = page_cgroup_zoneinfo(pc);
		spin_lock_irqsave(&mz->lru_lock, flags);
		__mem_cgroup_remove_list(mz, pc);
		spin_unlock_irqrestore(&mz->lru_lock, flags);

		page_assign_page_cgroup(page, NULL);
		unlock_page_cgroup(page);

		mem = pc->mem_cgroup;
		res_counter_uncharge(&mem->res, PAGE_SIZE);
		css_put(&mem->css);

		kmem_cache_free(page_cgroup_cache, pc);
		return;
	}

unlock:
	unlock_page_cgroup(page);
}

/*
 * Returns non-zero if a page (under migration) has a valid page_cgroup
 * member.  The refcnt of the page_cgroup is incremented.
 */
int mem_cgroup_prepare_migration(struct page *page)
{
	struct page_cgroup *pc;

	if (mem_cgroup_subsys.disabled)
		return 0;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (pc)
		pc->ref_cnt++;
	unlock_page_cgroup(page);
	return pc != NULL;
}

void mem_cgroup_end_migration(struct page *page)
{
	mem_cgroup_uncharge_page(page);
}

/*
 * We know both *page* and *newpage* are now not-on-LRU and PG_locked.
 * And there is no race with the uncharge() routines because the
 * page_cgroup for *page* holds one extra reference taken by
 * mem_cgroup_prepare_migration.
 */
void mem_cgroup_page_migration(struct page *page, struct page *newpage)
{
	struct page_cgroup *pc;
	struct mem_cgroup_per_zone *mz;
	unsigned long flags;

	lock_page_cgroup(page);
	pc = page_get_page_cgroup(page);
	if (!pc) {
		unlock_page_cgroup(page);
		return;
	}

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_remove_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	page_assign_page_cgroup(page, NULL);
	unlock_page_cgroup(page);

	pc->page = newpage;
	lock_page_cgroup(newpage);
	page_assign_page_cgroup(newpage, pc);

	mz = page_cgroup_zoneinfo(pc);
	spin_lock_irqsave(&mz->lru_lock, flags);
	__mem_cgroup_add_list(mz, pc);
	spin_unlock_irqrestore(&mz->lru_lock, flags);

	unlock_page_cgroup(newpage);
}

/*
 * This routine traverses the page_cgroups on the given list and drops
 * them all.  It ignores page_cgroup->ref_cnt, and it doesn't reclaim
 * the pages themselves; it just removes their page_cgroups.
 */
#define FORCE_UNCHARGE_BATCH	(128)
static void mem_cgroup_force_empty_list(struct mem_cgroup *mem,
			    struct mem_cgroup_per_zone *mz,
			    int active)
{
	struct page_cgroup *pc;
	struct page *page;
	int count = FORCE_UNCHARGE_BATCH;
	unsigned long flags;
	struct list_head *list;

	if (active)
		list = &mz->active_list;
	else
		list = &mz->inactive_list;

	spin_lock_irqsave(&mz->lru_lock, flags);
	while (!list_empty(list)) {
		pc = list_entry(list->prev, struct page_cgroup, lru);
		page = pc->page;
		get_page(page);
		spin_unlock_irqrestore(&mz->lru_lock, flags);
		mem_cgroup_uncharge_page(page);
		put_page(page);
		if (--count <= 0) {
			count = FORCE_UNCHARGE_BATCH;
			cond_resched();
		}
		spin_lock_irqsave(&mz->lru_lock, flags);
	}
	spin_unlock_irqrestore(&mz->lru_lock, flags);
}

/*
 * Force the mem_cgroup's charge to 0 if it contains no tasks.  This
 * makes it possible to delete the mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem)
{
	int ret = -EBUSY;
	int node, zid;

	if (mem_cgroup_subsys.disabled)
		return 0;

	css_get(&mem->css);
	/*
	 * The page reclaim code (kswapd etc.) will move pages between
	 * the active_list and the inactive_list while we are not holding
	 * a lock, so we have to loop here until all lists are empty.
	 */
	while (mem->res.usage > 0) {
		if (atomic_read(&mem->css.cgroup->count) > 0)
			goto out;
		for_each_node_state(node, N_POSSIBLE)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				struct mem_cgroup_per_zone *mz;
				mz = mem_cgroup_zoneinfo(mem, node, zid);
				/* drop all page_cgroups in active_list */
				mem_cgroup_force_empty_list(mem, mz, 1);
				/* drop all page_cgroups in inactive_list */
				mem_cgroup_force_empty_list(mem, mz, 0);
			}
	}
	ret = 0;
out:
	css_put(&mem->css);
	return ret;
}

static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
{
	*tmp = memparse(buf, &buf);
	if (*buf != '\0')
		return -EINVAL;

	/*
	 * Round the value up to the nearest page boundary.
	 */
	*tmp = ((*tmp + PAGE_SIZE - 1) >> PAGE_SHIFT) << PAGE_SHIFT;
	return 0;
}
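
/*
 * Example: writing "4M" to memory.limit_in_bytes is parsed by
 * memparse() as 4194304; writing "4097" is rounded up to 8192 on a
 * 4KB-page system by the shift arithmetic above.
 */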

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
				    cft->private);
}

static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
				struct file *file, const char __user *userbuf,
				size_t nbytes, loff_t *ppos)
{
	return res_counter_write(&mem_cgroup_from_cont(cont)->res,
				cft->private, userbuf, nbytes, ppos,
				mem_cgroup_write_strategy);
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;

	mem = mem_cgroup_from_cont(cont);
	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&mem->res);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&mem->res);
		break;
	}
	return 0;
}

static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
}

static const struct mem_cgroup_stat_desc {
	const char *msg;
	u64 unit;
} mem_cgroup_stat_desc[] = {
	[MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
	[MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
};

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mem_cgroup_stat *stat = &mem_cont->stat;
	int i;

	for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
		s64 val;

		val = mem_cgroup_read_stat(stat, i);
		val *= mem_cgroup_stat_desc[i].unit;
		cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
	}
	/* showing # of active pages */
	{
		unsigned long active, inactive;

		inactive = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_INACTIVE);
		active = mem_cgroup_get_all_zonestat(mem_cont,
						MEM_CGROUP_ZSTAT_ACTIVE);
		cb->fill(cb, "active", (active) * PAGE_SIZE);
		cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
	}
	return 0;
}

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = RES_USAGE,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = RES_LIMIT,
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = RES_FAILCNT,
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "force_empty",
		.trigger = mem_force_empty_write,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
};
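
/*
 * From userspace these appear as files under a cgroup mount, along the
 * lines of (a sketch; paths and mount options depend on your setup):
 *
 *	mount -t cgroup -o memory none /cgroups
 *	mkdir /cgroups/grp
 *	echo 64M > /cgroups/grp/memory.limit_in_bytes
 *	cat /cgroups/grp/memory.usage_in_bytes
 *	cat /cgroups/grp/memory.stat
 */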

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes.
	 * But it's a BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste a lot of memory for nodes which
	 * will never be onlined.  It's better to use a memory hotplug
	 * callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	memset(pn, 0, sizeof(*pn));

	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		INIT_LIST_HEAD(&mz->active_list);
		INIT_LIST_HEAD(&mz->inactive_list);
		spin_lock_init(&mz->lru_lock);
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;

	if (sizeof(*mem) < PAGE_SIZE)
		mem = kmalloc(sizeof(*mem), GFP_KERNEL);
	else
		mem = vmalloc(sizeof(*mem));

	if (mem)
		memset(mem, 0, sizeof(*mem));
	return mem;
}
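
/*
 * struct mem_cgroup embeds a mem_cgroup_stat with one cacheline-aligned
 * entry per possible CPU, so on large-NR_CPUS configurations it can
 * exceed PAGE_SIZE; falling back to vmalloc() avoids demanding a
 * high-order physically contiguous allocation in that case.
 */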

static void mem_cgroup_free(struct mem_cgroup *mem)
{
	if (sizeof(*mem) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static struct cgroup_subsys_state *
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem;
	int node;

	if (unlikely((cont->parent) == NULL)) {
		mem = &init_mem_cgroup;
		page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
	} else {
		mem = mem_cgroup_alloc();
		if (!mem)
			return ERR_PTR(-ENOMEM);
	}

	res_counter_init(&mem->res);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	return &mem->css;
free_out:
	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);
	if (cont->parent != NULL)
		mem_cgroup_free(mem);
	return ERR_PTR(-ENOMEM);
}

static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	mem_cgroup_force_empty(mem);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int node;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	mem_cgroup_free(mem_cgroup_from_cont(cont));
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	if (mem_cgroup_subsys.disabled)
		return 0;
	return cgroup_add_files(cont, ss, mem_cgroup_files,
					ARRAY_SIZE(mem_cgroup_files));
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p)
{
	struct mm_struct *mm;
	struct mem_cgroup *mem, *old_mem;

	if (mem_cgroup_subsys.disabled)
		return;

	mm = get_task_mm(p);
	if (mm == NULL)
		return;

	mem = mem_cgroup_from_cont(cont);
	old_mem = mem_cgroup_from_cont(old_cont);

	if (mem == old_mem)
		goto out;

	/*
	 * Only thread group leaders are allowed to migrate, the mm_struct
	 * is in effect owned by the leader.
	 */
	if (!thread_group_leader(p))
		goto out;

out:
	mmput(mm);
}

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
};