slab/slub: consider a memcg parameter in kmem_cache_create
include/linux/memcontrol.h
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_MEMCG
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but only reclaims memory from all available
 * zones, so the "where do I want memory from" bits of gfp_mask have no
 * meaning. Any bits could therefore be passed, but having a rule avoids
 * ambiguity: a charge function's gfp_mask should be either GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);

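/*
 * Typical call site, as an illustrative sketch only (loosely modeled on the
 * page-cache add path; the helper name and surrounding locking are assumed,
 * not part of this header). Per the rule above, only the reclaim-relevant
 * bits of the caller's gfp_mask are passed through:
 *
 *      int add_page_example(struct page *page, gfp_t gfp_mask)
 *      {
 *              int error;
 *
 *              error = mem_cgroup_cache_charge(page, current->mm,
 *                                              gfp_mask & GFP_RECLAIM_MASK);
 *              if (error)
 *                      return error;
 *              ... insert the page into the page cache ...
 *              return 0;
 *      }
 *
 * If a later step fails, the caller is expected to undo the charge, e.g. via
 * mem_cgroup_uncharge_cache_page() once the page has been removed again.
 */
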
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For batching (coalescing) uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
        struct mem_cgroup *task_memcg;
        bool match;

        rcu_read_lock();
        task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
        match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);

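/*
 * Sketch of the migration protocol (illustrative only; the copy step below is
 * a placeholder, not a real function):
 *
 *      struct mem_cgroup *memcg;
 *      bool migration_ok;
 *
 *      mem_cgroup_prepare_migration(page, newpage, &memcg);
 *      migration_ok = ...;             try to move page contents to newpage
 *      mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 *
 * end_migration() follows prepare_migration() on both the success and the
 * failure path; @migration_ok tells memcg which of the two pages keeps the
 * charge.
 */
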
struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

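/*
 * Hierarchy walk, as a sketch of the calling convention (pass NULL to start
 * and the previous return value thereafter; the loop body is assumed):
 *
 *      struct mem_cgroup *iter;
 *
 *      for (iter = mem_cgroup_iter(root, NULL, NULL);
 *           iter;
 *           iter = mem_cgroup_iter(root, iter, NULL)) {
 *              ...
 *              if (need_to_stop) {
 *                      mem_cgroup_iter_break(root, iter);
 *                      break;
 *              }
 *      }
 *
 * Breaking out of the loop without mem_cgroup_iter_break() would leak the
 * reference the iterator still holds on the current memcg.
 */
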
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                      struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                          struct page *newpage);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
        if (mem_cgroup_subsys.disabled)
                return true;
        return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}

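/*
 * Usage sketch for the page-stat protocol (loosely based on file-rmap
 * accounting; the condition in the middle is an assumption for illustration):
 *
 *      bool locked;
 *      unsigned long flags;
 *
 *      mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *      if (page_became_file_mapped)
 *              mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *      mem_cgroup_end_update_page_stat(page, &locked, &flags);
 *
 * The begin/end pair makes the update safe against concurrent charge moving;
 * when no move is in flight it costs no more than rcu_read_lock()/unlock().
 */
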
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
                                             enum vm_event_item idx)
{
        if (mem_cgroup_disabled())
                return;
        __mem_cgroup_count_vm_event(mm, idx);
}
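/*
 * Example (illustrative): a fault path can attribute a major fault to the
 * faulting mm's memcg with
 *
 *      mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
 *
 * where PGMAJFAULT is one of the enum vm_event_item values.
 */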
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
                                   struct mem_cgroup *memcg)
{
        return true;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
                                     const struct mem_cgroup *memcg)
{
        return 1;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline void
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                             struct mem_cgroup **memcgp)
{
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                           int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                                 struct page *newpage)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;
static inline bool memcg_kmem_enabled(void)
{
        return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power not to incur any overhead
 * in the kmem functions for non-memcg users -- not even a function call,
 * if we can avoid it.
 *
 * Therefore, we inline all of these functions so that in the best case we
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
                                 int order);
void __memcg_kmem_commit_charge(struct page *page,
                                struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);
int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s);
void memcg_release_cache(struct kmem_cache *cachep);
void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);

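/*
 * Rough sketch of how a slab-cache creation path can use the hooks above
 * (illustrative only; the control flow and labels are assumed, not part of
 * this header):
 *
 *      struct kmem_cache *s = ...;             cache set up by the caller
 *
 *      if (memcg_register_cache(memcg, s))
 *              goto out_undo;                  per-memcg bookkeeping failed
 *      memcg_cache_list_add(memcg, s);         make the cache visible to memcg
 *
 * memcg_cache_id() gives the index reserved for @memcg in per-cache arrays,
 * and memcg_release_cache() undoes the registration when the cache goes away.
 */
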
/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * returns true if the memcg where the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        if (!memcg_kmem_enabled())
                return true;

        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
         * unaccounted. We could in theory charge it with
         * res_counter_charge_nofail, but we hope those allocations are rare,
         * and won't be worth the trouble.
         */
        if (!(gfp & __GFP_KMEMCG) || (gfp & __GFP_NOFAIL))
                return true;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
                return true;

        /* If the task is dying, just let it go. */
        if (unlikely(fatal_signal_pending(current)))
                return true;

        return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
        if (memcg_kmem_enabled())
                __memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @memcg to the corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
        if (memcg_kmem_enabled() && memcg)
                __memcg_kmem_commit_charge(page, memcg, order);
}

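/*
 * Putting the three together -- an illustrative sketch of a charged kernel
 * page allocation (the real hooks live in the page allocator; the middle
 * step below is a placeholder):
 *
 *      struct mem_cgroup *memcg = NULL;
 *      struct page *page;
 *
 *      if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *              return NULL;            charge refused: fail the allocation
 *      page = ...;                     grab pages from the freelists
 *      memcg_kmem_commit_charge(page, memcg, order);   NULL page reverts it
 *      return page;
 *
 * The matching free path calls memcg_kmem_uncharge_pages(page, order) before
 * handing the pages back.
 */
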
#else
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
        return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
        return -1;
}

static inline int memcg_register_cache(struct mem_cgroup *memcg,
                                       struct kmem_cache *s)
{
        return 0;
}

static inline void memcg_release_cache(struct kmem_cache *cachep)
{
}

static inline void memcg_cache_list_add(struct mem_cgroup *memcg,
                                        struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */