mm, vmscan: Update all zone LRU sizes before updating memcg
[deliverable/linux.git] / include / linux / mm_inline.h
1 #ifndef LINUX_MM_INLINE_H
2 #define LINUX_MM_INLINE_H
3
4 #include <linux/huge_mm.h>
5 #include <linux/swap.h>
6
7 #ifdef CONFIG_HIGHMEM
8 extern atomic_t highmem_file_pages;
9
10 static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
11 int nr_pages)
12 {
13 if (is_highmem_idx(zid) && is_file_lru(lru))
14 atomic_add(nr_pages, &highmem_file_pages);
15 }
16 #else
17 static inline void acct_highmem_file_pages(int zid, enum lru_list lru,
18 int nr_pages)
19 {
20 }
21 #endif
22
/**
 * page_is_file_cache - should the page be on a file LRU or anon LRU?
 * @page: the page to test
 *
 * Returns 1 for page cache pages backed by a regular filesystem, and 0
 * for anonymous, tmpfs or otherwise ram/swap backed pages. LRU handling
 * code uses this to pick the right list for a page.
 *
 * The distinction is carried in the PG_swapbacked page flag rather than
 * derived elsewhere, because it must survive until the page is finally
 * removed from the LRU (as late as __page_cache_release).
 */
static inline int page_is_file_cache(struct page *page)
{
	return PageSwapBacked(page) ? 0 : 1;
}
40
41 static __always_inline void __update_lru_size(struct lruvec *lruvec,
42 enum lru_list lru, enum zone_type zid,
43 int nr_pages)
44 {
45 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
46
47 __mod_node_page_state(pgdat, NR_LRU_BASE + lru, nr_pages);
48 acct_highmem_file_pages(zid, lru, nr_pages);
49 }
50
/*
 * Adjust the accounted size of @lru by @nr_pages (may be negative),
 * covering both the node-level statistics and, with CONFIG_MEMCG, the
 * per-memcg LRU size.
 *
 * Ordering matters here: the node/zone counters are updated via
 * __update_lru_size() *before* the memcg hook runs — this is the
 * "update all zone LRU sizes before updating memcg" fix this file
 * carries. NOTE(review): presumably mem_cgroup_update_lru_size()
 * cross-checks against the already-accounted sizes; confirm against
 * its definition before reordering anything.
 */
static __always_inline void update_lru_size(struct lruvec *lruvec,
				enum lru_list lru, enum zone_type zid,
				int nr_pages)
{
	__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
	/* Memcg accounting runs last, after the node stats are current. */
	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
#endif
}
60
61 static __always_inline void add_page_to_lru_list(struct page *page,
62 struct lruvec *lruvec, enum lru_list lru)
63 {
64 update_lru_size(lruvec, lru, page_zonenum(page), hpage_nr_pages(page));
65 list_add(&page->lru, &lruvec->lists[lru]);
66 }
67
68 static __always_inline void del_page_from_lru_list(struct page *page,
69 struct lruvec *lruvec, enum lru_list lru)
70 {
71 list_del(&page->lru);
72 update_lru_size(lruvec, lru, page_zonenum(page), -hpage_nr_pages(page));
73 }
74
75 /**
76 * page_lru_base_type - which LRU list type should a page be on?
77 * @page: the page to test
78 *
79 * Used for LRU list index arithmetic.
80 *
81 * Returns the base LRU type - file or anon - @page should be on.
82 */
83 static inline enum lru_list page_lru_base_type(struct page *page)
84 {
85 if (page_is_file_cache(page))
86 return LRU_INACTIVE_FILE;
87 return LRU_INACTIVE_ANON;
88 }
89
90 /**
91 * page_off_lru - which LRU list was page on? clearing its lru flags.
92 * @page: the page to test
93 *
94 * Returns the LRU list a page was on, as an index into the array of LRU
95 * lists; and clears its Unevictable or Active flags, ready for freeing.
96 */
97 static __always_inline enum lru_list page_off_lru(struct page *page)
98 {
99 enum lru_list lru;
100
101 if (PageUnevictable(page)) {
102 __ClearPageUnevictable(page);
103 lru = LRU_UNEVICTABLE;
104 } else {
105 lru = page_lru_base_type(page);
106 if (PageActive(page)) {
107 __ClearPageActive(page);
108 lru += LRU_ACTIVE;
109 }
110 }
111 return lru;
112 }
113
114 /**
115 * page_lru - which LRU list should a page be on?
116 * @page: the page to test
117 *
118 * Returns the LRU list a page should be on, as an index
119 * into the array of LRU lists.
120 */
121 static __always_inline enum lru_list page_lru(struct page *page)
122 {
123 enum lru_list lru;
124
125 if (PageUnevictable(page))
126 lru = LRU_UNEVICTABLE;
127 else {
128 lru = page_lru_base_type(page);
129 if (PageActive(page))
130 lru += LRU_ACTIVE;
131 }
132 return lru;
133 }
134
/* Return the page at the tail ((head)->prev) of an LRU list. */
#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
136
137 #endif
This page took 0.033731 seconds and 5 git commands to generate.