#ifndef _LINUX_PAGE_REF_H
#define _LINUX_PAGE_REF_H

#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/tracepoint-defs.h>

extern struct tracepoint __tracepoint_page_ref_set;
extern struct tracepoint __tracepoint_page_ref_mod;
extern struct tracepoint __tracepoint_page_ref_mod_and_test;
extern struct tracepoint __tracepoint_page_ref_mod_and_return;
extern struct tracepoint __tracepoint_page_ref_mod_unless;
extern struct tracepoint __tracepoint_page_ref_freeze;
extern struct tracepoint __tracepoint_page_ref_unfreeze;

#ifdef CONFIG_DEBUG_PAGE_REF

/*
 * Ideally we would want to use the trace_<tracepoint>_enabled() helper
 * functions. But due to include header file issues, that is not
 * feasible. Instead we have to open code the static key functions.
 *
 * See trace_##name##_enabled(void) in include/linux/tracepoint.h
 */
#define page_ref_tracepoint_active(t) static_key_false(&(t).key)
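/*
 * For example, page_ref_tracepoint_active(__tracepoint_page_ref_mod)
 * open codes the check that a generated trace_page_ref_mod_enabled()
 * helper would otherwise perform on the tracepoint's static key.
 */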

extern void __page_ref_set(struct page *page, int v);
extern void __page_ref_mod(struct page *page, int v);
extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
extern void __page_ref_mod_unless(struct page *page, int v, int u);
extern void __page_ref_freeze(struct page *page, int v, int ret);
extern void __page_ref_unfreeze(struct page *page, int v);

#else

#define page_ref_tracepoint_active(t) false

static inline void __page_ref_set(struct page *page, int v)
{
}
static inline void __page_ref_mod(struct page *page, int v)
{
}
static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_and_return(struct page *page, int v, int ret)
{
}
static inline void __page_ref_mod_unless(struct page *page, int v, int u)
{
}
static inline void __page_ref_freeze(struct page *page, int v, int ret)
{
}
static inline void __page_ref_unfreeze(struct page *page, int v)
{
}

#endif

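/*
 * page_ref_count() returns the raw reference count of the page it is
 * given, while page_count() first resolves compound_head(), so tail
 * pages of a compound page report the head page's reference count.
 */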
static inline int page_ref_count(struct page *page)
{
	return atomic_read(&page->_count);
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline void set_page_count(struct page *page, int v)
{
	atomic_set(&page->_count, v);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_set))
		__page_ref_set(page, v);
}

/*
 * Set up the page count before the page is freed into the page allocator
 * for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	set_page_count(page, 1);
}

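/*
 * The helpers below adjust the reference count of the page they are
 * passed (no compound_head() lookup) and, when CONFIG_DEBUG_PAGE_REF is
 * enabled, fire the corresponding page_ref tracepoint.
 */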
static inline void page_ref_add(struct page *page, int nr)
{
	atomic_add(nr, &page->_count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, nr);
}

static inline void page_ref_sub(struct page *page, int nr)
{
	atomic_sub(nr, &page->_count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -nr);
}

static inline void page_ref_inc(struct page *page)
{
	atomic_inc(&page->_count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, 1);
}

static inline void page_ref_dec(struct page *page)
{
	atomic_dec(&page->_count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod))
		__page_ref_mod(page, -1);
}

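/*
 * The *_and_test() variants return a non-zero value when the resulting
 * count is zero, and page_ref_dec_return() returns the new count, so
 * callers can tell when they dropped the last reference.
 */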
static inline int page_ref_sub_and_test(struct page *page, int nr)
{
	int ret = atomic_sub_and_test(nr, &page->_count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -nr, ret);
	return ret;
}

static inline int page_ref_dec_and_test(struct page *page)
{
	int ret = atomic_dec_and_test(&page->_count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_test))
		__page_ref_mod_and_test(page, -1, ret);
	return ret;
}

static inline int page_ref_dec_return(struct page *page)
{
	int ret = atomic_dec_return(&page->_count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_and_return))
		__page_ref_mod_and_return(page, -1, ret);
	return ret;
}

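/*
 * page_ref_add_unless() adds @nr to the count unless it is currently @u
 * and returns non-zero if the addition was performed, which lets callers
 * take a reference only when the count is not already at a forbidden
 * value (typically zero).
 */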
static inline int page_ref_add_unless(struct page *page, int nr, int u)
{
	int ret = atomic_add_unless(&page->_count, nr, u);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_mod_unless))
		__page_ref_mod_unless(page, nr, ret);
	return ret;
}

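/*
 * page_ref_freeze() atomically replaces an expected count with zero and
 * succeeds only if the count was exactly @count, i.e. nobody else holds
 * a reference; page_ref_unfreeze() later restores a non-zero count.
 */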
static inline int page_ref_freeze(struct page *page, int count)
{
	int ret = likely(atomic_cmpxchg(&page->_count, count, 0) == count);

	if (page_ref_tracepoint_active(__tracepoint_page_ref_freeze))
		__page_ref_freeze(page, count, ret);
	return ret;
}

static inline void page_ref_unfreeze(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
	if (page_ref_tracepoint_active(__tracepoint_page_ref_unfreeze))
		__page_ref_unfreeze(page, count);
}
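/*
 * A minimal usage sketch (illustrative only, not taken from a particular
 * call site): the caller is assumed to hold the only remaining reference,
 * so an expected count of 1 is used.
 *
 *	if (page_ref_freeze(page, 1)) {
 *		... the count is now 0, so page_ref_add_unless(page, 1, 0)
 *		    from other parties fails while the page is updated ...
 *		page_ref_unfreeze(page, 1);
 *	}
 */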

#endif