Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _LINUX_CPUSET_H |
2 | #define _LINUX_CPUSET_H | |
3 | /* | |
4 | * cpuset interface | |
5 | * | |
6 | * Copyright (C) 2003 BULL SA | |
825a46af | 7 | * Copyright (C) 2004-2006 Silicon Graphics, Inc. |
1da177e4 LT |
8 | * |
9 | */ | |
10 | ||
11 | #include <linux/sched.h> | |
12 | #include <linux/cpumask.h> | |
13 | #include <linux/nodemask.h> | |
a1bc5a4e | 14 | #include <linux/mm.h> |
664eedde | 15 | #include <linux/jump_label.h> |
1da177e4 LT |
16 | |
17 | #ifdef CONFIG_CPUSETS | |
18 | ||
/*
 * A static key tracks whether any cpuset beyond the root exists, so the
 * allocator fast paths pay (nearly) nothing when cpusets are unused.
 */
extern struct static_key cpusets_enabled_key;

/* True when at least one non-root cpuset has been created. */
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

/* Total number of cpusets in the system, including the top-level cpuset. */
static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

/* Bump the key when a (non-root) cpuset is created. */
static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

/* Drop the key when a (non-root) cpuset is destroyed. */
static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
/* The set of memory nodes the current task may allocate from. */
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
202f72d5 | 50 | |
a1bc5a4e DR |
51 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); |
52 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); | |
02a0e53d | 53 | |
a1bc5a4e | 54 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) |
02a0e53d | 55 | { |
664eedde | 56 | return nr_cpusets() <= 1 || |
a1bc5a4e | 57 | __cpuset_node_allowed_softwall(node, gfp_mask); |
02a0e53d PJ |
58 | } |
59 | ||
a1bc5a4e | 60 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) |
202f72d5 | 61 | { |
664eedde | 62 | return nr_cpusets() <= 1 || |
a1bc5a4e DR |
63 | __cpuset_node_allowed_hardwall(node, gfp_mask); |
64 | } | |
65 | ||
66 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | |
67 | { | |
68 | return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); | |
69 | } | |
70 | ||
71 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | |
72 | { | |
73 | return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); | |
202f72d5 PJ |
74 | } |
75 | ||
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

/*
 * Record a memory-pressure event for current's cpuset, but only when the
 * global collection knob is enabled — the common (disabled) case costs a
 * single integer test.  do/while(0) keeps the macro statement-safe.
 */
#define cpuset_memory_pressure_bump() \
	do { \
		if (cpuset_memory_pressure_enabled) \
			__cpuset_memory_pressure_bump(); \
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
86 | ||
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

/* Does current's cpuset ask for page cache allocations to be spread? */
static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

/* Does current's cpuset ask for slab allocations to be spread? */
static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}
104 | ||
extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	/* Snapshot the seqcount; pair with read_mems_allowed_retry(). */
	return read_seqcount_begin(&current->mems_allowed_seq);
}
122 | ||
/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	/* @seq is the value returned by the matching read_mems_allowed_begin */
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
133 | ||
/*
 * Publish a new mems_allowed for current.  task_lock serializes writers;
 * the seqcount write section lets lock-free readers
 * (read_mems_allowed_begin/retry) detect a concurrent update.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * IRQs are disabled across the write section so a reader running
	 * in interrupt context on this CPU cannot observe an odd seqcount
	 * and spin forever waiting for the write to finish.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
146 | ||
1da177e4 LT |
147 | #else /* !CONFIG_CPUSETS */ |
148 | ||
/* !CONFIG_CPUSETS stubs: behave as if only the root cpuset existed. */
static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

/* Without cpusets there is a single sched domain spanning all CPUs. */
static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}
158 | ||
/* No cpuset restrictions: any possible CPU is allowed. */
static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

/* Nothing to fall back to when cpusets are compiled out. */
static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

/* No cpuset restrictions: any possible memory node is allowed. */
static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}
173 | ||
/* Without cpusets, every node with memory is allowed. */
#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}
181 | ||
/* With cpusets compiled out, every node and zone is always allowed. */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}
201 | ||
/* All tasks share the same (unrestricted) mems_allowed: always intersect. */
static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

/* Memory-pressure accounting is a no-op without cpusets. */
static inline void cpuset_memory_pressure_bump(void) {}
209 | ||
/* No cpuset fields to add to /proc status output. */
static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}
214 | ||
/* Memory-spread policy does not exist without cpusets: node 0, spread off. */
static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}
234 | ||
static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

/* Rebuild collapses to the single all-CPU sched domain. */
static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}
248 | ||
static inline void set_mems_allowed(nodemask_t nodemask)
{
}

/* mems_allowed never changes without cpusets, so readers never retry. */
static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}
262 | ||
1da177e4 LT |
263 | #endif /* !CONFIG_CPUSETS */ |
264 | ||
265 | #endif /* _LINUX_CPUSET_H */ |