mm, page_alloc: remove unnecessary recalculations for dirty zone balancing
include/linux/cpuset.h
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

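/*
 * Illustrative sketch, not part of this header: cpusets_enabled_key is a
 * jump label, so with no cpusets configured cpusets_enabled() becomes a
 * patched-out branch and hot paths pay nothing for cpuset support.  The
 * key is expected to be bumped from the cgroup callbacks in kernel/cpuset.c
 * (the callback names below are assumptions for the example) whenever a
 * cpuset is created or destroyed:
 *
 *	static int cpuset_css_online(struct cgroup_subsys_state *css)
 *	{
 *		...
 *		cpuset_inc();		// another cpuset exists: enable checks
 *		...
 *	}
 *
 *	static void cpuset_css_offline(struct cgroup_subsys_state *css)
 *	{
 *		...
 *		cpuset_dec();		// may patch the checks back out
 *		...
 *	}
 */
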
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

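/*
 * Illustrative usage, not taken from this header: an allocator zonelist
 * walk would typically gate the cpuset check behind the static key so the
 * common no-cpusets case stays cheap.  The iterator and ALLOC_CPUSET flag
 * below are assumptions for the sketch:
 *
 *	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask) {
 *		if (cpusets_enabled() &&
 *		    (alloc_flags & ALLOC_CPUSET) &&
 *		    !cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		... try to allocate from this zone ...
 *	}
 */
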
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

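/*
 * Illustrative note, an assumption rather than a definition made here: the
 * memory_pressure mechanism maintains a per-cpuset running average of how
 * often tasks in the cpuset enter synchronous reclaim, and it is off unless
 * enabled through the root cpuset's memory_pressure_enabled file.  A direct
 * reclaim entry point might record it roughly like this:
 *
 *	// allocation failed, about to reclaim synchronously
 *	cpuset_memory_pressure_bump();
 *	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 */
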
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

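/*
 * Illustrative usage, not part of this header: with memory spreading
 * enabled for the task's cpuset, page cache allocations can be distributed
 * across the allowed nodes instead of favouring the local node.  A
 * __page_cache_alloc()-style helper (names are assumptions for the sketch)
 * might do:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */
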
extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
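
/*
 * Illustrative retry loop, an assumption about the caller rather than part
 * of this header: an allocation path samples the cookie, attempts the
 * allocation against the current mems_allowed, and retries if the mask
 * changed underneath it (get_page_from_freelist() here stands in for the
 * real attempt):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 * retry_cpuset:
 *	cpuset_mems_cookie = read_mems_allowed_begin();
 *	page = get_page_from_freelist(gfp_mask, order, alloc_flags, &ac);
 *	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
 *		goto retry_cpuset;
 */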

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	/*
	 * Interrupts are disabled across the seqcount write section so that
	 * a reader calling read_mems_allowed_begin() from interrupt context
	 * on this CPU cannot spin forever on an odd sequence count.
	 */
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */