#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

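/*
 * Paraphrased summary of the distinction (not the original kernel
 * documentation): the "softwall" check may also accept nodes belonging
 * to the nearest hardwalled ancestor cpuset for kernel-internal
 * (!__GFP_HARDWALL) allocations, while the "hardwall" check confines
 * the node to the task's own mems_allowed.  Both wrappers
 * short-circuit to "allowed" when only the trivial top cpuset exists
 * (number_of_cpusets <= 1), keeping the common case to one comparison.
 */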
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
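
/*
 * Usage sketch (illustrative only, not code from this header): a page
 * allocator's zonelist scan would typically skip zones the cpuset
 * forbids, along the lines of:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		...try to allocate from this zone...
 *	}
 */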

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
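
/*
 * Note on the macro above: the common (pressure-disabled) case costs
 * only an inlined test of one global flag; the out-of-line
 * __cpuset_memory_pressure_bump() runs only once the user has enabled
 * the per-cpuset memory_pressure meter.  Per the cpuset documentation,
 * the counter is bumped when a task in a cpuset enters direct page
 * reclaim.
 */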

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * get_mems_allowed is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing process failure. A retry loop with
 * get_mems_allowed and put_mems_allowed prevents these artificial
 * failures.
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns false, the operation that took place after
 * get_mems_allowed may have failed. It is up to the caller to retry
 * the operation if appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
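
/*
 * Illustrative retry loop (a sketch only; "do_alloc" is a hypothetical
 * stand-in for the real operation, e.g. a page allocation):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = get_mems_allowed();
 *		page = do_alloc(gfp_mask, order);
 *	} while (!put_mems_allowed(cpuset_mems_cookie) && !page);
 *
 * When both the operation and the cookie check fail, mems_allowed
 * changed mid-operation, so the failure may be artificial and the
 * operation is retried.
 */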

/*
 * Writer side of the mems_allowed_seq seqcount that the
 * get_mems_allowed()/put_mems_allowed() readers above check against.
 */
static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	do_set_cpus_allowed(p, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
					      struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int get_mems_allowed(void)
{
	return 0;
}

static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */