mm: place page->pmd_huge_pte to right union
include/linux/cpuset.h
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
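
/*
 * Usage sketch (not taken from this header; the zonelist walk and the
 * try_alloc_from() helper below are hypothetical): an allocator-style
 * caller would skip zones whose node fails the cpuset check, e.g.
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		page = try_alloc_from(zone, gfp_mask);
 *		if (page)
 *			break;
 *	}
 *
 * The _hardwall variants apply the stricter of the two checks.
 */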

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
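
/*
 * Usage sketch (hypothetical caller, not part of this header; the exact
 * allocation calls are illustrative): a page-cache style allocation can
 * honour memory spreading by allocating on the next spread node whenever
 * the policy is enabled for the current task, e.g.
 *
 *	if (cpuset_do_page_mem_spread())
 *		page = alloc_pages_exact_node(cpuset_mem_spread_node(),
 *					      gfp_mask, 0);
 *	else
 *		page = alloc_pages(gfp_mask, 0);
 */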

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * get_mems_allowed is required when making decisions involving mems_allowed
 * such as during page allocation. mems_allowed can be updated in parallel
 * and depending on the new value an operation can fail potentially causing
 * process failure. A retry loop with get_mems_allowed and put_mems_allowed
 * prevents these artificial failures.
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns false, the operation that took place after get_mems_allowed
 * may have failed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
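
/*
 * Typical retry loop (a sketch only; alloc_something() is a hypothetical
 * stand-in for whatever nodemask-dependent operation is being protected):
 *
 *	unsigned int cpuset_mems_cookie;
 *	bool ok;
 *
 *	do {
 *		cpuset_mems_cookie = get_mems_allowed();
 *		ok = alloc_something(&current->mems_allowed);
 *	} while (!ok && !put_mems_allowed(cpuset_mems_cookie));
 *
 * If the operation failed only because mems_allowed changed underneath it,
 * put_mems_allowed() returns false and the loop retries.
 */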

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
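
/*
 * set_mems_allowed() is the write side of mems_allowed_seq, which the
 * get_mems_allowed()/put_mems_allowed() readers above sample. A minimal
 * sketch of publishing a new mask for the current task (the mask value
 * is arbitrary and purely illustrative):
 *
 *	nodemask_t newmask = NODE_MASK_NONE;
 *
 *	node_set(0, newmask);
 *	set_mems_allowed(newmask);
 */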

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int get_mems_allowed(void)
{
	return 0;
}

static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */