cpuset: speed up sched domain partition
1/*
2 * kernel/cpuset.c
3 *
4 * Processor and Memory placement constraints for sets of tasks.
5 *
6 * Copyright (C) 2003 BULL SA.
7 * Copyright (C) 2004-2007 Silicon Graphics, Inc.
8 * Copyright (C) 2006 Google, Inc
9 *
10 * Portions derived from Patrick Mochel's sysfs code.
11 * sysfs is Copyright (c) 2001-3 Patrick Mochel
12 *
13 * 2003-10-10 Written by Simon Derr.
14 * 2003-10-22 Updates by Stephen Hemminger.
15 * 2004 May-July Rework by Paul Jackson.
16 * 2006 Rework by Paul Menage to use generic cgroups
17 *
18 * This file is subject to the terms and conditions of the GNU General Public
19 * License. See the file COPYING in the main directory of the Linux
20 * distribution for more details.
21 */
22
23#include <linux/cpu.h>
24#include <linux/cpumask.h>
25#include <linux/cpuset.h>
26#include <linux/err.h>
27#include <linux/errno.h>
28#include <linux/file.h>
29#include <linux/fs.h>
30#include <linux/init.h>
31#include <linux/interrupt.h>
32#include <linux/kernel.h>
33#include <linux/kmod.h>
34#include <linux/list.h>
35#include <linux/mempolicy.h>
36#include <linux/mm.h>
37#include <linux/module.h>
38#include <linux/mount.h>
39#include <linux/namei.h>
40#include <linux/pagemap.h>
41#include <linux/proc_fs.h>
42#include <linux/rcupdate.h>
43#include <linux/sched.h>
44#include <linux/seq_file.h>
45#include <linux/security.h>
46#include <linux/slab.h>
47#include <linux/spinlock.h>
48#include <linux/stat.h>
49#include <linux/string.h>
50#include <linux/time.h>
51#include <linux/backing-dev.h>
52#include <linux/sort.h>
53
54#include <asm/uaccess.h>
55#include <asm/atomic.h>
56#include <linux/mutex.h>
57#include <linux/kfifo.h>
58#include <linux/workqueue.h>
59#include <linux/cgroup.h>
60
61/*
62 * Tracks how many cpusets are currently defined in the system.
63 * When there is only one cpuset (the root cpuset) we can
64 * short circuit some hooks.
65 */
66int number_of_cpusets __read_mostly;
67
68/* Forward declare cgroup structures */
69struct cgroup_subsys cpuset_subsys;
70struct cpuset;
71
72/* See "Frequency meter" comments, below. */
73
74struct fmeter {
75 int cnt; /* unprocessed events count */
76 int val; /* most recent output value */
77 time_t time; /* clock (secs) when val computed */
78 spinlock_t lock; /* guards read or write of above */
79};
80
81struct cpuset {
82 struct cgroup_subsys_state css;
83
84 unsigned long flags; /* "unsigned long" so bitops work */
85 cpumask_t cpus_allowed; /* CPUs allowed to tasks in cpuset */
86 nodemask_t mems_allowed; /* Memory Nodes allowed to tasks */
87
88 struct cpuset *parent; /* my parent */
89
90 /*
91 * Copy of global cpuset_mems_generation as of the most
92 * recent time this cpuset changed its mems_allowed.
93 */
94 int mems_generation;
95
96 struct fmeter fmeter; /* memory_pressure filter */
97
98 /* partition number for rebuild_sched_domains() */
99 int pn;
100
101 /* for custom sched domain */
102 int relax_domain_level;
103
104 /* used for walking a cpuset hierarchy */
105 struct list_head stack_list;
106};
107
108/* Retrieve the cpuset for a cgroup */
109static inline struct cpuset *cgroup_cs(struct cgroup *cont)
110{
111 return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
112 struct cpuset, css);
113}
114
115/* Retrieve the cpuset for a task */
116static inline struct cpuset *task_cs(struct task_struct *task)
117{
118 return container_of(task_subsys_state(task, cpuset_subsys_id),
119 struct cpuset, css);
120}
121struct cpuset_hotplug_scanner {
122 struct cgroup_scanner scan;
123 struct cgroup *to;
124};
8793d854 125
126/* bits in struct cpuset flags field */
127typedef enum {
128 CS_CPU_EXCLUSIVE,
129 CS_MEM_EXCLUSIVE,
78608366 130 CS_MEM_HARDWALL,
45b07ef3 131 CS_MEMORY_MIGRATE,
029190c5 132 CS_SCHED_LOAD_BALANCE,
133 CS_SPREAD_PAGE,
134 CS_SPREAD_SLAB,
135} cpuset_flagbits_t;
136
137/* convenient tests for these bits */
138static inline int is_cpu_exclusive(const struct cpuset *cs)
139{
7b5b9ef0 140 return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
141}
142
143static inline int is_mem_exclusive(const struct cpuset *cs)
144{
7b5b9ef0 145 return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
146}
147
148static inline int is_mem_hardwall(const struct cpuset *cs)
149{
150 return test_bit(CS_MEM_HARDWALL, &cs->flags);
151}
152
153static inline int is_sched_load_balance(const struct cpuset *cs)
154{
155 return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
156}
157
158static inline int is_memory_migrate(const struct cpuset *cs)
159{
7b5b9ef0 160 return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
161}
162
163static inline int is_spread_page(const struct cpuset *cs)
164{
165 return test_bit(CS_SPREAD_PAGE, &cs->flags);
166}
167
168static inline int is_spread_slab(const struct cpuset *cs)
169{
170 return test_bit(CS_SPREAD_SLAB, &cs->flags);
171}
172
173/*
174 * Increment this integer every time any cpuset changes its
175 * mems_allowed value. Users of cpusets can track this generation
176 * number, and avoid having to lock and reload mems_allowed unless
177 * the cpuset they're using changes generation.
178 *
179 * A single, global generation is needed because cpuset_attach_task() could
180 * reattach a task to a different cpuset, which must not have its
181 * generation numbers aliased with those of that task's previous cpuset.
182 *
183 * Generations are needed for mems_allowed because one task cannot
184 * modify another's memory placement. So we must enable every task,
185 * on every visit to __alloc_pages(), to efficiently check whether
186 * its current->cpuset->mems_allowed has changed, requiring an update
187 * of its current->mems_allowed.
188 *
189 * Since writes to cpuset_mems_generation are guarded by the cgroup lock,
190 * there is no need to mark it atomic.
191 */
192static int cpuset_mems_generation;
193
194static struct cpuset top_cpuset = {
195 .flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
196 .cpus_allowed = CPU_MASK_ALL,
197 .mems_allowed = NODE_MASK_ALL,
198};
199
200/*
201 * There are two global mutexes guarding cpuset structures. The first
202 * is the main control groups cgroup_mutex, accessed via
203 * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific
204 * callback_mutex, below. They can nest. It is ok to first take
205 * cgroup_mutex, then nest callback_mutex. We also require taking
206 * task_lock() when dereferencing a task's cpuset pointer. See "The
207 * task_lock() exception", at the end of this comment.
208 *
209 * A task must hold both mutexes to modify cpusets. If a task
210 * holds cgroup_mutex, then it blocks others wanting that mutex,
211 * ensuring that it is the only task able to also acquire callback_mutex
212 * and be able to modify cpusets. It can perform various checks on
213 * the cpuset structure first, knowing nothing will change. It can
214 * also allocate memory while just holding cgroup_mutex. While it is
215 * performing these checks, various callback routines can briefly
216 * acquire callback_mutex to query cpusets. Once it is ready to make
217 * the changes, it takes callback_mutex, blocking everyone else.
218 *
219 * Calls to the kernel memory allocator can not be made while holding
220 * callback_mutex, as that would risk double tripping on callback_mutex
221 * from one of the callbacks into the cpuset code from within
222 * __alloc_pages().
223 *
224 * If a task is only holding callback_mutex, then it has read-only
225 * access to cpusets.
226 *
227 * The task_struct fields mems_allowed and mems_generation may only
228 * be accessed in the context of that task, so require no locks.
229 *
230 * The cpuset_common_file_read() handlers only hold callback_mutex across
231 * small pieces of code, such as when reading out possibly multi-word
232 * cpumasks and nodemasks.
233 *
234 * Accessing a task's cpuset should be done in accordance with the
235 * guidelines for accessing subsystem state in kernel/cgroup.c
236 */
237
238static DEFINE_MUTEX(callback_mutex);
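/*
 * A minimal sketch of the nesting order described above (added here as
 * an illustration only, not a routine from this file): a writer takes
 * cgroup_mutex first, then callback_mutex, and releases in reverse:
 *
 *	cgroup_lock();			(outer - serializes cpuset writers)
 *	mutex_lock(&callback_mutex);	(inner - briefly blocks readers)
 *	... modify cpus_allowed / mems_allowed ...
 *	mutex_unlock(&callback_mutex);
 *	cgroup_unlock();
 */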
4247bdc6 239
240/* This is ugly, but preserves the userspace API for existing cpuset
241 * users. If someone tries to mount the "cpuset" filesystem, we
242 * silently switch it to mount "cgroup" instead */
243static int cpuset_get_sb(struct file_system_type *fs_type,
244 int flags, const char *unused_dev_name,
245 void *data, struct vfsmount *mnt)
1da177e4 246{
247 struct file_system_type *cgroup_fs = get_fs_type("cgroup");
248 int ret = -ENODEV;
249 if (cgroup_fs) {
250 char mountopts[] =
251 "cpuset,noprefix,"
252 "release_agent=/sbin/cpuset_release_agent";
253 ret = cgroup_fs->get_sb(cgroup_fs, flags,
254 unused_dev_name, mountopts, mnt);
255 put_filesystem(cgroup_fs);
256 }
257 return ret;
258}
259
260static struct file_system_type cpuset_fs_type = {
261 .name = "cpuset",
262 .get_sb = cpuset_get_sb,
263};
264
265/*
266 * Return in *pmask the portion of a cpuset's cpus_allowed that
267 * are online. If none are online, walk up the cpuset hierarchy
268 * until we find one that does have some online cpus. If we get
269 * all the way to the top and still haven't found any online cpus,
270 * return cpu_online_map. Or if passed a NULL cs from an exit'ing
271 * task, return cpu_online_map.
272 *
273 * One way or another, we guarantee to return some non-empty subset
274 * of cpu_online_map.
275 *
276 * Call with callback_mutex held.
277 */
278
279static void guarantee_online_cpus(const struct cpuset *cs, cpumask_t *pmask)
280{
281 while (cs && !cpus_intersects(cs->cpus_allowed, cpu_online_map))
282 cs = cs->parent;
283 if (cs)
284 cpus_and(*pmask, cs->cpus_allowed, cpu_online_map);
285 else
286 *pmask = cpu_online_map;
287 BUG_ON(!cpus_intersects(*pmask, cpu_online_map));
288}
289
290/*
291 * Return in *pmask the portion of a cpuset's mems_allowed that
292 * are online, with memory. If none are online with memory, walk
293 * up the cpuset hierarchy until we find one that does have some
294 * online mems. If we get all the way to the top and still haven't
295 * found any online mems, return node_states[N_HIGH_MEMORY].
296 *
297 * One way or another, we guarantee to return some non-empty subset
298 * of node_states[N_HIGH_MEMORY].
299 *
300 * Call with callback_mutex held.
301 */
302
303static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
304{
305 while (cs && !nodes_intersects(cs->mems_allowed,
306 node_states[N_HIGH_MEMORY]))
307 cs = cs->parent;
308 if (cs)
309 nodes_and(*pmask, cs->mems_allowed,
310 node_states[N_HIGH_MEMORY]);
311 else
312 *pmask = node_states[N_HIGH_MEMORY];
313 BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
314}
315
316/**
317 * cpuset_update_task_memory_state - update task memory placement
318 *
319 * If the current task's cpuset's mems_allowed changed behind our
320 * backs, update current->mems_allowed, mems_generation and task NUMA
321 * mempolicy to the new value.
322 *
323 * Task mempolicy is updated by rebinding it relative to the
324 * current->cpuset if a task has its memory placement changed.
325 * Do not call this routine if in_interrupt().
326 *
327 * Call without callback_mutex or task_lock() held. May be
328 * called with or without cgroup_mutex held. Thanks in part to
329 * 'the_top_cpuset_hack', the task's cpuset pointer will never
330 * be NULL. This routine also might acquire callback_mutex during
331 * call.
332 *
333 * Reading current->cpuset->mems_generation doesn't need task_lock
334 * to guard the current->cpuset dereference, because it is guarded
335 * from concurrent freeing of current->cpuset using RCU.
336 *
337 * The rcu_dereference() is technically probably not needed,
338 * as I don't actually mind if I see a new cpuset pointer but
339 * an old value of mems_generation. However, this really only
340 * matters on alpha systems using cpusets heavily. If I dropped
341 * that rcu_dereference(), it would save them a memory barrier.
342 * For all other arch's, rcu_dereference is a no-op anyway, and for
343 * alpha systems not using cpusets, another planned optimization,
344 * avoiding the rcu critical section for tasks in the root cpuset
345 * which is statically allocated, so can't vanish, will make this
346 * irrelevant. Better to use RCU as intended, than to engage in
347 * some cute trick to save a memory barrier that is impossible to
348 * test, for alpha systems using cpusets heavily, which might not
349 * even exist.
350 *
351 * This routine is needed to update the per-task mems_allowed data,
352 * within the task's context, when it is trying to allocate memory
353 * (in various mm/mempolicy.c routines) and notices that some other
354 * task has been modifying its cpuset.
355 */
356
fe85a998 357void cpuset_update_task_memory_state(void)
1da177e4 358{
053199ed 359 int my_cpusets_mem_gen;
cf2a473c 360 struct task_struct *tsk = current;
6b9c2603 361 struct cpuset *cs;
053199ed 362
8793d854 363 if (task_cs(tsk) == &top_cpuset) {
03a285f5
PJ
364 /* Don't need rcu for top_cpuset. It's never freed. */
365 my_cpusets_mem_gen = top_cpuset.mems_generation;
366 } else {
367 rcu_read_lock();
da5ef6bb 368 my_cpusets_mem_gen = task_cs(tsk)->mems_generation;
03a285f5
PJ
369 rcu_read_unlock();
370 }
1da177e4 371
cf2a473c 372 if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
3d3f26a7 373 mutex_lock(&callback_mutex);
cf2a473c 374 task_lock(tsk);
8793d854 375 cs = task_cs(tsk); /* Maybe changed when task not locked */
cf2a473c
PJ
376 guarantee_online_mems(cs, &tsk->mems_allowed);
377 tsk->cpuset_mems_generation = cs->mems_generation;
825a46af
PJ
378 if (is_spread_page(cs))
379 tsk->flags |= PF_SPREAD_PAGE;
380 else
381 tsk->flags &= ~PF_SPREAD_PAGE;
382 if (is_spread_slab(cs))
383 tsk->flags |= PF_SPREAD_SLAB;
384 else
385 tsk->flags &= ~PF_SPREAD_SLAB;
cf2a473c 386 task_unlock(tsk);
3d3f26a7 387 mutex_unlock(&callback_mutex);
74cb2155 388 mpol_rebind_task(tsk, &tsk->mems_allowed);
1da177e4
LT
389 }
390}
391
392/*
393 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
394 *
395 * One cpuset is a subset of another if all its allowed CPUs and
396 * Memory Nodes are a subset of the other, and its exclusive flags
2df167a3 397 * are only set if the other's are set. Call holding cgroup_mutex.
1da177e4
LT
398 */
399
400static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
401{
402 return cpus_subset(p->cpus_allowed, q->cpus_allowed) &&
403 nodes_subset(p->mems_allowed, q->mems_allowed) &&
404 is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
405 is_mem_exclusive(p) <= is_mem_exclusive(q);
406}
407
408/*
409 * validate_change() - Used to validate that any proposed cpuset change
410 * follows the structural rules for cpusets.
411 *
412 * If we replaced the flag and mask values of the current cpuset
413 * (cur) with those values in the trial cpuset (trial), would
414 * our various subset and exclusive rules still be valid? Presumes
2df167a3 415 * cgroup_mutex held.
1da177e4
LT
416 *
417 * 'cur' is the address of an actual, in-use cpuset. Operations
418 * such as list traversal that depend on the actual address of the
419 * cpuset in the list must use cur below, not trial.
420 *
421 * 'trial' is the address of bulk structure copy of cur, with
422 * perhaps one or more of the fields cpus_allowed, mems_allowed,
423 * or flags changed to new, trial values.
424 *
425 * Return 0 if valid, -errno if not.
426 */
427
428static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
429{
8793d854 430 struct cgroup *cont;
1da177e4
LT
431 struct cpuset *c, *par;
432
433 /* Each of our child cpusets must be a subset of us */
8793d854
PM
434 list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
435 if (!is_cpuset_subset(cgroup_cs(cont), trial))
1da177e4
LT
436 return -EBUSY;
437 }
438
439 /* Remaining checks don't apply to root cpuset */
69604067 440 if (cur == &top_cpuset)
1da177e4
LT
441 return 0;
442
69604067
PJ
443 par = cur->parent;
444
1da177e4
LT
445 /* We must be a subset of our parent cpuset */
446 if (!is_cpuset_subset(trial, par))
447 return -EACCES;
448
2df167a3
PM
449 /*
450 * If either I or some sibling (!= me) is exclusive, we can't
451 * overlap
452 */
8793d854
PM
453 list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
454 c = cgroup_cs(cont);
1da177e4
LT
455 if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
456 c != cur &&
457 cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
458 return -EINVAL;
459 if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
460 c != cur &&
461 nodes_intersects(trial->mems_allowed, c->mems_allowed))
462 return -EINVAL;
463 }
464
020958b6
PJ
465 /* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
466 if (cgroup_task_count(cur->css.cgroup)) {
467 if (cpus_empty(trial->cpus_allowed) ||
468 nodes_empty(trial->mems_allowed)) {
469 return -ENOSPC;
470 }
471 }
472
1da177e4
LT
473 return 0;
474}
475
029190c5
PJ
476/*
477 * Helper routine for rebuild_sched_domains().
478 * Do cpusets a, b have overlapping cpus_allowed masks?
479 */
480
481static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
482{
483 return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
484}
485
1d3504fc
HS
486static void
487update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
488{
1d3504fc
HS
489 if (dattr->relax_domain_level < c->relax_domain_level)
490 dattr->relax_domain_level = c->relax_domain_level;
491 return;
492}
493
f5393693
LJ
494static void
495update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
496{
497 LIST_HEAD(q);
498
499 list_add(&c->stack_list, &q);
500 while (!list_empty(&q)) {
501 struct cpuset *cp;
502 struct cgroup *cont;
503 struct cpuset *child;
504
505 cp = list_first_entry(&q, struct cpuset, stack_list);
506 list_del(q.next);
507
508 if (cpus_empty(cp->cpus_allowed))
509 continue;
510
511 if (is_sched_load_balance(cp))
512 update_domain_attr(dattr, cp);
513
514 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
515 child = cgroup_cs(cont);
516 list_add_tail(&child->stack_list, &q);
517 }
518 }
519}
520
521/*
522 * rebuild_sched_domains()
523 *
524 * This routine will be called to rebuild the scheduler's dynamic
525 * sched domains:
526 * - if the flag 'sched_load_balance' of any cpuset with non-empty
527 * 'cpus' changes,
528 * - or if the 'cpus' allowed changes in any cpuset which has that
529 * flag enabled,
530 * - or if the 'sched_relax_domain_level' of any cpuset which has
531 * that flag enabled and with non-empty 'cpus' changes,
532 * - or if any cpuset with non-empty 'cpus' is removed,
533 * - or if a cpu gets offlined.
534 *
535 * This routine builds a partial partition of the system's CPUs
536 * (the set of non-overlapping cpumask_t's in the array 'part'
537 * below), and passes that partial partition to the kernel/sched.c
538 * partition_sched_domains() routine, which will rebuild the
539 * scheduler's load balancing domains (sched domains) as specified
540 * by that partial partition. A 'partial partition' is a set of
541 * non-overlapping subsets whose union is a subset of that set.
542 *
543 * See "What is sched_load_balance" in Documentation/cpusets.txt
544 * for a background explanation of this.
545 *
546 * Does not return errors, on the theory that the callers of this
547 * routine would rather not worry about failures to rebuild sched
548 * domains when operating in the severe memory shortage situations
549 * that could cause allocation failures below.
550 *
551 * Call with cgroup_mutex held. May take callback_mutex during
552 * call due to the kfifo_alloc() and kmalloc() calls. May nest
86ef5c9a 553 * a call to the get_online_cpus()/put_online_cpus() pair.
029190c5 554 * Must not be called holding callback_mutex, because we must not
555 * call get_online_cpus() while holding callback_mutex. Elsewhere
556 * the kernel nests callback_mutex inside get_online_cpus() calls.
557 * So the reverse nesting would risk an ABBA deadlock.
558 *
559 * The three key local variables below are:
560 * q - a kfifo queue of cpuset pointers, used to implement a
561 * top-down scan of all cpusets. This scan loads a pointer
562 * to each cpuset marked is_sched_load_balance into the
563 * array 'csa'. For our purposes, rebuilding the scheduler's
564 * sched domains, we can ignore !is_sched_load_balance cpusets.
565 * csa - (for CpuSet Array) Array of pointers to all the cpusets
566 * that need to be load balanced, for convenient iterative
567 * access by the subsequent code that finds the best partition,
568 * i.e the set of domains (subsets) of CPUs such that the
569 * cpus_allowed of every cpuset marked is_sched_load_balance
570 * is a subset of one of these domains, while there are as
571 * many such domains as possible, each as small as possible.
572 * doms - Conversion of 'csa' to an array of cpumasks, for passing to
573 * the kernel/sched.c routine partition_sched_domains() in a
574 * convenient format, that can be easily compared to the prior
575 * value to determine what partition elements (sched domains)
576 * were changed (added or removed.)
577 *
578 * Finding the best partition (set of domains):
579 * The triple nested loops below over i, j, k scan over the
580 * load balanced cpusets (using the array of cpuset pointers in
581 * csa[]) looking for pairs of cpusets that have overlapping
582 * cpus_allowed, but which don't have the same 'pn' partition
583 * number, and gives them the same partition number. It keeps
584 * looping on the 'restart' label until it can no longer find
585 * any such pairs.
586 *
587 * The union of the cpus_allowed masks from the set of
588 * all cpusets having the same 'pn' value then form the one
589 * element of the partition (one sched domain) to be passed to
590 * partition_sched_domains().
591 */
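/*
 * A small worked example of the partition step above (added as an
 * illustration, not part of the original source): suppose csa[] holds
 * three load balanced cpusets A, B and C with cpus_allowed {0-1},
 * {1-2} and {4-5}. Initially pn is 0, 1, 2 and ndoms is 3. A and B
 * overlap, so B's pn is folded into A's (pn becomes 0, 0, 2) and ndoms
 * drops to 2. C overlaps nothing, so the partition finally handed to
 * partition_sched_domains() is two sched domains: {0-2} and {4-5}.
 */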
592
e761b772 593void rebuild_sched_domains(void)
029190c5
PJ
594{
595 struct kfifo *q; /* queue of cpusets to be scanned */
596 struct cpuset *cp; /* scans q */
597 struct cpuset **csa; /* array of all cpuset ptrs */
598 int csn; /* how many cpuset ptrs in csa so far */
599 int i, j, k; /* indices for partition finding loops */
600 cpumask_t *doms; /* resulting partition; i.e. sched domains */
1d3504fc 601 struct sched_domain_attr *dattr; /* attributes for custom domains */
029190c5
PJ
602 int ndoms; /* number of sched domains in result */
603 int nslot; /* next empty doms[] cpumask_t slot */
604
605 q = NULL;
606 csa = NULL;
607 doms = NULL;
1d3504fc 608 dattr = NULL;
029190c5
PJ
609
610 /* Special case for the 99% of systems with one, full, sched domain */
611 if (is_sched_load_balance(&top_cpuset)) {
612 ndoms = 1;
613 doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
614 if (!doms)
615 goto rebuild;
1d3504fc
HS
616 dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
617 if (dattr) {
618 *dattr = SD_ATTR_INIT;
619 update_domain_attr(dattr, &top_cpuset);
620 }
029190c5
PJ
621 *doms = top_cpuset.cpus_allowed;
622 goto rebuild;
623 }
624
625 q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
626 if (IS_ERR(q))
627 goto done;
628 csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
629 if (!csa)
630 goto done;
631 csn = 0;
632
633 cp = &top_cpuset;
634 __kfifo_put(q, (void *)&cp, sizeof(cp));
635 while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
636 struct cgroup *cont;
637 struct cpuset *child; /* scans child cpusets of cp */
489a5393
LJ
638
639 if (cpus_empty(cp->cpus_allowed))
640 continue;
641
f5393693
LJ
642 /*
643 * All child cpusets contain a subset of the parent's cpus, so
644 * just skip them, and then we call update_domain_attr_tree()
645 * to calc relax_domain_level of the corresponding sched
646 * domain.
647 */
648 if (is_sched_load_balance(cp)) {
029190c5 649 csa[csn++] = cp;
f5393693
LJ
650 continue;
651 }
489a5393 652
029190c5
PJ
653 list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
654 child = cgroup_cs(cont);
655 __kfifo_put(q, (void *)&child, sizeof(cp));
656 }
657 }
658
659 for (i = 0; i < csn; i++)
660 csa[i]->pn = i;
661 ndoms = csn;
662
663restart:
664 /* Find the best partition (set of sched domains) */
665 for (i = 0; i < csn; i++) {
666 struct cpuset *a = csa[i];
667 int apn = a->pn;
668
669 for (j = 0; j < csn; j++) {
670 struct cpuset *b = csa[j];
671 int bpn = b->pn;
672
673 if (apn != bpn && cpusets_overlap(a, b)) {
674 for (k = 0; k < csn; k++) {
675 struct cpuset *c = csa[k];
676
677 if (c->pn == bpn)
678 c->pn = apn;
679 }
680 ndoms--; /* one less element */
681 goto restart;
682 }
683 }
684 }
685
686 /* Convert <csn, csa> to <ndoms, doms> */
687 doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
688 if (!doms)
689 goto rebuild;
1d3504fc 690 dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
029190c5
PJ
691
692 for (nslot = 0, i = 0; i < csn; i++) {
693 struct cpuset *a = csa[i];
694 int apn = a->pn;
695
696 if (apn >= 0) {
697 cpumask_t *dp = doms + nslot;
698
699 if (nslot == ndoms) {
700 static int warnings = 10;
701 if (warnings) {
702 printk(KERN_WARNING
703 "rebuild_sched_domains confused:"
704 " nslot %d, ndoms %d, csn %d, i %d,"
705 " apn %d\n",
706 nslot, ndoms, csn, i, apn);
707 warnings--;
708 }
709 continue;
710 }
711
712 cpus_clear(*dp);
1d3504fc
HS
713 if (dattr)
714 *(dattr + nslot) = SD_ATTR_INIT;
029190c5
PJ
715 for (j = i; j < csn; j++) {
716 struct cpuset *b = csa[j];
717
718 if (apn == b->pn) {
719 cpus_or(*dp, *dp, b->cpus_allowed);
720 b->pn = -1;
91cd4d6e 721 if (dattr)
f5393693 722 update_domain_attr_tree(dattr
91cd4d6e 723 + nslot, b);
029190c5
PJ
724 }
725 }
726 nslot++;
727 }
728 }
729 BUG_ON(nslot != ndoms);
730
731rebuild:
732 /* Have scheduler rebuild sched domains */
86ef5c9a 733 get_online_cpus();
1d3504fc 734 partition_sched_domains(ndoms, doms, dattr);
86ef5c9a 735 put_online_cpus();
029190c5
PJ
736
737done:
738 if (q && !IS_ERR(q))
739 kfifo_free(q);
740 kfree(csa);
741 /* Don't kfree(doms) -- partition_sched_domains() does that. */
1d3504fc 742 /* Don't kfree(dattr) -- partition_sched_domains() does that. */
029190c5
PJ
743}
744
58f4790b
CW
745/**
746 * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's
747 * @tsk: task to test
748 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
749 *
2df167a3 750 * Call with cgroup_mutex held. May take callback_mutex during call.
58f4790b
CW
751 * Called for each task in a cgroup by cgroup_scan_tasks().
752 * Return nonzero if this task's cpus_allowed mask should be changed (in other
753 * words, if its mask is not equal to its cpuset's mask).
053199ed 754 */
9e0c914c
AB
755static int cpuset_test_cpumask(struct task_struct *tsk,
756 struct cgroup_scanner *scan)
58f4790b
CW
757{
758 return !cpus_equal(tsk->cpus_allowed,
759 (cgroup_cs(scan->cg))->cpus_allowed);
760}
053199ed 761
58f4790b
CW
762/**
763 * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
764 * @tsk: task to test
765 * @scan: struct cgroup_scanner containing the cgroup of the task
766 *
767 * Called by cgroup_scan_tasks() for each task in a cgroup whose
768 * cpus_allowed mask needs to be changed.
769 *
770 * We don't need to re-check for the cgroup/cpuset membership, since we're
771 * holding cgroup_lock() at this point.
772 */
9e0c914c
AB
773static void cpuset_change_cpumask(struct task_struct *tsk,
774 struct cgroup_scanner *scan)
58f4790b 775{
f9a86fcb 776 set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
58f4790b
CW
777}
778
0b2f630a
MX
779/**
780 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
781 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
782 *
783 * Called with cgroup_mutex held
784 *
785 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
786 * calling callback functions for each.
787 *
788 * Return 0 if successful, -errno if not.
789 */
790static int update_tasks_cpumask(struct cpuset *cs)
791{
792 struct cgroup_scanner scan;
793 struct ptr_heap heap;
794 int retval;
795
02412483
LJ
796 /*
797 * cgroup_scan_tasks() will initialize heap->gt for us.
798 * heap_init() is still needed here for we should not change
799 * cs->cpus_allowed when heap_init() fails.
800 */
801 retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
0b2f630a
MX
802 if (retval)
803 return retval;
804
805 scan.cg = cs->css.cgroup;
806 scan.test_task = cpuset_test_cpumask;
807 scan.process_task = cpuset_change_cpumask;
808 scan.heap = &heap;
809 retval = cgroup_scan_tasks(&scan);
810
811 heap_free(&heap);
812 return retval;
813}
814
58f4790b
CW
815/**
816 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
817 * @cs: the cpuset to consider
818 * @buf: buffer of cpu numbers written to this cpuset
819 */
e3712395 820static int update_cpumask(struct cpuset *cs, const char *buf)
1da177e4
LT
821{
822 struct cpuset trialcs;
58f4790b
CW
823 int retval;
824 int is_load_balanced;
1da177e4 825
4c4d50f7
PJ
826 /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */
827 if (cs == &top_cpuset)
828 return -EACCES;
829
1da177e4 830 trialcs = *cs;
6f7f02e7
DR
831
832 /*
c8d9c90c 833 * An empty cpus_allowed is ok only if the cpuset has no tasks.
020958b6
PJ
834 * Since cpulist_parse() fails on an empty mask, we special case
835 * that parsing. The validate_change() call ensures that cpusets
836 * with tasks have cpus.
6f7f02e7 837 */
020958b6 838 if (!*buf) {
6f7f02e7
DR
839 cpus_clear(trialcs.cpus_allowed);
840 } else {
841 retval = cpulist_parse(buf, trialcs.cpus_allowed);
842 if (retval < 0)
843 return retval;
37340746
LJ
844
845 if (!cpus_subset(trialcs.cpus_allowed, cpu_online_map))
846 return -EINVAL;
6f7f02e7 847 }
1da177e4 848 retval = validate_change(cs, &trialcs);
85d7b949
DG
849 if (retval < 0)
850 return retval;
029190c5 851
8707d8b8
PM
852 /* Nothing to do if the cpus didn't change */
853 if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
854 return 0;
58f4790b 855
029190c5
PJ
856 is_load_balanced = is_sched_load_balance(&trialcs);
857
3d3f26a7 858 mutex_lock(&callback_mutex);
85d7b949 859 cs->cpus_allowed = trialcs.cpus_allowed;
3d3f26a7 860 mutex_unlock(&callback_mutex);
029190c5 861
8707d8b8
PM
862 /*
863 * Scan tasks in the cpuset, and update the cpumasks of any
58f4790b 864 * that need an update.
8707d8b8 865 */
0b2f630a
MX
866 retval = update_tasks_cpumask(cs);
867 if (retval < 0)
868 return retval;
58f4790b 869
8707d8b8 870 if (is_load_balanced)
029190c5 871 rebuild_sched_domains();
85d7b949 872 return 0;
1da177e4
LT
873}
874
e4e364e8
PJ
875/*
876 * cpuset_migrate_mm
877 *
878 * Migrate memory region from one set of nodes to another.
879 *
880 * Temporarily set task's mems_allowed to target nodes of migration,
881 * so that the migration code can allocate pages on these nodes.
882 *
2df167a3 883 * Call holding cgroup_mutex, so current's cpuset won't change
c8d9c90c 884 * during this call, as manage_mutex holds off any cpuset_attach()
e4e364e8
PJ
885 * calls. Therefore we don't need to take task_lock around the
886 * call to guarantee_online_mems(), as we know no one is changing
2df167a3 887 * our task's cpuset.
e4e364e8
PJ
888 *
889 * Hold callback_mutex around the two modifications of our task's
890 * mems_allowed to synchronize with cpuset_mems_allowed().
891 *
892 * While the mm_struct we are migrating is typically from some
893 * other task, the task_struct mems_allowed that we are hacking
894 * is for our current task, which must allocate new pages for that
895 * migrating memory region.
896 *
897 * We call cpuset_update_task_memory_state() before hacking
898 * our task's mems_allowed, so that we are assured of being in
899 * sync with our task's cpuset, and in particular, callbacks to
900 * cpuset_update_task_memory_state() from nested page allocations
901 * won't see any mismatch of our cpuset and task mems_generation
902 * values, so won't overwrite our hacked task's mems_allowed
903 * nodemask.
904 */
905
906static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
907 const nodemask_t *to)
908{
909 struct task_struct *tsk = current;
910
911 cpuset_update_task_memory_state();
912
913 mutex_lock(&callback_mutex);
914 tsk->mems_allowed = *to;
915 mutex_unlock(&callback_mutex);
916
917 do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
918
919 mutex_lock(&callback_mutex);
8793d854 920 guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
e4e364e8
PJ
921 mutex_unlock(&callback_mutex);
922}
923
8793d854
PM
924static void *cpuset_being_rebound;
925
0b2f630a
MX
926/**
927 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
928 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
929 * @oldmem: old mems_allowed of cpuset cs
930 *
931 * Called with cgroup_mutex held
932 * Return 0 if successful, -errno if not.
933 */
934static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem)
1da177e4 935{
8793d854 936 struct task_struct *p;
4225399a
PJ
937 struct mm_struct **mmarray;
938 int i, n, ntasks;
04c19fa6 939 int migrate;
4225399a 940 int fudge;
8793d854 941 struct cgroup_iter it;
0b2f630a 942 int retval;
59dac16f 943
846a16bf 944 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
4225399a
PJ
945
946 fudge = 10; /* spare mmarray[] slots */
947 fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
948 retval = -ENOMEM;
949
950 /*
951 * Allocate mmarray[] to hold mm reference for each task
952 * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
953 * tasklist_lock. We could use GFP_ATOMIC, but with a
954 * few more lines of code, we can retry until we get a big
955 * enough mmarray[] w/o using GFP_ATOMIC.
956 */
957 while (1) {
8793d854 958 ntasks = cgroup_task_count(cs->css.cgroup); /* guess */
4225399a
PJ
959 ntasks += fudge;
960 mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
961 if (!mmarray)
962 goto done;
c2aef333 963 read_lock(&tasklist_lock); /* block fork */
8793d854 964 if (cgroup_task_count(cs->css.cgroup) <= ntasks)
4225399a 965 break; /* got enough */
c2aef333 966 read_unlock(&tasklist_lock); /* try again */
4225399a
PJ
967 kfree(mmarray);
968 }
969
970 n = 0;
971
972 /* Load up mmarray[] with mm reference for each task in cpuset. */
8793d854
PM
973 cgroup_iter_start(cs->css.cgroup, &it);
974 while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
4225399a
PJ
975 struct mm_struct *mm;
976
977 if (n >= ntasks) {
978 printk(KERN_WARNING
979 "Cpuset mempolicy rebind incomplete.\n");
8793d854 980 break;
4225399a 981 }
4225399a
PJ
982 mm = get_task_mm(p);
983 if (!mm)
984 continue;
985 mmarray[n++] = mm;
8793d854
PM
986 }
987 cgroup_iter_end(cs->css.cgroup, &it);
c2aef333 988 read_unlock(&tasklist_lock);
4225399a
PJ
989
990 /*
991 * Now that we've dropped the tasklist spinlock, we can
992 * rebind the vma mempolicies of each mm in mmarray[] to their
993 * new cpuset, and release that mm. The mpol_rebind_mm()
994 * call takes mmap_sem, which we couldn't take while holding
846a16bf 995 * tasklist_lock. Forks can happen again now - the mpol_dup()
4225399a
PJ
996 * cpuset_being_rebound check will catch such forks, and rebind
997 * their vma mempolicies too. Because we still hold the global
2df167a3 998 * cgroup_mutex, we know that no other rebind effort will
4225399a
PJ
999 * be contending for the global variable cpuset_being_rebound.
1000 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
04c19fa6 1001 * is idempotent. Also migrate pages in each mm to new nodes.
4225399a 1002 */
04c19fa6 1003 migrate = is_memory_migrate(cs);
4225399a
PJ
1004 for (i = 0; i < n; i++) {
1005 struct mm_struct *mm = mmarray[i];
1006
1007 mpol_rebind_mm(mm, &cs->mems_allowed);
e4e364e8 1008 if (migrate)
0b2f630a 1009 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
4225399a
PJ
1010 mmput(mm);
1011 }
1012
2df167a3 1013 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
4225399a 1014 kfree(mmarray);
8793d854 1015 cpuset_being_rebound = NULL;
4225399a 1016 retval = 0;
59dac16f 1017done:
1da177e4
LT
1018 return retval;
1019}
1020
0b2f630a
MX
1021/*
1022 * Handle user request to change the 'mems' memory placement
1023 * of a cpuset. Needs to validate the request, update the
1024 * cpuset's mems_allowed and mems_generation, and for each
1025 * task in the cpuset, rebind any vma mempolicies and if
1026 * the cpuset is marked 'memory_migrate', migrate the task's
1027 * pages to the new memory.
1028 *
1029 * Call with cgroup_mutex held. May take callback_mutex during call.
1030 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1031 * lock each such task's mm->mmap_sem, scan its vma's and rebind
1032 * their mempolicies to the cpuset's new mems_allowed.
1033 */
1034static int update_nodemask(struct cpuset *cs, const char *buf)
1035{
1036 struct cpuset trialcs;
1037 nodemask_t oldmem;
1038 int retval;
1039
1040 /*
1041 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
1042 * it's read-only
1043 */
1044 if (cs == &top_cpuset)
1045 return -EACCES;
1046
1047 trialcs = *cs;
1048
1049 /*
1050 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
1051 * Since nodelist_parse() fails on an empty mask, we special case
1052 * that parsing. The validate_change() call ensures that cpusets
1053 * with tasks have memory.
1054 */
1055 if (!*buf) {
1056 nodes_clear(trialcs.mems_allowed);
1057 } else {
1058 retval = nodelist_parse(buf, trialcs.mems_allowed);
1059 if (retval < 0)
1060 goto done;
1061
1062 if (!nodes_subset(trialcs.mems_allowed,
1063 node_states[N_HIGH_MEMORY]))
1064 return -EINVAL;
1065 }
1066 oldmem = cs->mems_allowed;
1067 if (nodes_equal(oldmem, trialcs.mems_allowed)) {
1068 retval = 0; /* Too easy - nothing to do */
1069 goto done;
1070 }
1071 retval = validate_change(cs, &trialcs);
1072 if (retval < 0)
1073 goto done;
1074
1075 mutex_lock(&callback_mutex);
1076 cs->mems_allowed = trialcs.mems_allowed;
1077 cs->mems_generation = cpuset_mems_generation++;
1078 mutex_unlock(&callback_mutex);
1079
1080 retval = update_tasks_nodemask(cs, &oldmem);
1081done:
1082 return retval;
1083}
1084
8793d854
PM
1085int current_cpuset_is_being_rebound(void)
1086{
1087 return task_cs(current) == cpuset_being_rebound;
1088}
1089
5be7a479 1090static int update_relax_domain_level(struct cpuset *cs, s64 val)
1d3504fc 1091{
30e0e178
LZ
1092 if (val < -1 || val >= SD_LV_MAX)
1093 return -EINVAL;
1d3504fc
HS
1094
1095 if (val != cs->relax_domain_level) {
1096 cs->relax_domain_level = val;
c372e817
LZ
1097 if (!cpus_empty(cs->cpus_allowed) && is_sched_load_balance(cs))
1098 rebuild_sched_domains();
1d3504fc
HS
1099 }
1100
1101 return 0;
1102}
1103
1da177e4
LT
1104/*
1105 * update_flag - read a 0 or a 1 in a file and update associated flag
78608366
PM
1106 * bit: the bit to update (see cpuset_flagbits_t)
1107 * cs: the cpuset to update
1108 * turning_on: whether the flag is being set or cleared
053199ed 1109 *
2df167a3 1110 * Call with cgroup_mutex held.
1da177e4
LT
1111 */
1112
700fe1ab
PM
1113static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1114 int turning_on)
1da177e4 1115{
1da177e4 1116 struct cpuset trialcs;
607717a6 1117 int err;
029190c5 1118 int cpus_nonempty, balance_flag_changed;
1da177e4 1119
1da177e4
LT
1120 trialcs = *cs;
1121 if (turning_on)
1122 set_bit(bit, &trialcs.flags);
1123 else
1124 clear_bit(bit, &trialcs.flags);
1125
1126 err = validate_change(cs, &trialcs);
85d7b949
DG
1127 if (err < 0)
1128 return err;
029190c5
PJ
1129
1130 cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
1131 balance_flag_changed = (is_sched_load_balance(cs) !=
1132 is_sched_load_balance(&trialcs));
1133
3d3f26a7 1134 mutex_lock(&callback_mutex);
69604067 1135 cs->flags = trialcs.flags;
3d3f26a7 1136 mutex_unlock(&callback_mutex);
85d7b949 1137
029190c5
PJ
1138 if (cpus_nonempty && balance_flag_changed)
1139 rebuild_sched_domains();
1140
85d7b949 1141 return 0;
1da177e4
LT
1142}
1143
3e0d98b9 1144/*
80f7228b 1145 * Frequency meter - How fast is some event occurring?
3e0d98b9
PJ
1146 *
1147 * These routines manage a digitally filtered, constant time based,
1148 * event frequency meter. There are four routines:
1149 * fmeter_init() - initialize a frequency meter.
1150 * fmeter_markevent() - called each time the event happens.
1151 * fmeter_getrate() - returns the recent rate of such events.
1152 * fmeter_update() - internal routine used to update fmeter.
1153 *
1154 * A common data structure is passed to each of these routines,
1155 * which is used to keep track of the state required to manage the
1156 * frequency meter and its digital filter.
1157 *
1158 * The filter works on the number of events marked per unit time.
1159 * The filter is single-pole low-pass recursive (IIR). The time unit
1160 * is 1 second. Arithmetic is done using 32-bit integers scaled to
1161 * simulate 3 decimal digits of precision (multiplied by 1000).
1162 *
1163 * With an FM_COEF of 933, and a time base of 1 second, the filter
1164 * has a half-life of 10 seconds, meaning that if the events quit
1165 * happening, then the rate returned from the fmeter_getrate()
1166 * will be cut in half each 10 seconds, until it converges to zero.
1167 *
1168 * It is not worth doing a real infinitely recursive filter. If more
1169 * than FM_MAXTICKS ticks have elapsed since the last filter event,
1170 * just compute FM_MAXTICKS ticks worth, by which point the level
1171 * will be stable.
1172 *
1173 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
1174 * arithmetic overflow in the fmeter_update() routine.
1175 *
1176 * Given the simple 32 bit integer arithmetic used, this meter works
1177 * best for reporting rates between one per millisecond (msec) and
1178 * one per 32 (approx) seconds. At constant rates faster than one
1179 * per msec it maxes out at values just under 1,000,000. At constant
1180 * rates between one per msec, and one per second it will stabilize
1181 * to a value N*1000, where N is the rate of events per second.
1182 * At constant rates between one per second and one per 32 seconds,
1183 * it will be choppy, moving up on the seconds that have an event,
1184 * and then decaying until the next event. At rates slower than
1185 * about one in 32 seconds, it decays all the way back to zero between
1186 * each event.
1187 */
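/*
 * A quick sanity check of the half-life claim above (added as an
 * illustration, assuming FM_COEF = 933 and one-second ticks): each
 * tick with no new events multiplies val by 933/1000, and
 * 0.933^10 is approximately 0.50, so an idle rate decays to roughly
 * half its value after 10 ticks, i.e. 10 seconds.
 */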
1188
1189#define FM_COEF 933 /* coefficient for half-life of 10 secs */
1190#define FM_MAXTICKS ((time_t)99) /* useless computing more ticks than this */
1191#define FM_MAXCNT 1000000 /* limit cnt to avoid overflow */
1192#define FM_SCALE 1000 /* faux fixed point scale */
1193
1194/* Initialize a frequency meter */
1195static void fmeter_init(struct fmeter *fmp)
1196{
1197 fmp->cnt = 0;
1198 fmp->val = 0;
1199 fmp->time = 0;
1200 spin_lock_init(&fmp->lock);
1201}
1202
1203/* Internal meter update - process cnt events and update value */
1204static void fmeter_update(struct fmeter *fmp)
1205{
1206 time_t now = get_seconds();
1207 time_t ticks = now - fmp->time;
1208
1209 if (ticks == 0)
1210 return;
1211
1212 ticks = min(FM_MAXTICKS, ticks);
1213 while (ticks-- > 0)
1214 fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
1215 fmp->time = now;
1216
1217 fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
1218 fmp->cnt = 0;
1219}
1220
1221/* Process any previous ticks, then bump cnt by one (times scale). */
1222static void fmeter_markevent(struct fmeter *fmp)
1223{
1224 spin_lock(&fmp->lock);
1225 fmeter_update(fmp);
1226 fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
1227 spin_unlock(&fmp->lock);
1228}
1229
1230/* Process any previous ticks, then return current value. */
1231static int fmeter_getrate(struct fmeter *fmp)
1232{
1233 int val;
1234
1235 spin_lock(&fmp->lock);
1236 fmeter_update(fmp);
1237 val = fmp->val;
1238 spin_unlock(&fmp->lock);
1239 return val;
1240}
1241
2df167a3 1242/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
8793d854
PM
1243static int cpuset_can_attach(struct cgroup_subsys *ss,
1244 struct cgroup *cont, struct task_struct *tsk)
1da177e4 1245{
8793d854 1246 struct cpuset *cs = cgroup_cs(cont);
1da177e4 1247
1da177e4
LT
1248 if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
1249 return -ENOSPC;
9985b0ba
DR
1250 if (tsk->flags & PF_THREAD_BOUND) {
1251 cpumask_t mask;
1252
1253 mutex_lock(&callback_mutex);
1254 mask = cs->cpus_allowed;
1255 mutex_unlock(&callback_mutex);
1256 if (!cpus_equal(tsk->cpus_allowed, mask))
1257 return -EINVAL;
1258 }
1da177e4 1259
8793d854
PM
1260 return security_task_setscheduler(tsk, 0, NULL);
1261}
1da177e4 1262
8793d854
PM
1263static void cpuset_attach(struct cgroup_subsys *ss,
1264 struct cgroup *cont, struct cgroup *oldcont,
1265 struct task_struct *tsk)
1266{
1267 cpumask_t cpus;
1268 nodemask_t from, to;
1269 struct mm_struct *mm;
1270 struct cpuset *cs = cgroup_cs(cont);
1271 struct cpuset *oldcs = cgroup_cs(oldcont);
9985b0ba 1272 int err;
22fb52dd 1273
3d3f26a7 1274 mutex_lock(&callback_mutex);
1da177e4 1275 guarantee_online_cpus(cs, &cpus);
9985b0ba 1276 err = set_cpus_allowed_ptr(tsk, &cpus);
8793d854 1277 mutex_unlock(&callback_mutex);
9985b0ba
DR
1278 if (err)
1279 return;
1da177e4 1280
45b07ef3
PJ
1281 from = oldcs->mems_allowed;
1282 to = cs->mems_allowed;
4225399a
PJ
1283 mm = get_task_mm(tsk);
1284 if (mm) {
1285 mpol_rebind_mm(mm, &to);
2741a559 1286 if (is_memory_migrate(cs))
e4e364e8 1287 cpuset_migrate_mm(mm, &from, &to);
4225399a
PJ
1288 mmput(mm);
1289 }
1290
1da177e4
LT
1291}
1292
1293/* The various types of files and directories in a cpuset file system */
1294
1295typedef enum {
45b07ef3 1296 FILE_MEMORY_MIGRATE,
1da177e4
LT
1297 FILE_CPULIST,
1298 FILE_MEMLIST,
1299 FILE_CPU_EXCLUSIVE,
1300 FILE_MEM_EXCLUSIVE,
78608366 1301 FILE_MEM_HARDWALL,
029190c5 1302 FILE_SCHED_LOAD_BALANCE,
1d3504fc 1303 FILE_SCHED_RELAX_DOMAIN_LEVEL,
3e0d98b9
PJ
1304 FILE_MEMORY_PRESSURE_ENABLED,
1305 FILE_MEMORY_PRESSURE,
825a46af
PJ
1306 FILE_SPREAD_PAGE,
1307 FILE_SPREAD_SLAB,
1da177e4
LT
1308} cpuset_filetype_t;
1309
700fe1ab
PM
1310static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1311{
1312 int retval = 0;
1313 struct cpuset *cs = cgroup_cs(cgrp);
1314 cpuset_filetype_t type = cft->private;
1315
e3712395 1316 if (!cgroup_lock_live_group(cgrp))
700fe1ab 1317 return -ENODEV;
700fe1ab
PM
1318
1319 switch (type) {
1da177e4 1320 case FILE_CPU_EXCLUSIVE:
700fe1ab 1321 retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
1da177e4
LT
1322 break;
1323 case FILE_MEM_EXCLUSIVE:
700fe1ab 1324 retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
1da177e4 1325 break;
78608366
PM
1326 case FILE_MEM_HARDWALL:
1327 retval = update_flag(CS_MEM_HARDWALL, cs, val);
1328 break;
029190c5 1329 case FILE_SCHED_LOAD_BALANCE:
700fe1ab 1330 retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1d3504fc 1331 break;
45b07ef3 1332 case FILE_MEMORY_MIGRATE:
700fe1ab 1333 retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
45b07ef3 1334 break;
3e0d98b9 1335 case FILE_MEMORY_PRESSURE_ENABLED:
700fe1ab 1336 cpuset_memory_pressure_enabled = !!val;
3e0d98b9
PJ
1337 break;
1338 case FILE_MEMORY_PRESSURE:
1339 retval = -EACCES;
1340 break;
825a46af 1341 case FILE_SPREAD_PAGE:
700fe1ab 1342 retval = update_flag(CS_SPREAD_PAGE, cs, val);
151a4420 1343 cs->mems_generation = cpuset_mems_generation++;
825a46af
PJ
1344 break;
1345 case FILE_SPREAD_SLAB:
700fe1ab 1346 retval = update_flag(CS_SPREAD_SLAB, cs, val);
151a4420 1347 cs->mems_generation = cpuset_mems_generation++;
825a46af 1348 break;
1da177e4
LT
1349 default:
1350 retval = -EINVAL;
700fe1ab 1351 break;
1da177e4 1352 }
8793d854 1353 cgroup_unlock();
1da177e4
LT
1354 return retval;
1355}
1356
5be7a479
PM
1357static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
1358{
1359 int retval = 0;
1360 struct cpuset *cs = cgroup_cs(cgrp);
1361 cpuset_filetype_t type = cft->private;
1362
e3712395 1363 if (!cgroup_lock_live_group(cgrp))
5be7a479 1364 return -ENODEV;
e3712395 1365
5be7a479
PM
1366 switch (type) {
1367 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1368 retval = update_relax_domain_level(cs, val);
1369 break;
1370 default:
1371 retval = -EINVAL;
1372 break;
1373 }
1374 cgroup_unlock();
1375 return retval;
1376}
1377
e3712395
PM
1378/*
1379 * Common handling for a write to a "cpus" or "mems" file.
1380 */
1381static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1382 const char *buf)
1383{
1384 int retval = 0;
1385
1386 if (!cgroup_lock_live_group(cgrp))
1387 return -ENODEV;
1388
1389 switch (cft->private) {
1390 case FILE_CPULIST:
1391 retval = update_cpumask(cgroup_cs(cgrp), buf);
1392 break;
1393 case FILE_MEMLIST:
1394 retval = update_nodemask(cgroup_cs(cgrp), buf);
1395 break;
1396 default:
1397 retval = -EINVAL;
1398 break;
1399 }
1400 cgroup_unlock();
1401 return retval;
1402}
1403
1da177e4
LT
1404/*
1405 * These ascii lists should be read in a single call, by using a user
1406 * buffer large enough to hold the entire map. If read in smaller
1407 * chunks, there is no guarantee of atomicity. Since the display format
1408 * used, list of ranges of sequential numbers, is variable length,
1409 * and since these maps can change value dynamically, one could read
1410 * gibberish by doing partial reads while a list was changing.
1411 * A single large read to a buffer that crosses a page boundary is
1412 * ok, because the result being copied to user land is not recomputed
1413 * across a page fault.
1414 */
1415
1416static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
1417{
1418 cpumask_t mask;
1419
3d3f26a7 1420 mutex_lock(&callback_mutex);
1da177e4 1421 mask = cs->cpus_allowed;
3d3f26a7 1422 mutex_unlock(&callback_mutex);
1da177e4
LT
1423
1424 return cpulist_scnprintf(page, PAGE_SIZE, mask);
1425}
1426
1427static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
1428{
1429 nodemask_t mask;
1430
3d3f26a7 1431 mutex_lock(&callback_mutex);
1da177e4 1432 mask = cs->mems_allowed;
3d3f26a7 1433 mutex_unlock(&callback_mutex);
1da177e4
LT
1434
1435 return nodelist_scnprintf(page, PAGE_SIZE, mask);
1436}
1437
8793d854
PM
1438static ssize_t cpuset_common_file_read(struct cgroup *cont,
1439 struct cftype *cft,
1440 struct file *file,
1441 char __user *buf,
1442 size_t nbytes, loff_t *ppos)
1da177e4 1443{
8793d854 1444 struct cpuset *cs = cgroup_cs(cont);
1da177e4
LT
1445 cpuset_filetype_t type = cft->private;
1446 char *page;
1447 ssize_t retval = 0;
1448 char *s;
1da177e4 1449
e12ba74d 1450 if (!(page = (char *)__get_free_page(GFP_TEMPORARY)))
1da177e4
LT
1451 return -ENOMEM;
1452
1453 s = page;
1454
1455 switch (type) {
1456 case FILE_CPULIST:
1457 s += cpuset_sprintf_cpulist(s, cs);
1458 break;
1459 case FILE_MEMLIST:
1460 s += cpuset_sprintf_memlist(s, cs);
1461 break;
1da177e4
LT
1462 default:
1463 retval = -EINVAL;
1464 goto out;
1465 }
1466 *s++ = '\n';
1da177e4 1467
eacaa1f5 1468 retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
1da177e4
LT
1469out:
1470 free_page((unsigned long)page);
1471 return retval;
1472}
1473
700fe1ab
PM
1474static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
1475{
1476 struct cpuset *cs = cgroup_cs(cont);
1477 cpuset_filetype_t type = cft->private;
1478 switch (type) {
1479 case FILE_CPU_EXCLUSIVE:
1480 return is_cpu_exclusive(cs);
1481 case FILE_MEM_EXCLUSIVE:
1482 return is_mem_exclusive(cs);
78608366
PM
1483 case FILE_MEM_HARDWALL:
1484 return is_mem_hardwall(cs);
700fe1ab
PM
1485 case FILE_SCHED_LOAD_BALANCE:
1486 return is_sched_load_balance(cs);
1487 case FILE_MEMORY_MIGRATE:
1488 return is_memory_migrate(cs);
1489 case FILE_MEMORY_PRESSURE_ENABLED:
1490 return cpuset_memory_pressure_enabled;
1491 case FILE_MEMORY_PRESSURE:
1492 return fmeter_getrate(&cs->fmeter);
1493 case FILE_SPREAD_PAGE:
1494 return is_spread_page(cs);
1495 case FILE_SPREAD_SLAB:
1496 return is_spread_slab(cs);
1497 default:
1498 BUG();
1499 }
1500}
1da177e4 1501
5be7a479
PM
1502static s64 cpuset_read_s64(struct cgroup *cont, struct cftype *cft)
1503{
1504 struct cpuset *cs = cgroup_cs(cont);
1505 cpuset_filetype_t type = cft->private;
1506 switch (type) {
1507 case FILE_SCHED_RELAX_DOMAIN_LEVEL:
1508 return cs->relax_domain_level;
1509 default:
1510 BUG();
1511 }
1512}
1513
1da177e4
LT
1514
1515/*
1516 * for the common functions, 'private' gives the type of file
1517 */
1518
addf2c73
PM
1519static struct cftype files[] = {
1520 {
1521 .name = "cpus",
1522 .read = cpuset_common_file_read,
e3712395
PM
1523 .write_string = cpuset_write_resmask,
1524 .max_write_len = (100U + 6 * NR_CPUS),
addf2c73
PM
1525 .private = FILE_CPULIST,
1526 },
1527
1528 {
1529 .name = "mems",
1530 .read = cpuset_common_file_read,
e3712395
PM
1531 .write_string = cpuset_write_resmask,
1532 .max_write_len = (100U + 6 * MAX_NUMNODES),
addf2c73
PM
1533 .private = FILE_MEMLIST,
1534 },
1535
1536 {
1537 .name = "cpu_exclusive",
1538 .read_u64 = cpuset_read_u64,
1539 .write_u64 = cpuset_write_u64,
1540 .private = FILE_CPU_EXCLUSIVE,
1541 },
1542
1543 {
1544 .name = "mem_exclusive",
1545 .read_u64 = cpuset_read_u64,
1546 .write_u64 = cpuset_write_u64,
1547 .private = FILE_MEM_EXCLUSIVE,
1548 },
1549
78608366
PM
1550 {
1551 .name = "mem_hardwall",
1552 .read_u64 = cpuset_read_u64,
1553 .write_u64 = cpuset_write_u64,
1554 .private = FILE_MEM_HARDWALL,
1555 },
1556
addf2c73
PM
1557 {
1558 .name = "sched_load_balance",
1559 .read_u64 = cpuset_read_u64,
1560 .write_u64 = cpuset_write_u64,
1561 .private = FILE_SCHED_LOAD_BALANCE,
1562 },
1563
1564 {
1565 .name = "sched_relax_domain_level",
5be7a479
PM
1566 .read_s64 = cpuset_read_s64,
1567 .write_s64 = cpuset_write_s64,
addf2c73
PM
1568 .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
1569 },
1570
1571 {
1572 .name = "memory_migrate",
1573 .read_u64 = cpuset_read_u64,
1574 .write_u64 = cpuset_write_u64,
1575 .private = FILE_MEMORY_MIGRATE,
1576 },
1577
1578 {
1579 .name = "memory_pressure",
1580 .read_u64 = cpuset_read_u64,
1581 .write_u64 = cpuset_write_u64,
1582 .private = FILE_MEMORY_PRESSURE,
1583 },
1584
1585 {
1586 .name = "memory_spread_page",
1587 .read_u64 = cpuset_read_u64,
1588 .write_u64 = cpuset_write_u64,
1589 .private = FILE_SPREAD_PAGE,
1590 },
1591
1592 {
1593 .name = "memory_spread_slab",
1594 .read_u64 = cpuset_read_u64,
1595 .write_u64 = cpuset_write_u64,
1596 .private = FILE_SPREAD_SLAB,
1597 },
45b07ef3
PJ
1598};
1599
3e0d98b9
PJ
1600static struct cftype cft_memory_pressure_enabled = {
1601 .name = "memory_pressure_enabled",
700fe1ab
PM
1602 .read_u64 = cpuset_read_u64,
1603 .write_u64 = cpuset_write_u64,
3e0d98b9
PJ
1604 .private = FILE_MEMORY_PRESSURE_ENABLED,
1605};
1606
8793d854 1607static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
1da177e4
LT
1608{
1609 int err;
1610
addf2c73
PM
1611 err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
1612 if (err)
1da177e4 1613 return err;
8793d854 1614 /* memory_pressure_enabled is in root cpuset only */
addf2c73 1615 if (!cont->parent)
8793d854 1616 err = cgroup_add_file(cont, ss,
addf2c73
PM
1617 &cft_memory_pressure_enabled);
1618 return err;
1da177e4
LT
1619}
1620
/*
 * post_clone() is called at the end of cgroup_clone().
 * 'cgroup' was just created automatically as a result of
 * a cgroup_clone(), and the current task is about to
 * be moved into 'cgroup'.
 *
 * Currently we refuse to set up the cgroup - thereby refusing to let
 * the task enter it, and as a result failing the sys_unshare() or
 * clone() which initiated it - if any sibling cpusets have exclusive
 * cpus or mem.
 *
 * If this becomes a problem for some users who wish to
 * allow that scenario, then cpuset_post_clone() could be
 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
 * (and likewise for mems) to the new cgroup.  Called with cgroup_mutex
 * held.
 */
static void cpuset_post_clone(struct cgroup_subsys *ss,
			      struct cgroup *cgroup)
{
	struct cgroup *parent, *child;
	struct cpuset *cs, *parent_cs;

	parent = cgroup->parent;
	list_for_each_entry(child, &parent->children, sibling) {
		cs = cgroup_cs(child);
		if (is_mem_exclusive(cs) || is_cpu_exclusive(cs))
			return;
	}
	cs = cgroup_cs(cgroup);
	parent_cs = cgroup_cs(parent);

	cs->mems_allowed = parent_cs->mems_allowed;
	cs->cpus_allowed = parent_cs->cpus_allowed;
	return;
}

/*
 * cpuset_create - create a cpuset
 * ss: cpuset cgroup subsystem
 * cont: control group that the new cpuset will be part of
 */

static struct cgroup_subsys_state *cpuset_create(
	struct cgroup_subsys *ss,
	struct cgroup *cont)
{
	struct cpuset *cs;
	struct cpuset *parent;

	if (!cont->parent) {
		/* This is early initialization for the top cgroup */
		top_cpuset.mems_generation = cpuset_mems_generation++;
		return &top_cpuset.css;
	}
	parent = cgroup_cs(cont->parent);
	cs = kmalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	cpuset_update_task_memory_state();
	cs->flags = 0;
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);
	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	cpus_clear(cs->cpus_allowed);
	nodes_clear(cs->mems_allowed);
	cs->mems_generation = cpuset_mems_generation++;
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;

	cs->parent = parent;
	number_of_cpusets++;
	return &cs->css;
}

/*
 * Locking note on the strange update_flag() call below:
 *
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call rebuild_sched_domains().  The get_online_cpus()
 * call in rebuild_sched_domains() must not be made while holding
 * callback_mutex.  Elsewhere the kernel nests callback_mutex inside
 * get_online_cpus() calls.  So the reverse nesting would risk an
 * ABBA deadlock.
 */

static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct cpuset *cs = cgroup_cs(cont);

	cpuset_update_task_memory_state();

	if (is_sched_load_balance(cs))
		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	number_of_cpusets--;
	kfree(cs);
}

struct cgroup_subsys cpuset_subsys = {
	.name = "cpuset",
	.create = cpuset_create,
	.destroy = cpuset_destroy,
	.can_attach = cpuset_can_attach,
	.attach = cpuset_attach,
	.populate = cpuset_populate,
	.post_clone = cpuset_post_clone,
	.subsys_id = cpuset_subsys_id,
	.early_init = 1,
};

/*
 * cpuset_init_early - just enough so that the calls to
 * cpuset_update_task_memory_state() in early init code
 * are harmless.
 */

int __init cpuset_init_early(void)
{
	top_cpuset.mems_generation = cpuset_mems_generation++;
	return 0;
}

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
	int err = 0;

	cpus_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);

	fmeter_init(&top_cpuset.fmeter);
	top_cpuset.mems_generation = cpuset_mems_generation++;
	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
	top_cpuset.relax_domain_level = -1;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		return err;

	number_of_cpusets = 1;
	return 0;
}

/**
 * cpuset_do_move_task - move a given task to another cpuset
 * @tsk: pointer to task_struct of the task to move
 * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner
 *
 * Called by cgroup_scan_tasks() for each task in a cgroup.
 * Return nonzero to stop the walk through the tasks.
 */
static void cpuset_do_move_task(struct task_struct *tsk,
				struct cgroup_scanner *scan)
{
	struct cpuset_hotplug_scanner *chsp;

	chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
	cgroup_attach_task(chsp->to, tsk);
}

/**
 * move_member_tasks_to_cpuset - move tasks from one cpuset to another
 * @from: cpuset in which the tasks currently reside
 * @to: cpuset to which the tasks will be moved
 *
 * Called with cgroup_mutex held
 * callback_mutex must not be held, as cpuset_attach() will take it.
 *
 * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
 * calling callback functions for each.
 */
static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
{
	struct cpuset_hotplug_scanner scan;

	scan.scan.cg = from->css.cgroup;
	scan.scan.test_task = NULL; /* select all tasks in cgroup */
	scan.scan.process_task = cpuset_do_move_task;
	scan.scan.heap = NULL;
	scan.to = to->css.cgroup;

	if (cgroup_scan_tasks(&scan.scan))
		printk(KERN_ERR "move_member_tasks_to_cpuset: "
				"cgroup_scan_tasks failed\n");
}

/*
 * If common_cpu_mem_hotplug_unplug(), below, unplugs any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then move the tasks in the empty
 * cpuset to its next-highest non-empty parent.
 *
 * Called with cgroup_mutex held
 * callback_mutex must not be held, as cpuset_attach() will take it.
 */
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
{
	struct cpuset *parent;

	/*
	 * The cgroup's css_sets list is in use if there are tasks
	 * in the cpuset; the list is empty if there are none;
	 * the cs->css.refcnt seems always 0.
	 */
	if (list_empty(&cs->css.cgroup->css_sets))
		return;

	/*
	 * Find its next-highest non-empty parent, (top cpuset
	 * has online cpus, so can't be empty).
	 */
	parent = cs->parent;
	while (cpus_empty(parent->cpus_allowed) ||
			nodes_empty(parent->mems_allowed))
		parent = parent->parent;

	move_member_tasks_to_cpuset(cs, parent);
}

/*
 * Walk the specified cpuset subtree and look for empty cpusets.
 * The tasks of such a cpuset must be moved to a parent cpuset.
 *
 * Called with cgroup_mutex held.  We take callback_mutex to modify
 * cpus_allowed and mems_allowed.
 *
 * This walk processes the tree from top to bottom, completing one layer
 * before dropping down to the next.  It always processes a node before
 * any of its children.
 *
 * For now, since we lack memory hot unplug, we'll never see a cpuset
 * that has tasks along with an empty 'mems'.  But if we did see such
 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
 */
static void scan_for_empty_cpusets(const struct cpuset *root)
{
	LIST_HEAD(queue);
	struct cpuset *cp;	/* scans cpusets being updated */
	struct cpuset *child;	/* scans child cpusets of cp */
	struct cgroup *cont;
	nodemask_t oldmems;

	list_add_tail((struct list_head *)&root->stack_list, &queue);

	while (!list_empty(&queue)) {
		cp = list_first_entry(&queue, struct cpuset, stack_list);
		list_del(queue.next);
		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
			child = cgroup_cs(cont);
			list_add_tail(&child->stack_list, &queue);
		}

		/* Continue past cpusets with all cpus, mems online */
		if (cpus_subset(cp->cpus_allowed, cpu_online_map) &&
		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
			continue;

		oldmems = cp->mems_allowed;

		/* Remove offline cpus and mems from this cpuset. */
		mutex_lock(&callback_mutex);
		cpus_and(cp->cpus_allowed, cp->cpus_allowed, cpu_online_map);
		nodes_and(cp->mems_allowed, cp->mems_allowed,
					node_states[N_HIGH_MEMORY]);
		mutex_unlock(&callback_mutex);

		/* Move tasks from the empty cpuset to a parent */
		if (cpus_empty(cp->cpus_allowed) ||
		    nodes_empty(cp->mems_allowed))
			remove_tasks_in_empty_cpuset(cp);
		else {
			update_tasks_cpumask(cp);
			update_tasks_nodemask(cp, &oldmems);
		}
	}
}

/*
 * The cpus_allowed and mems_allowed masks in the top_cpuset track
 * cpu_online_map and node_states[N_HIGH_MEMORY].  Force the top cpuset to
 * track what's online after any CPU or memory node hotplug or unplug event.
 *
 * Since there are two callers of this routine, one for CPU hotplug
 * events and one for memory node hotplug events, we could have coded
 * two separate routines here.  We code it as a single common routine
 * in order to minimize text size.
 */

static void common_cpu_mem_hotplug_unplug(int rebuild_sd)
{
	cgroup_lock();

	top_cpuset.cpus_allowed = cpu_online_map;
	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
	scan_for_empty_cpusets(&top_cpuset);

	/*
	 * Scheduler destroys domains on hotplug events.
	 * Rebuild them based on the current settings.
	 */
	if (rebuild_sd)
		rebuild_sched_domains();

	cgroup_unlock();
}

/*
 * The top_cpuset tracks what CPUs and Memory Nodes are online,
 * period.  This is necessary in order to make cpusets transparent
 * (of no effect) on systems that are actively using CPU hotplug
 * but making no active use of cpusets.
 *
 * This routine ensures that top_cpuset.cpus_allowed tracks
 * cpu_online_map on each CPU hotplug (cpuhp) event.
 */

static int cpuset_handle_cpuhp(struct notifier_block *unused_nb,
				unsigned long phase, void *unused_cpu)
{
	switch (phase) {
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		common_cpu_mem_hotplug_unplug(1);
		break;
	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
 * Call this routine anytime after you change
 * node_states[N_HIGH_MEMORY].
 * See also the previous routine cpuset_handle_cpuhp().
 */

void cpuset_track_online_nodes(void)
{
	common_cpu_mem_hotplug_unplug(0);
}
#endif

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 **/

void __init cpuset_init_smp(void)
{
	top_cpuset.cpus_allowed = cpu_online_map;
	top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];

	hotcpu_notifier(cpuset_handle_cpuhp, 0);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to cpumask_t variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_map, even if this means going outside the
 * task's cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, cpumask_t *pmask)
{
	mutex_lock(&callback_mutex);
	cpuset_cpus_allowed_locked(tsk, pmask);
	mutex_unlock(&callback_mutex);
}

/**
 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a task's cpuset.
 * Must be called with callback_mutex held.
 **/
void cpuset_cpus_allowed_locked(struct task_struct *tsk, cpumask_t *pmask)
{
	task_lock(tsk);
	guarantee_online_cpus(task_cs(tsk), pmask);
	task_unlock(tsk);
}

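/*
 * Illustrative userspace sketch (not kernel code): the per-task cpu mask
 * that the kernel derives from a task's cpuset can be observed from user
 * space with sched_getaffinity(2).  Nothing below is cpuset-specific API;
 * it is simply one way to inspect the resulting mask.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		cpu_set_t set;
 *		int cpu;
 *
 *		CPU_ZERO(&set);
 *		if (sched_getaffinity(0, sizeof(set), &set) == 0)
 *			for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
 *				if (CPU_ISSET(cpu, &set))
 *					printf("cpu %d allowed\n", cpu);
 *		return 0;
 *	}
 */
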
void cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_HIGH_MEMORY], even if this means going outside
 * the task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;

	mutex_lock(&callback_mutex);
	task_lock(tsk);
	guarantee_online_mems(task_cs(tsk), &mask);
	task_unlock(tsk);
	mutex_unlock(&callback_mutex);

	return mask;
}

2054
d9fd8a6d 2055/**
19770b32
MG
2056 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. curremt mems_allowed
2057 * @nodemask: the nodemask to be checked
d9fd8a6d 2058 *
19770b32 2059 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
1da177e4 2060 */
19770b32 2061int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
1da177e4 2062{
19770b32 2063 return nodes_intersects(*nodemask, current->mems_allowed);
1da177e4
LT
2064}
2065
9bf2229f 2066/*
78608366
PM
2067 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2068 * mem_hardwall ancestor to the specified cpuset. Call holding
2069 * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
2070 * (an unusual configuration), then returns the root cpuset.
9bf2229f 2071 */
78608366 2072static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
9bf2229f 2073{
78608366 2074 while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
9bf2229f
PJ
2075 cs = cs->parent;
2076 return cs;
2077}
2078
/**
 * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If
 * __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If it's not a
 * __GFP_HARDWALL request and this zone's node is in the nearest
 * hardwalled cpuset ancestor to this task's cpuset, yes.
 * If the task has been OOM killed and has access to memory reserves
 * as specified by the TIF_MEMDIE flag, yes.
 * Otherwise, no.
 *
 * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
 * reduces to cpuset_zone_allowed_hardwall().  Otherwise,
 * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
 * from an enclosing cpuset.
 *
 * cpuset_zone_allowed_hardwall() only handles the simpler case of
 * hardwall cpusets, and never sleeps.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_mutex.  The
 * __alloc_pages() routine only calls here with the __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_mutex
 * mutex.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	TIF_MEMDIE   - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems allowed ok.
 *
 * Rule:
 *    Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
 *    pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
 *    the code that might scan up ancestor cpusets and sleep.
 */

int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	int node;			/* node that zone z is on */
	const struct cpuset *cs;	/* current cpuset ancestors */
	int allowed;			/* is allocation in zone z allowed? */

	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	node = zone_to_nid(z);
	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return 0;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return 1;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	mutex_lock(&callback_mutex);

	task_lock(current);
	cs = nearest_hardwall_ancestor(task_cs(current));
	task_unlock(current);

	allowed = node_isset(node, cs->mems_allowed);
	mutex_unlock(&callback_mutex);
	return allowed;
}

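/*
 * A simplified sketch, for illustration only, of how a page-allocator
 * style caller consults the softwall check above while walking a
 * zonelist.  It paraphrases the shape of the check in
 * mm/page_alloc.c:get_page_from_freelist(); the surrounding names and
 * labels are illustrative assumptions, not copied code.
 *
 *	if ((alloc_flags & ALLOC_CPUSET) &&
 *	    !cpuset_zone_allowed_softwall(zone, gfp_mask))
 *		goto try_next_zone;	// skip zones on disallowed nodes
 */
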
/*
 * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
 * @z: is this zone on an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.
 * If __GFP_THISNODE is set, yes, we can always allocate.  If zone
 * z's node is in our task's mems_allowed, yes.  If the task has been
 * OOM killed and has access to memory reserves as specified by the
 * TIF_MEMDIE flag, yes.  Otherwise, no.
 *
 * The __GFP_THISNODE placement logic is really handled elsewhere,
 * by forcibly using a zonelist starting at a specified node, and by
 * (in get_page_from_freelist()) refusing to consider the zones for
 * any node on the zonelist except the first.  By the time any such
 * calls get to this routine, we should just shut up and say 'yes'.
 *
 * Unlike the cpuset_zone_allowed_softwall() variant, above,
 * this variant requires that the zone be in the current task's
 * mems_allowed or that we're in interrupt.  It does not scan up the
 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
 * It never sleeps.
 */

int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	int node;			/* node that zone z is on */

	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
		return 1;
	node = zone_to_nid(z);
	if (node_isset(node, current->mems_allowed))
		return 1;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return 1;
	return 0;
}

/**
 * cpuset_lock - lock out any changes to cpuset structures
 *
 * The out of memory (oom) code needs to prevent cpusets from
 * being changed while it scans the tasklist looking for a
 * task in an overlapping cpuset.  Expose callback_mutex via this
 * cpuset_lock() routine, so the oom code can lock it, before
 * locking the task list.  The tasklist_lock is a spinlock, so
 * must be taken inside callback_mutex.
 */

void cpuset_lock(void)
{
	mutex_lock(&callback_mutex);
}

/**
 * cpuset_unlock - release lock on cpuset changes
 *
 * Undo the lock taken in a previous cpuset_lock() call.
 */

void cpuset_unlock(void)
{
	mutex_unlock(&callback_mutex);
}

/**
 * cpuset_mem_spread_node() - On which node to begin search for a page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

int cpuset_mem_spread_node(void)
{
	int node;

	node = next_node(current->cpuset_mem_spread_rotor, current->mems_allowed);
	if (node == MAX_NUMNODES)
		node = first_node(current->mems_allowed);
	current->cpuset_mem_spread_rotor = node;
	return node;
}
EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);

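/*
 * Illustrative standalone model (not kernel code) of the rotor logic used
 * by cpuset_mem_spread_node() above: advance to the next set bit in a mask
 * of allowed nodes, wrapping around to the first set bit when the end is
 * reached.  The 64-node limit and helper names below are assumptions made
 * only for this sketch.
 *
 *	#include <stdio.h>
 *
 *	#define MAX_NODES 64
 *
 *	static int next_allowed(unsigned long long mask, int prev)
 *	{
 *		int n;
 *
 *		for (n = prev + 1; n < MAX_NODES; n++)
 *			if (mask & (1ULL << n))
 *				return n;
 *		for (n = 0; n < MAX_NODES; n++)		// wrap around
 *			if (mask & (1ULL << n))
 *				return n;
 *		return -1;				// empty mask
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned long long allowed = 0x16;	// nodes 1, 2 and 4
 *		int rotor = -1, i;
 *
 *		for (i = 0; i < 6; i++) {
 *			rotor = next_allowed(allowed, rotor);
 *			printf("spread to node %d\n", rotor);	// 1 2 4 1 2 4
 *		}
 *		return 0;
 *	}
 */
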
/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
	task_lock(current);
	fmeter_markevent(&task_cs(current)->fmeter);
	task_unlock(current);
}

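/*
 * Illustrative userspace sketch (not kernel code): once "1" has been
 * written to memory_pressure_enabled in the root cpuset, the per-cpuset
 * rate maintained above can be polled by reading the memory_pressure
 * file.  The mount point "/dev/cpuset" and group name "demo" are
 * assumptions made only for this example.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[32];
 *		int fd = open("/dev/cpuset/demo/memory_pressure", O_RDONLY);
 *		ssize_t n;
 *
 *		if (fd < 0)
 *			return 1;
 *		n = read(fd, buf, sizeof(buf) - 1);
 *		if (n > 0) {
 *			buf[n] = '\0';
 *			printf("recent direct reclaim rate: %s", buf);
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */
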
#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cgroup_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
static int proc_cpuset_show(struct seq_file *m, void *unused_v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = -EINVAL;
	cgroup_lock();
	css = task_subsys_state(tsk, cpuset_subsys_id);
	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
	if (retval < 0)
		goto out_unlock;
	seq_puts(m, buf);
	seq_putc(m, '\n');
out_unlock:
	cgroup_unlock();
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

static int cpuset_open(struct inode *inode, struct file *file)
{
	struct pid *pid = PROC_I(inode)->pid;
	return single_open(file, proc_cpuset_show, pid);
}

const struct file_operations proc_cpuset_operations = {
	.open		= cpuset_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_PID_CPUSET */
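
/*
 * Illustrative userspace sketch (not kernel code): the seq_file handler
 * above is what answers a read of /proc/<pid>/cpuset, so the cpuset path
 * of any task can be fetched like this.  The "/demo" path in the comment
 * is an assumption for the example.
 *
 *	#include <stdio.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[64], line[256];
 *		FILE *f;
 *
 *		snprintf(path, sizeof(path), "/proc/%s/cpuset",
 *			 argc > 1 ? argv[1] : "self");
 *		f = fopen(path, "r");
 *		if (f && fgets(line, sizeof(line), f))
 *			printf("cpuset path: %s", line);	// e.g. "/demo"
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */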

/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Cpus_allowed:\t");
	m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
					task->cpus_allowed);
	seq_printf(m, "\n");
	seq_printf(m, "Cpus_allowed_list:\t");
	m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count,
					task->cpus_allowed);
	seq_printf(m, "\n");
	seq_printf(m, "Mems_allowed:\t");
	m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
					task->mems_allowed);
	seq_printf(m, "\n");
	seq_printf(m, "Mems_allowed_list:\t");
	m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count,
					task->mems_allowed);
	seq_printf(m, "\n");
}
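
/*
 * Illustrative userspace sketch (not kernel code): the four lines emitted
 * above can be read back from /proc/<pid>/status, which is a convenient
 * way to check a task's current cpuset-constrained masks.
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int main(void)
 *	{
 *		char line[512];
 *		FILE *f = fopen("/proc/self/status", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			if (!strncmp(line, "Cpus_allowed", 12) ||
 *			    !strncmp(line, "Mems_allowed", 12))
 *				fputs(line, stdout);
 *		fclose(f);
 *		return 0;
 *	}
 */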