kernel/cgroup.c (deliverable/linux.git, merge branch 'linus'; commit 'v3.0-rc2' into next)
/*
 * Generic process-grouping system.
 *
 * Based originally on the cpuset system, extracted by Paul Menage
 * Copyright (C) 2006 Google, Inc
 *
 * Notifications support
 * Copyright (C) 2009 Nokia Corporation
 * Author: Kirill A. Shutemov
 *
 * Copyright notices from the original cpuset code:
 * --------------------------------------------------
 * Copyright (C) 2003 BULL SA.
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 * Portions derived from Patrick Mochel's sysfs code.
 * sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 * 2003-10-10 Written by Simon Derr.
 * 2003-10-22 Updates by Stephen Hemminger.
 * 2004 May-July Rework by Paul Jackson.
 * ---------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_proc */

#include <asm/atomic.h>

static DEFINE_MUTEX(cgroup_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) &_x ## _subsys,
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};

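/*
 * For example, with CONFIG_CPUSETS enabled, <linux/cgroup_subsys.h>
 * contains SUBSYS(cpuset), so the initializer above picks up an
 * &cpuset_subsys entry at the cpuset_subsys_id slot.
 */
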
#define MAX_CGROUP_ROOT_NAMELEN 64

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_bits;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy.*/
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/*
 * CSS ID -- an ID per subsystem's Cgroup Subsys State (CSS). Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX	(65535)
struct css_id {
	/*
	 * The css to which this ID points. This pointer is set to a valid
	 * value after the cgroup is populated. If the cgroup is removed,
	 * this will be NULL. This pointer is expected to be RCU-safe because
	 * destroy() is called after synchronize_rcu(). But for safe use,
	 * css_is_removed() or css_tryget() should be used to avoid races.
	 */
	struct cgroup_subsys_state __rcu *css;
	/*
	 * ID of this css.
	 */
	unsigned short id;
	/*
	 * Depth in the hierarchy this ID belongs to.
	 */
	unsigned short depth;
	/*
	 * ID is freed by RCU. (and lookup routine is RCU safe.)
	 */
	struct rcu_head rcu_head;
	/*
	 * Hierarchy this CSS ID belongs to.
	 */
	unsigned short stack[0]; /* Array of length (depth+1) */
};

/*
 * cgroup_event represents events that userspace wants to receive.
 */
struct cgroup_event {
	/*
	 * Cgroup which the event belongs to.
	 */
	struct cgroup *cgrp;
	/*
	 * Control file the event is associated with.
	 */
	struct cftype *cft;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by its cgroup.
	 */
	struct list_head list;
	/*
	 * All of the fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

/* The list of hierarchy roots */

static LIST_HEAD(roots);
static int root_count;

static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

#ifdef CONFIG_PROVE_LOCKING
int cgroup_lock_is_held(void)
{
	return lockdep_is_held(&cgroup_mutex);
}
#else /* #ifdef CONFIG_PROVE_LOCKING */
int cgroup_lock_is_held(void)
{
	return mutex_is_locked(&cgroup_mutex);
}
#endif /* #else #ifdef CONFIG_PROVE_LOCKING */

EXPORT_SYMBOL_GPL(cgroup_lock_is_held);

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
	ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

static int clone_children(const struct cgroup *cgrp)
{
	return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
list_for_each_entry(_root, &roots, root_list)

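/*
 * Typical iteration, as used by cgroup_show_options() below:
 *
 *	struct cgroup_subsys *ss;
 *	for_each_subsys(root, ss)
 *		seq_printf(seq, ",%s", ss->name);
 */
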
/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
	/*
	 * List running through cg_cgroup_links associated with a
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
	struct cgroup *cgrp;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
	 */
	struct list_head cg_link_list;
	struct css_set *cg;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
			   struct cgroup_subsys_state *css);

/* css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set. Nests outside task->alloc_lock
 * due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
#define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];

static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
{
	int i;
	int index;
	unsigned long tmp = 0UL;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		tmp += (unsigned long)css[i];
	tmp = (tmp >> 16) ^ tmp;

	index = hash_long(tmp, CSS_SET_HASH_BITS);

	return &css_set_table[index];
}

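/*
 * Example: find_existing_css_set() below computes the bucket for a
 * pre-built template with
 *
 *	hhead = css_set_hash(template);
 *
 * and then walks the chain with hlist_for_each_entry(), comparing
 * candidates via compare_css_sets().
 */
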
/* We don't maintain the lists running through each css_set to its
 * task until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cg, int taskexit)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. Unlink it and release cgroup refcounts */
	hlist_del(&cg->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
				 cg_link_list) {
		struct cgroup *cgrp = link->cgrp;
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	write_unlock(&css_set_lock);
	kfree_rcu(cg, rcu_head);
}

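/*
 * Note on the fast path above: atomic_add_unless(&cg->refcount, -1, 1)
 * decrements and returns nonzero unless the count is exactly 1, so a
 * reference can be dropped without taking css_set_lock whenever it
 * cannot be the final one.
 */
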
/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
	__put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
	__put_css_set(cg, 1);
}

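/*
 * put_css_set_taskexit() differs from put_css_set() only in the taskexit
 * flag, which makes __put_css_set() mark a cgroup CGRP_RELEASABLE before
 * the release check when the last task reference disappears.
 */
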
/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
			     struct css_set *old_cg,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	l1 = &cg->cg_links;
	l2 = &old_cg->cg_links;
	while (1) {
		struct cg_cgroup_link *cgl1, *cgl2;
		struct cgroup *cg1, *cg2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cg->cg_links) {
			BUG_ON(l2 != &old_cg->cg_links);
			break;
		} else {
			BUG_ON(l2 == &old_cg->cg_links);
		}
		/* Locate the cgroups associated with these links. */
		cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
		cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
		cg1 = cgl1->cgrp;
		cg2 = cgl2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cg1->root != cg2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cg1->root == new_cgrp->root) {
			if (cg1 != new_cgrp)
				return false;
		} else {
			if (cg1 != cg2)
				return false;
		}
	}
	return true;
}

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the cgroup group that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new cgroup group
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct hlist_head *hhead;
	struct hlist_node *node;
	struct css_set *cg;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_bits & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	hhead = css_set_hash(template);
	hlist_for_each_entry(cg, node, hhead, hlist) {
		if (!compare_css_sets(cg, oldcg, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cg;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;
	INIT_LIST_HEAD(tmp);
	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cg_links(tmp);
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
			 struct css_set *cg, struct cgroup *cgrp)
{
	struct cg_cgroup_link *link;

	BUG_ON(list_empty(tmp_cg_links));
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
	link->cgrp = cgrp;
	atomic_inc(&cgrp->count);
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cg_link_list, &cg->cg_links);
}

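/*
 * link_css_set() consumes one pre-allocated entry from @tmp_cg_links per
 * call; callers size the list up front with allocate_cg_links(), e.g.
 * find_css_set() allocates root_count links and cgroup_mount() allocates
 * css_set_count of them.
 */
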
/*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old group, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];

	struct list_head tmp_cg_links;

	struct hlist_head *hhead;
	struct cg_cgroup_link *link;

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	read_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	atomic_set(&res->refcount, 1);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);
	INIT_HLIST_NODE(&res->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_cg_links, res, c);
	}

	BUG_ON(!list_empty(&tmp_cg_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	hhead = css_set_hash(res->subsys);
	hlist_add_head(&res->hlist, hhead);

	write_unlock(&css_set_lock);

	return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *css;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	css = task->cgroups;
	if (css == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cg_cgroup_link *link;
		list_for_each_entry(link, &css->cg_links, cg_link_list) {
			struct cgroup *c = link->cgrp;
			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing. However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again. Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count). So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex. These are the two most performance
 * critical pieces of code here. The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits. Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty. Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks. So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 * The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another. It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex. Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S. One more locking exception. RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

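/*
 * A minimal sketch of the rule above: any path that inspects or modifies
 * hierarchy state takes the global mutex through the wrappers below,
 *
 *	cgroup_lock();
 *	...
 *	cgroup_unlock();
 *
 * while hot paths read task->cgroups under task_lock() or RCU instead.
 */
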
/**
 * cgroup_lock - lock out any changes to cgroup structures
 *
 */
void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_lock);

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unlock);

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
			struct cgroup *parent, struct cgroup *child);

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ret = 0;

	for_each_subsys(cgrp->root, ss)
		if (ss->pre_destroy) {
			ret = ss->pre_destroy(ss, cgrp);
			if (ret)
				break;
		}

	return ret;
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory ? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;
		struct cgroup_subsys *ss;
		BUG_ON(!(cgroup_is_removed(cgrp)));
		/* It's possible for external users to be holding css
		 * reference counts on a cgroup; css_put() needs to
		 * be able to access the cgroup after decrementing
		 * the reference count in order to know if it needs to
		 * queue the cgroup to be handled by the release
		 * agent */
		synchronize_rcu();

		mutex_lock(&cgroup_mutex);
		/*
		 * Release the subsystem state objects.
		 */
		for_each_subsys(cgrp->root, ss)
			ss->destroy(ss, cgrp);

		cgrp->root->number_of_cgroups--;
		mutex_unlock(&cgroup_mutex);

		/*
		 * Drop the active superblock reference that we took when we
		 * created the cgroup
		 */
		deactivate_super(cgrp->root->sb);

		/*
		 * if we're getting rid of the cgroup, refcount should ensure
		 * that there are no pidlists left.
		 */
		BUG_ON(!list_empty(&cgrp->pidlists));

		kfree_rcu(cgrp, rcu_head);
	}
	iput(inode);
}

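/*
 * Returning 1 from ->d_delete tells the VFS to drop a cgroup dentry as
 * soon as its last reference goes away instead of caching it unused.
 */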
static int cgroup_delete(const struct dentry *d)
{
	return 1;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
	struct list_head *node;

	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	spin_lock(&dentry->d_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);

		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
		list_del_init(node);
		if (d->d_inode) {
			/* This should never be called on a cgroup
			 * directory with child cgroups */
			BUG_ON(d->d_inode->i_mode & S_IFDIR);
			dget_dlock(d);
			spin_unlock(&d->d_lock);
			spin_unlock(&dentry->d_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dentry->d_lock);
		} else
			spin_unlock(&d->d_lock);
		node = dentry->d_subdirs.next;
	}
	spin_unlock(&dentry->d_lock);
}

/*
 * NOTE : the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;

	cgroup_clear_directory(dentry);

	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}

/*
 * A queue for waiters doing rmdir() on a cgroup. A task will sleep when
 * cgroup->count == 0 && list_empty(&cgroup->children) && a subsystem still
 * holds a reference to css->refcnt. In general, this refcnt is expected to
 * go down to zero soon.
 *
 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
 */
DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);

static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
{
	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
		wake_up_all(&cgroup_rmdir_waitq);
}

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
{
	css_get(css);
}

void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
{
	cgroup_wakeup_rmdir_waiter(css->cgroup);
	css_put(css);
}

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			     unsigned long final_bits)
{
	unsigned long added_bits, removed_bits;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

	removed_bits = root->actual_subsys_bits & ~final_bits;
	added_bits = final_bits & ~root->actual_subsys_bits;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_bits))
			continue;
		/*
		 * Nobody should tell us to do a subsys that doesn't exist:
		 * parse_cgroupfs_options should catch that case and refcounts
		 * ensure that subsystems won't disappear once selected.
		 */
		BUG_ON(ss == NULL);
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (root->number_of_cgroups > 1)
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_bits) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			mutex_lock(&ss->hierarchy_mutex);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(ss, cgrp);
			mutex_unlock(&ss->hierarchy_mutex);
			/* refcount was already taken, and we're keeping it */
		} else if (bit & removed_bits) {
			/* We're removing this subsystem */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			mutex_lock(&ss->hierarchy_mutex);
			if (ss->bind)
				ss->bind(ss, dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			subsys[i]->root = &rootnode;
			list_move(&ss->sibling, &rootnode.subsys_list);
			mutex_unlock(&ss->hierarchy_mutex);
			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
		} else if (bit & final_bits) {
			/* Subsystem state should already exist */
			BUG_ON(ss == NULL);
			BUG_ON(!cgrp->subsys[i]);
			/*
			 * a refcount was taken, but we already had one, so
			 * drop the extra reference.
			 */
			module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
			BUG_ON(ss->module && !module_refcount(ss->module));
#endif
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_bits = root->actual_subsys_bits = final_bits;
	synchronize_rcu();

	return 0;
}

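/*
 * For example, rebinding a root from actual_subsys_bits = { cpuset } to
 * final_bits = { cpuset, blkio } yields added_bits = { blkio } and
 * removed_bits = { }, so only the blkio branch above runs.
 */
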
static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (test_bit(ROOT_NOPREFIX, &root->flags))
		seq_puts(seq, ",noprefix");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (clone_children(&root->top_cgroup))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
	char *release_agent;
	bool clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;
};

/*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->clone_children = true;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_bits);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified, select all the subsystems;
	 * likewise, if none of 'all', 'none' and the subsystem name options
	 * were specified, default to 'all'.
	 */
	if (all_ss || (!all_ss && !one_ss && !opts->none)) {
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (ss->disabled)
				continue;
			set_bit(i, &opts->subsys_bits);
		}
	}

	/* Consistency checks */

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
	    (opts->subsys_bits & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_bits && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_bits && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_bits))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * oops, one of the modules was going away. this means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
			/* drop refcounts only on the ones we took */
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_bits))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}

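/*
 * Example mount data accepted by parse_cgroupfs_options() (hierarchy
 * names here are arbitrary):
 *	"cpuset,noprefix"  - cpuset only, with unprefixed control files
 *	"all"              - every registered, enabled subsystem
 *	"none,name=myhier" - a named hierarchy with no bound subsystems
 */
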
static void drop_parsed_module_refcounts(unsigned long subsys_bits)
{
	int i;
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & subsys_bits))
			continue;
		module_put(subsys[i]->module);
	}
}

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);
	if (ret) {
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	/* (re)populate subsystem files */
	cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;
	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	init_cgroup_housekeeping(cgrp);
}

static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret);
	return true;
}

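/*
 * The loop above is the usual IDA pattern of this era: ida_pre_get()
 * preloads outside the spinlock, ida_get_new_above() allocates under it,
 * and -EAGAIN sends us around again.
 */
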
static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_bits || opts->none)
	    && (opts->subsys_bits != root->subsys_bits))
		return 0;

	return 1;
}

static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_bits && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	root->subsys_bits = opts->subsys_bits;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->clone_children)
		set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (!root)
		return;

	BUG_ON(!root->hierarchy_id);
	spin_lock(&hierarchy_id_lock);
	ida_remove(&hierarchy_ida, root->hierarchy_id);
	spin_unlock(&hierarchy_id_lock);
	kfree(root);
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_bits && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
				   int flags, const char *unused_dev_name,
				   void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
		struct cgroupfs_root *existing_root;
		int i;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		if (strlen(root->name)) {
			/* Check for name clashes with existing mounts */
			for_each_active_root(existing_root) {
				if (!strcmp(existing_root->name, root->name)) {
					ret = -EBUSY;
					mutex_unlock(&cgroup_mutex);
					mutex_unlock(&inode->i_mutex);
					goto drop_new_super;
				}
			}
		}

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			free_cg_links(&tmp_cg_links);
			goto drop_new_super;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
			struct hlist_head *hhead = &css_set_table[i];
			struct hlist_node *node;
			struct css_set *cg;

			hlist_for_each_entry(cg, node, hhead, hlist)
				link_css_set(&tmp_cg_links, cg, root_cgrp);
		}
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->sibling));
		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(root_cgrp);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);
		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_bits);
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 drop_new_super:
	deactivate_locked_super(sb);
 drop_modules:
	drop_parsed_module_refcounts(opts.subsys_bits);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}

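/*
 * To summarize the flow above: parse the options, allocate a candidate
 * root, then let sget() decide between reusing an existing superblock
 * (matched by cgroup_test_super()) and installing the candidate
 * (cgroup_set_super()); only the new-hierarchy branch rebinds
 * subsystems and links the existing css_sets to the new top cgroup.
 */
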
static void cgroup_kill_sb(struct super_block *sb) {
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_mutex);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

a043e3b2
LZ
1686/**
1687 * cgroup_path - generate the path of a cgroup
1688 * @cgrp: the cgroup in question
1689 * @buf: the buffer to write the path into
1690 * @buflen: the length of the buffer
1691 *
a47295e6
PM
1692 * Called with cgroup_mutex held or else with an RCU-protected cgroup
1693 * reference. Writes path of cgroup into buf. Returns 0 on success,
1694 * -errno on error.
ddbcc7e8 1695 */
bd89aabc 1696int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
ddbcc7e8
PM
1697{
1698 char *start;
9a9686b6
LZ
1699 struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
1700 rcu_read_lock_held() ||
1701 cgroup_lock_is_held());
ddbcc7e8 1702
a47295e6 1703 if (!dentry || cgrp == dummytop) {
ddbcc7e8
PM
1704 /*
1705 * Inactive subsystems have no dentry for their root
1706 * cgroup
1707 */
1708 strcpy(buf, "/");
1709 return 0;
1710 }
1711
1712 start = buf + buflen;
1713
1714 *--start = '\0';
1715 for (;;) {
a47295e6 1716 int len = dentry->d_name.len;
9a9686b6 1717
ddbcc7e8
PM
1718 if ((start -= len) < buf)
1719 return -ENAMETOOLONG;
9a9686b6 1720 memcpy(start, dentry->d_name.name, len);
bd89aabc
PM
1721 cgrp = cgrp->parent;
1722 if (!cgrp)
ddbcc7e8 1723 break;
9a9686b6
LZ
1724
1725 dentry = rcu_dereference_check(cgrp->dentry,
1726 rcu_read_lock_held() ||
1727 cgroup_lock_is_held());
bd89aabc 1728 if (!cgrp->parent)
ddbcc7e8
PM
1729 continue;
1730 if (--start < buf)
1731 return -ENAMETOOLONG;
1732 *start = '/';
1733 }
1734 memmove(buf, start, buf + buflen - start);
1735 return 0;
1736}
67523c48 1737EXPORT_SYMBOL_GPL(cgroup_path);
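
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * callers typically resolve the path under rcu_read_lock() into a
 * stack buffer and treat -ENAMETOOLONG as "buffer too small".  The
 * helper name below is hypothetical.
 */
#if 0
static void example_print_cgroup_path(struct cgroup *cgrp)
{
	char buf[256];

	rcu_read_lock();
	if (!cgroup_path(cgrp, buf, sizeof(buf)))
		printk(KERN_DEBUG "cgroup path: %s\n", buf);
	rcu_read_unlock();
}
#endif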

/*
 * cgroup_task_migrate - move a task from one cgroup to another.
 *
 * 'guarantee' is set if the caller promises that a new css_set for the task
 * will already exist. If not set, this function might sleep, and can fail
 * with -ENOMEM. Otherwise, it can only fail with -ESRCH.
 */
static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
			       struct task_struct *tsk, bool guarantee)
{
	struct css_set *oldcg;
	struct css_set *newcg;

	/*
	 * get old css_set. we need to take task_lock and refcount it, because
	 * an exiting task can change its css_set to init_css_set and drop its
	 * old one without taking cgroup_mutex.
	 */
	task_lock(tsk);
	oldcg = tsk->cgroups;
	get_css_set(oldcg);
	task_unlock(tsk);

	/* locate or allocate a new css_set for this task. */
	if (guarantee) {
		/* we know the css_set we want already exists. */
		struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
		read_lock(&css_set_lock);
		newcg = find_existing_css_set(oldcg, cgrp, template);
		BUG_ON(!newcg);
		get_css_set(newcg);
		read_unlock(&css_set_lock);
	} else {
		might_sleep();
		/* find_css_set will give us newcg already referenced. */
		newcg = find_css_set(oldcg, cgrp);
		if (!newcg) {
			put_css_set(oldcg);
			return -ENOMEM;
		}
	}
	put_css_set(oldcg);

	/* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
	task_lock(tsk);
	if (tsk->flags & PF_EXITING) {
		task_unlock(tsk);
		put_css_set(newcg);
		return -ESRCH;
	}
	rcu_assign_pointer(tsk->cgroups, newcg);
	task_unlock(tsk);

	/* Update the css_set linked lists if we're using them */
	write_lock(&css_set_lock);
	if (!list_empty(&tsk->cg_list))
		list_move(&tsk->cg_list, &newcg->tasks);
	write_unlock(&css_set_lock);

	/*
	 * We just gained a reference on oldcg by taking it from the task. As
	 * trading it for newcg is protected by cgroup_mutex, we're safe to
	 * drop it here; it will be freed under RCU.
	 */
	put_css_set(oldcg);

	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
	return 0;
}

/**
 * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
 * @cgrp: the cgroup the task is attaching to
 * @tsk: the task to be attached
 *
 * Call holding cgroup_mutex. May take task_lock of
 * the task 'tsk' during call.
 */
int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	int retval;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	struct cgroup *oldcgrp;
	struct cgroupfs_root *root = cgrp->root;

	/* Nothing to do if the task is already in that cgroup */
	oldcgrp = task_cgroup_from_root(tsk, root);
	if (cgrp == oldcgrp)
		return 0;

	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, tsk);
			if (retval) {
				/*
				 * Remember on which subsystem the can_attach()
				 * failed, so that we only call cancel_attach()
				 * against the subsystems whose can_attach()
				 * succeeded. (See below)
				 */
				failed_ss = ss;
				goto out;
			}
		}
		if (ss->can_attach_task) {
			retval = ss->can_attach_task(cgrp, tsk);
			if (retval) {
				failed_ss = ss;
				goto out;
			}
		}
	}

	retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
	if (retval)
		goto out;

	for_each_subsys(root, ss) {
		if (ss->pre_attach)
			ss->pre_attach(cgrp);
		if (ss->attach_task)
			ss->attach_task(cgrp, tsk);
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, tsk);
	}

	synchronize_rcu();

	/*
	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
	 * is no longer empty.
	 */
	cgroup_wakeup_rmdir_waiter(cgrp);
out:
	if (retval) {
		for_each_subsys(root, ss) {
			if (ss == failed_ss)
				/*
				 * This subsystem was the one that failed the
				 * can_attach() check earlier, so we don't need
				 * to call cancel_attach() against it or any
				 * remaining subsystems.
				 */
				break;
			if (ss->cancel_attach)
				ss->cancel_attach(ss, cgrp, tsk);
		}
	}
	return retval;
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroupfs_root *root;
	int retval = 0;

	cgroup_lock();
	for_each_active_root(root) {
		struct cgroup *from_cg = task_cgroup_from_root(from, root);

		retval = cgroup_attach_task(from_cg, tsk);
		if (retval)
			break;
	}
	cgroup_unlock();

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
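
/*
 * Caller-side sketch (hypothetical function name): a kernel service
 * spawning a worker on behalf of a user task can charge the worker to
 * the same cgroups in every active hierarchy with one call.
 */
#if 0
static int example_adopt_worker(struct task_struct *user_task,
				struct task_struct *worker)
{
	return cgroup_attach_task_all(user_task, worker);
}
#endif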

/*
 * cgroup_attach_proc works in two stages, the first of which prefetches all
 * new css_sets needed (to make sure we have enough memory before committing
 * to the move) and stores them in a list of entries of the following type.
 * TODO: possible optimization: use css_set->rcu_head for chaining instead
 */
struct cg_list_entry {
	struct css_set *cg;
	struct list_head links;
};

static bool css_set_check_fetched(struct cgroup *cgrp,
				  struct task_struct *tsk, struct css_set *cg,
				  struct list_head *newcg_list)
{
	struct css_set *newcg;
	struct cg_list_entry *cg_entry;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];

	read_lock(&css_set_lock);
	newcg = find_existing_css_set(cg, cgrp, template);
	if (newcg)
		get_css_set(newcg);
	read_unlock(&css_set_lock);

	/* doesn't exist at all? */
	if (!newcg)
		return false;
	/* see if it's already in the list */
	list_for_each_entry(cg_entry, newcg_list, links) {
		if (cg_entry->cg == newcg) {
			put_css_set(newcg);
			return true;
		}
	}

	/* not found */
	put_css_set(newcg);
	return false;
}

/*
 * Find the new css_set and store it in the list in preparation for moving the
 * given task to the given cgroup. Returns 0 or -ENOMEM.
 */
static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
			    struct list_head *newcg_list)
{
	struct css_set *newcg;
	struct cg_list_entry *cg_entry;

	/* ensure a new css_set will exist for this thread */
	newcg = find_css_set(cg, cgrp);
	if (!newcg)
		return -ENOMEM;
	/* add it to the list */
	cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
	if (!cg_entry) {
		put_css_set(newcg);
		return -ENOMEM;
	}
	cg_entry->cg = newcg;
	list_add(&cg_entry->links, newcg_list);
	return 0;
}

/**
 * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
 * @cgrp: the cgroup to attach to
 * @leader: the threadgroup leader task_struct of the group to be attached
 *
 * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
 * take task_lock of each thread in leader's threadgroup individually in turn.
 */
int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
{
	int retval, i, group_size;
	struct cgroup_subsys *ss, *failed_ss = NULL;
	bool cancel_failed_ss = false;
	/* guaranteed to be initialized later, but the compiler needs this */
	struct cgroup *oldcgrp = NULL;
	struct css_set *oldcg;
	struct cgroupfs_root *root = cgrp->root;
	/* threadgroup list cursor and array */
	struct task_struct *tsk;
	struct flex_array *group;
	/*
	 * we need to make sure we have css_sets for all the tasks we're
	 * going to move -before- we actually start moving them, so that in
	 * case we get an ENOMEM we can bail out before making any changes.
	 */
	struct list_head newcg_list;
	struct cg_list_entry *cg_entry, *temp_nobe;

	/*
	 * step 0: in order to do expensive, possibly blocking operations for
	 * every thread, we cannot iterate the thread group list, since it
	 * needs rcu or tasklist locked. instead, build an array of all
	 * threads in the group - threadgroup_fork_lock prevents new threads
	 * from appearing, and if threads exit, this will just be an
	 * over-estimate.
	 */
	group_size = get_nr_threads(leader);
	/* flex_array supports very large thread-groups better than kmalloc. */
	group = flex_array_alloc(sizeof(struct task_struct *), group_size,
				 GFP_KERNEL);
	if (!group)
		return -ENOMEM;
	/* pre-allocate to guarantee space while iterating in rcu read-side. */
	retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
	if (retval)
		goto out_free_group_list;

	/* prevent changes to the threadgroup list while we take a snapshot. */
	rcu_read_lock();
	if (!thread_group_leader(leader)) {
		/*
		 * a race with de_thread from another thread's exec() may strip
		 * us of our leadership, making while_each_thread unsafe to use
		 * on this task. if this happens, there is no choice but to
		 * throw this task away and try again (from cgroup_procs_write);
		 * this is "double-double-toil-and-trouble-check locking".
		 */
		rcu_read_unlock();
		retval = -EAGAIN;
		goto out_free_group_list;
	}
	/* take a reference on each task in the group to go in the array. */
	tsk = leader;
	i = 0;
	do {
		/* as per above, nr_threads may decrease, but not increase. */
		BUG_ON(i >= group_size);
		get_task_struct(tsk);
		/*
		 * saying GFP_ATOMIC has no effect here because we did prealloc
		 * earlier, but it's good form to communicate our expectations.
		 */
		retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
		BUG_ON(retval != 0);
		i++;
	} while_each_thread(leader, tsk);
	/* remember the number of threads in the array for later. */
	group_size = i;
	rcu_read_unlock();

	/*
	 * step 1: check that we can legitimately attach to the cgroup.
	 */
	for_each_subsys(root, ss) {
		if (ss->can_attach) {
			retval = ss->can_attach(ss, cgrp, leader);
			if (retval) {
				failed_ss = ss;
				goto out_cancel_attach;
			}
		}
		/* a callback to be run on every thread in the threadgroup. */
		if (ss->can_attach_task) {
			/* run on each task in the threadgroup. */
			for (i = 0; i < group_size; i++) {
				tsk = flex_array_get_ptr(group, i);
				retval = ss->can_attach_task(cgrp, tsk);
				if (retval) {
					failed_ss = ss;
					cancel_failed_ss = true;
					goto out_cancel_attach;
				}
			}
		}
	}

	/*
	 * step 2: make sure css_sets exist for all threads to be migrated.
	 * we use find_css_set, which allocates a new one if necessary.
	 */
	INIT_LIST_HEAD(&newcg_list);
	for (i = 0; i < group_size; i++) {
		tsk = flex_array_get_ptr(group, i);
		/* nothing to do if this task is already in the cgroup */
		oldcgrp = task_cgroup_from_root(tsk, root);
		if (cgrp == oldcgrp)
			continue;
		/* get old css_set pointer */
		task_lock(tsk);
		if (tsk->flags & PF_EXITING) {
			/* ignore this task if it's going away */
			task_unlock(tsk);
			continue;
		}
		oldcg = tsk->cgroups;
		get_css_set(oldcg);
		task_unlock(tsk);
		/* see if the new one for us is already in the list? */
		if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
			/* was already there, nothing to do. */
			put_css_set(oldcg);
		} else {
			/* we don't already have it. get new one. */
			retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
			put_css_set(oldcg);
			if (retval)
				goto out_list_teardown;
		}
	}

	/*
	 * step 3: now that we're guaranteed success wrt the css_sets, proceed
	 * to move all tasks to the new cgroup, calling ss->attach_task for
	 * each one along the way. there are no failure cases after here, so
	 * this is the commit point.
	 */
	for_each_subsys(root, ss) {
		if (ss->pre_attach)
			ss->pre_attach(cgrp);
	}
	for (i = 0; i < group_size; i++) {
		tsk = flex_array_get_ptr(group, i);
		/* leave current thread as it is if it's already there */
		oldcgrp = task_cgroup_from_root(tsk, root);
		if (cgrp == oldcgrp)
			continue;
		/* attach each task to each subsystem */
		for_each_subsys(root, ss) {
			if (ss->attach_task)
				ss->attach_task(cgrp, tsk);
		}
		/* if the thread is PF_EXITING, it can just get skipped. */
		retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
		BUG_ON(retval != 0 && retval != -ESRCH);
	}
	/* nothing is sensitive to fork() after this point. */

	/*
	 * step 4: do expensive, non-thread-specific subsystem callbacks.
	 * TODO: if ever a subsystem needs to know the oldcgrp for each task
	 * being moved, this call will need to be reworked to communicate that.
	 */
	for_each_subsys(root, ss) {
		if (ss->attach)
			ss->attach(ss, cgrp, oldcgrp, leader);
	}

	/*
	 * step 5: success! and cleanup
	 */
	synchronize_rcu();
	cgroup_wakeup_rmdir_waiter(cgrp);
	retval = 0;
out_list_teardown:
	/* clean up the list of prefetched css_sets. */
	list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
		list_del(&cg_entry->links);
		put_css_set(cg_entry->cg);
		kfree(cg_entry);
	}
out_cancel_attach:
	/* same deal as in cgroup_attach_task */
	if (retval) {
		for_each_subsys(root, ss) {
			if (ss == failed_ss) {
				if (cancel_failed_ss && ss->cancel_attach)
					ss->cancel_attach(ss, cgrp, leader);
				break;
			}
			if (ss->cancel_attach)
				ss->cancel_attach(ss, cgrp, leader);
		}
	}
	/* clean up the array of referenced threads in the group. */
	for (i = 0; i < group_size; i++) {
		tsk = flex_array_get_ptr(group, i);
		put_task_struct(tsk);
	}
out_free_group_list:
	flex_array_free(group);
	return retval;
}

/*
 * Find the task_struct of the task to attach by vpid and pass it along to the
 * function to attach either it or all tasks in its threadgroup. Will take
 * cgroup_mutex; may take task_lock of task.
 */
static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
{
	struct task_struct *tsk;
	const struct cred *cred = current_cred(), *tcred;
	int ret;

	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;

	if (pid) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			cgroup_unlock();
			return -ESRCH;
		}
		if (threadgroup) {
			/*
			 * RCU protects this access, since tsk was found in the
			 * tid map. a race with de_thread may cause group_leader
			 * to stop being the leader, but cgroup_attach_proc will
			 * detect it later.
			 */
			tsk = tsk->group_leader;
		} else if (tsk->flags & PF_EXITING) {
			/* optimization for the single-task-only case */
			rcu_read_unlock();
			cgroup_unlock();
			return -ESRCH;
		}

		/*
		 * even if we're attaching all tasks in the thread group, we
		 * only need to check permissions on one of them.
		 */
		tcred = __task_cred(tsk);
		if (cred->euid &&
		    cred->euid != tcred->uid &&
		    cred->euid != tcred->suid) {
			rcu_read_unlock();
			cgroup_unlock();
			return -EACCES;
		}
		get_task_struct(tsk);
		rcu_read_unlock();
	} else {
		if (threadgroup)
			tsk = current->group_leader;
		else
			tsk = current;
		get_task_struct(tsk);
	}

	if (threadgroup) {
		threadgroup_fork_write_lock(tsk);
		ret = cgroup_attach_proc(cgrp, tsk);
		threadgroup_fork_write_unlock(tsk);
	} else {
		ret = cgroup_attach_task(cgrp, tsk);
	}
	put_task_struct(tsk);
	cgroup_unlock();
	return ret;
}

static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
{
	return attach_task_by_pid(cgrp, pid, false);
}

static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
{
	int ret;
	do {
		/*
		 * attach_proc fails with -EAGAIN if threadgroup leadership
		 * changes in the middle of the operation, in which case we
		 * need to find the task_struct for the new leader and start
		 * over.
		 */
		ret = attach_task_by_pid(cgrp, tgid, true);
	} while (ret == -EAGAIN);
	return ret;
}
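
/*
 * Interface note: userspace attaches a single thread by writing its TID
 * to "tasks" and a whole thread group by writing its TGID to
 * "cgroup.procs"; the -EAGAIN loop above makes the latter robust
 * against a concurrent exec() changing the group leader mid-operation.
 */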

/**
 * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
 * @cgrp: the cgroup to be checked for liveness
 *
 * On success, returns true; the lock should be later released with
 * cgroup_unlock(). On failure returns false with no lock held.
 */
bool cgroup_lock_live_group(struct cgroup *cgrp)
{
	mutex_lock(&cgroup_mutex);
	if (cgroup_is_removed(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(cgroup_lock_live_group);
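
/*
 * Usage sketch (hypothetical handler name): write-side handlers follow
 * this take-check-release pattern so they never operate on a dying
 * cgroup; cgroup_release_agent_write() below is the in-file example.
 */
#if 0
static int example_write_handler(struct cgroup *cgrp)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	/* ... safely dereference cgrp->root here ... */
	cgroup_unlock();
	return 0;
}
#endif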

static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
	if (strlen(buffer) >= PATH_MAX)
		return -EINVAL;
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	strcpy(cgrp->root->release_agent_path, buffer);
	cgroup_unlock();
	return 0;
}

static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *seq)
{
	if (!cgroup_lock_live_group(cgrp))
		return -ENODEV;
	seq_puts(seq, cgrp->root->release_agent_path);
	seq_putc(seq, '\n');
	cgroup_unlock();
	return 0;
}

/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64

static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
				struct file *file,
				const char __user *userbuf,
				size_t nbytes, loff_t *unused_ppos)
{
	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	char *end;

	if (!nbytes)
		return -EINVAL;
	if (nbytes >= sizeof(buffer))
		return -E2BIG;
	if (copy_from_user(buffer, userbuf, nbytes))
		return -EFAULT;

	buffer[nbytes] = 0;	/* nul-terminate */
	if (cft->write_u64) {
		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_u64(cgrp, cft, val);
	} else {
		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
		if (*end)
			return -EINVAL;
		retval = cft->write_s64(cgrp, cft, val);
	}
	if (!retval)
		retval = nbytes;
	return retval;
}
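
/*
 * Worked example: a write of "  42\n" is stripped to "42" and handed
 * to ->write_u64() as 42, while "42x" leaves *end pointing at 'x' and
 * fails with -EINVAL.  Base 0 means "0x2a" and "052" are accepted too.
 */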

static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
				   struct file *file,
				   const char __user *userbuf,
				   size_t nbytes, loff_t *unused_ppos)
{
	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
	int retval = 0;
	size_t max_bytes = cft->max_write_len;
	char *buffer = local_buffer;

	if (!max_bytes)
		max_bytes = sizeof(local_buffer) - 1;
	if (nbytes >= max_bytes)
		return -E2BIG;
	/* Allocate a dynamic buffer if we need one */
	if (nbytes >= sizeof(local_buffer)) {
		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
		if (buffer == NULL)
			return -ENOMEM;
	}
	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
		retval = -EFAULT;
		goto out;
	}

	buffer[nbytes] = 0;	/* nul-terminate */
	retval = cft->write_string(cgrp, cft, strstrip(buffer));
	if (!retval)
		retval = nbytes;
out:
	if (buffer != local_buffer)
		kfree(buffer);
	return retval;
}

static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;
	if (cft->write)
		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_u64 || cft->write_s64)
		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->write_string)
		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->trigger) {
		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
		return ret ? ret : nbytes;
	}
	return -EINVAL;
}

static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	u64 val = cft->read_u64(cgrp, cft);
	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes,
			       loff_t *ppos)
{
	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
	s64 val = cft->read_s64(cgrp, cft);
	int len = sprintf(tmp, "%lld\n", (long long) val);

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static ssize_t cgroup_file_read(struct file *file, char __user *buf,
				size_t nbytes, loff_t *ppos)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);

	if (cgroup_is_removed(cgrp))
		return -ENODEV;

	if (cft->read)
		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_u64)
		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
	if (cft->read_s64)
		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
	return -EINVAL;
}

/*
 * seqfile ops/methods for returning structured data. Currently just
 * supports string->u64 maps, but can be extended in future.
 */

struct cgroup_seqfile_state {
	struct cftype *cft;
	struct cgroup *cgroup;
};

static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
	struct seq_file *sf = cb->state;
	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
}

static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct cgroup_seqfile_state *state = m->private;
	struct cftype *cft = state->cft;
	if (cft->read_map) {
		struct cgroup_map_cb cb = {
			.fill = cgroup_map_add,
			.state = m,
		};
		return cft->read_map(state->cgroup, cft, &cb);
	}
	return cft->read_seq_string(state->cgroup, cft, m);
}

static int cgroup_seqfile_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	kfree(seq->private);
	return single_release(inode, file);
}

static const struct file_operations cgroup_seqfile_operations = {
	.read = seq_read,
	.write = cgroup_file_write,
	.llseek = seq_lseek,
	.release = cgroup_seqfile_release,
};

static int cgroup_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct cftype *cft;

	err = generic_file_open(inode, file);
	if (err)
		return err;
	cft = __d_cft(file->f_dentry);

	if (cft->read_map || cft->read_seq_string) {
		struct cgroup_seqfile_state *state =
			kzalloc(sizeof(*state), GFP_USER);
		if (!state)
			return -ENOMEM;
		state->cft = cft;
		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
		file->f_op = &cgroup_seqfile_operations;
		err = single_open(file, cgroup_seqfile_show, state);
		if (err < 0)
			kfree(state);
	} else if (cft->open)
		err = cft->open(inode, file);
	else
		err = 0;

	return err;
}

static int cgroup_file_release(struct inode *inode, struct file *file)
{
	struct cftype *cft = __d_cft(file->f_dentry);
	if (cft->release)
		return cft->release(inode, file);
	return 0;
}

/*
 * cgroup_rename - Only allow simple rename of directories in place.
 */
static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry)
{
	if (!S_ISDIR(old_dentry->d_inode->i_mode))
		return -ENOTDIR;
	if (new_dentry->d_inode)
		return -EEXIST;
	if (old_dir != new_dir)
		return -EIO;
	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
}

static const struct file_operations cgroup_file_operations = {
	.read = cgroup_file_read,
	.write = cgroup_file_write,
	.llseek = generic_file_llseek,
	.open = cgroup_file_open,
	.release = cgroup_file_release,
};

static const struct inode_operations cgroup_dir_inode_operations = {
	.lookup = cgroup_lookup,
	.mkdir = cgroup_mkdir,
	.rmdir = cgroup_rmdir,
	.rename = cgroup_rename,
};

static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry,
				    struct nameidata *nd)
{
	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);
	d_add(dentry, NULL);
	return NULL;
}

/*
 * Check if a file is a control file
 */
static inline struct cftype *__file_cft(struct file *file)
{
	if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
		return ERR_PTR(-EINVAL);
	return __d_cft(file->f_dentry);
}

static int cgroup_create_file(struct dentry *dentry, mode_t mode,
			      struct super_block *sb)
{
	struct inode *inode;

	if (!dentry)
		return -ENOENT;
	if (dentry->d_inode)
		return -EEXIST;

	inode = cgroup_new_inode(mode, sb);
	if (!inode)
		return -ENOMEM;

	if (S_ISDIR(mode)) {
		inode->i_op = &cgroup_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;

		/* start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);

		/* start with the directory inode held, so that we can
		 * populate it without racing with another mkdir */
		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
	} else if (S_ISREG(mode)) {
		inode->i_size = 0;
		inode->i_fop = &cgroup_file_operations;
	}
	d_instantiate(dentry, inode);
	dget(dentry);	/* Extra count - pin the dentry in core */
	return 0;
}

/*
 * cgroup_create_dir - create a directory for an object.
 * @cgrp: the cgroup we create the directory for. It must have a valid
 *        ->parent field. And we are going to fill its ->dentry field.
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new directory.
 */
static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
			     mode_t mode)
{
	struct dentry *parent;
	int error = 0;

	parent = cgrp->parent->dentry;
	error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
	if (!error) {
		dentry->d_fsdata = cgrp;
		inc_nlink(parent->d_inode);
		rcu_assign_pointer(cgrp->dentry, dentry);
		dget(dentry);
	}
	dput(dentry);

	return error;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static mode_t cgroup_file_mode(const struct cftype *cft)
{
	mode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read || cft->read_u64 || cft->read_s64 ||
	    cft->read_map || cft->read_seq_string)
		mode |= S_IRUGO;

	if (cft->write || cft->write_u64 || cft->write_s64 ||
	    cft->write_string || cft->trigger)
		mode |= S_IWUSR;

	return mode;
}
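
/*
 * Worked example: a cftype with only .read_u64 set yields S_IRUGO
 * (0444); adding .write_u64 as well yields S_IRUGO | S_IWUSR (0644);
 * a pure .trigger file ends up write-only, S_IWUSR (0200).
 */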

int cgroup_add_file(struct cgroup *cgrp,
		    struct cgroup_subsys *subsys,
		    const struct cftype *cft)
{
	struct dentry *dir = cgrp->dentry;
	struct dentry *dentry;
	int error;
	mode_t mode;

	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
		strcpy(name, subsys->name);
		strcat(name, ".");
	}
	strcat(name, cft->name);
	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
	dentry = lookup_one_len(name, dir, strlen(name));
	if (!IS_ERR(dentry)) {
		mode = cgroup_file_mode(cft);
		error = cgroup_create_file(dentry, mode | S_IFREG,
					   cgrp->root->sb);
		if (!error)
			dentry->d_fsdata = (void *)cft;
		dput(dentry);
	} else
		error = PTR_ERR(dentry);
	return error;
}
EXPORT_SYMBOL_GPL(cgroup_add_file);

int cgroup_add_files(struct cgroup *cgrp,
		     struct cgroup_subsys *subsys,
		     const struct cftype cft[],
		     int count)
{
	int i, err;
	for (i = 0; i < count; i++) {
		err = cgroup_add_file(cgrp, subsys, &cft[i]);
		if (err)
			return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_add_files);
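
/*
 * Subsystem-side sketch (all names hypothetical): control files are
 * declared once in a static array and registered from the subsystem's
 * populate() callback; the "example." prefix is added automatically
 * from subsys->name unless the hierarchy was mounted with noprefix.
 */
#if 0
static u64 example_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	return 42;
}

static struct cftype example_files[] = {
	{
		.name = "value",
		.read_u64 = example_read_u64,
	},
};

static int example_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, example_files,
				ARRAY_SIZE(example_files));
}
#endif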

/**
 * cgroup_task_count - count the number of tasks in a cgroup.
 * @cgrp: the cgroup in question
 *
 * Return the number of tasks in the cgroup.
 */
int cgroup_task_count(const struct cgroup *cgrp)
{
	int count = 0;
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
		count += atomic_read(&link->cg->refcount);
	}
	read_unlock(&css_set_lock);
	return count;
}

/*
 * Advance a list_head iterator.  The iterator should be positioned at
 * the start of a css_set
 */
static void cgroup_advance_iter(struct cgroup *cgrp,
				struct cgroup_iter *it)
{
	struct list_head *l = it->cg_link;
	struct cg_cgroup_link *link;
	struct css_set *cg;

	/* Advance to the next non-empty css_set */
	do {
		l = l->next;
		if (l == &cgrp->css_sets) {
			it->cg_link = NULL;
			return;
		}
		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
		cg = link->cg;
	} while (list_empty(&cg->tasks));
	it->cg_link = l;
	it->task = cg->tasks.next;
}

/*
 * To reduce the fork() overhead for systems that are not actually
 * using their cgroups capability, we don't maintain the lists running
 * through each css_set to its tasks until we see the list actually
 * used - in other words after the first call to cgroup_iter_start().
 *
 * The tasklist_lock is not held here, as do_each_thread() and
 * while_each_thread() are protected by RCU.
 */
static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;
	write_lock(&css_set_lock);
	use_task_css_set_links = 1;
	do_each_thread(g, p) {
		task_lock(p);
		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 */
		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
			list_add(&p->cg_list, &p->cgroups->tasks);
		task_unlock(p);
	} while_each_thread(g, p);
	write_unlock(&css_set_lock);
}

void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
{
	/*
	 * The first time anyone tries to iterate across a cgroup,
	 * we need to enable the list linking each css_set to its
	 * tasks, and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();

	read_lock(&css_set_lock);
	it->cg_link = &cgrp->css_sets;
	cgroup_advance_iter(cgrp, it);
}

struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
				     struct cgroup_iter *it)
{
	struct task_struct *res;
	struct list_head *l = it->task;
	struct cg_cgroup_link *link;

	/* If the iterator cg is NULL, we have no tasks */
	if (!it->cg_link)
		return NULL;
	res = list_entry(l, struct task_struct, cg_list);
	/* Advance iterator to find next entry */
	l = l->next;
	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
	if (l == &link->cg->tasks) {
		/* We reached the end of this task list - move on to
		 * the next cg_cgroup_link */
		cgroup_advance_iter(cgrp, it);
	} else {
		it->task = l;
	}
	return res;
}

void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
{
	read_unlock(&css_set_lock);
}
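
/*
 * Iterator usage sketch (hypothetical function): count runnable tasks
 * in a cgroup.  css_set_lock is read-held from cgroup_iter_start() to
 * cgroup_iter_end(), so the loop body must not sleep; compare
 * cgroupstats_build() below for the in-file equivalent.
 */
#if 0
static int example_count_running(struct cgroup *cgrp)
{
	struct cgroup_iter it;
	struct task_struct *tsk;
	int n = 0;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it)))
		if (tsk->state == TASK_RUNNING)
			n++;
	cgroup_iter_end(cgrp, &it);
	return n;
}
#endif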

static inline int started_after_time(struct task_struct *t1,
				     struct timespec *time,
				     struct task_struct *t2)
{
	int start_diff = timespec_compare(&t1->start_time, time);
	if (start_diff > 0) {
		return 1;
	} else if (start_diff < 0) {
		return 0;
	} else {
		/*
		 * Arbitrarily, if two processes started at the same
		 * time, we'll say that the lower pointer value
		 * started first. Note that t2 may have exited by now
		 * so this may not be a valid pointer any longer, but
		 * that's fine - it still serves to distinguish
		 * between two tasks started (effectively) simultaneously.
		 */
		return t1 > t2;
	}
}

/*
 * This function is a callback from heap_insert() and is used to order
 * the heap.
 * In this case we order the heap in descending task start time.
 */
static inline int started_after(void *p1, void *p2)
{
	struct task_struct *t1 = p1;
	struct task_struct *t2 = p2;
	return started_after_time(t1, &t2->start_time, t2);
}

/**
 * cgroup_scan_tasks - iterate though all the tasks in a cgroup
 * @scan: struct cgroup_scanner containing arguments for the scan
 *
 * Arguments include pointers to callback functions test_task() and
 * process_task().
 * Iterate through all the tasks in a cgroup, calling test_task() for each,
 * and if it returns true, call process_task() for it also.
 * The test_task pointer may be NULL, meaning always true (select all tasks).
 * Effectively duplicates cgroup_iter_{start,next,end}()
 * but does not lock css_set_lock for the call to process_task().
 * The struct cgroup_scanner may be embedded in any structure of the caller's
 * creation.
 * It is guaranteed that process_task() will act on every task that
 * is a member of the cgroup for the duration of this call. This
 * function may or may not call process_task() for tasks that exit
 * or move to a different cgroup during the call, or are forked or
 * move into the cgroup during the call.
 *
 * Note that test_task() may be called with locks held, and may in some
 * situations be called multiple times for the same task, so it should
 * be cheap.
 * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has
 * been pre-allocated and will be used for heap operations (and its "gt"
 * member will be overwritten), else a temporary heap will be used
 * (allocation of which may cause this function to fail).
 */
int cgroup_scan_tasks(struct cgroup_scanner *scan)
{
	int retval, i;
	struct cgroup_iter it;
	struct task_struct *p, *dropped;
	/* Never dereference latest_task, since it's not refcounted */
	struct task_struct *latest_task = NULL;
	struct ptr_heap tmp_heap;
	struct ptr_heap *heap;
	struct timespec latest_time = { 0, 0 };

	if (scan->heap) {
		/* The caller supplied our heap and pre-allocated its memory */
		heap = scan->heap;
		heap->gt = &started_after;
	} else {
		/* We need to allocate our own heap memory */
		heap = &tmp_heap;
		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
		if (retval)
			/* cannot allocate the heap */
			return retval;
	}

 again:
	/*
	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
	 * to determine which are of interest, and using the scanner's
	 * "process_task" callback to process any of them that need an update.
	 * Since we don't want to hold any locks during the task updates,
	 * gather tasks to be processed in a heap structure.
	 * The heap is sorted by descending task start time.
	 * If the statically-sized heap fills up, we overflow tasks that
	 * started later, and in future iterations only consider tasks that
	 * started after the latest task in the previous pass. This
	 * guarantees forward progress and that we don't miss any tasks.
	 */
	heap->size = 0;
	cgroup_iter_start(scan->cg, &it);
	while ((p = cgroup_iter_next(scan->cg, &it))) {
		/*
		 * Only affect tasks that qualify per the caller's callback,
		 * if one was provided
		 */
		if (scan->test_task && !scan->test_task(p, scan))
			continue;
		/*
		 * Only process tasks that started after the last task
		 * we processed
		 */
		if (!started_after_time(p, &latest_time, latest_task))
			continue;
		dropped = heap_insert(heap, p);
		if (dropped == NULL) {
			/*
			 * The new task was inserted; the heap wasn't
			 * previously full
			 */
			get_task_struct(p);
		} else if (dropped != p) {
			/*
			 * The new task was inserted, and pushed out a
			 * different task
			 */
			get_task_struct(p);
			put_task_struct(dropped);
		}
		/*
		 * Else the new task was newer than anything already in
		 * the heap and wasn't inserted
		 */
	}
	cgroup_iter_end(scan->cg, &it);

	if (heap->size) {
		for (i = 0; i < heap->size; i++) {
			struct task_struct *q = heap->ptrs[i];
			if (i == 0) {
				latest_time = q->start_time;
				latest_task = q;
			}
			/* Process the task per the caller's callback */
			scan->process_task(q, scan);
			put_task_struct(q);
		}
		/*
		 * If we had to process any tasks at all, scan again
		 * in case some of them were in the middle of forking
		 * children that didn't get processed.
		 * Not the most efficient way to do it, but it avoids
		 * having to take callback_mutex in the fork path
		 */
		goto again;
	}
	if (heap == &tmp_heap)
		heap_free(&tmp_heap);
	return 0;
}
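
/*
 * Scanner usage sketch (hypothetical callbacks): visit every task in a
 * cgroup and poke it outside of css_set_lock.  A NULL .test_task
 * selects all tasks; a NULL .heap makes the function allocate a
 * temporary heap internally.
 */
#if 0
static void example_process(struct task_struct *p,
			    struct cgroup_scanner *scan)
{
	set_tsk_need_resched(p);
}

static int example_scan(struct cgroup *cgrp)
{
	struct cgroup_scanner scan = {
		.cg = cgrp,
		.test_task = NULL,	/* select all tasks */
		.process_task = example_process,
		.heap = NULL,		/* use a temporary heap */
	};
	return cgroup_scan_tasks(&scan);
}
#endif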

/*
 * Stuff for reading the 'tasks'/'procs' files.
 *
 * Reading this file can return large amounts of data if a cgroup has
 * *lots* of attached tasks. So it may need several calls to read(),
 * but we cannot guarantee that the information we produce is correct
 * unless we produce it entirely atomically.
 *
 */

/*
 * The following functions "fix" the issue where there are more pids
 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
 * TODO: replace with a kernel-wide solution to this problem
 */
#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
static void *pidlist_allocate(int count)
{
	if (PIDLIST_TOO_LARGE(count))
		return vmalloc(count * sizeof(pid_t));
	else
		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
}
static void pidlist_free(void *p)
{
	if (is_vmalloc_addr(p))
		vfree(p);
	else
		kfree(p);
}
static void *pidlist_resize(void *p, int newcount)
{
	void *newlist;
	/* note: if new alloc fails, old p will still be valid either way */
	if (is_vmalloc_addr(p)) {
		newlist = vmalloc(newcount * sizeof(pid_t));
		if (!newlist)
			return NULL;
		memcpy(newlist, p, newcount * sizeof(pid_t));
		vfree(p);
	} else {
		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
	}
	return newlist;
}
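
/*
 * Concretely: with 4KiB pages and a 4-byte pid_t, PIDLIST_TOO_LARGE()
 * sends anything above 2 * PAGE_SIZE / sizeof(pid_t) == 2048 entries
 * down the vmalloc path (the numbers assume those typical sizes).
 */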

/*
 * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
 * If the new stripped list is sufficiently smaller and there's enough memory
 * to allocate a new buffer, will let go of the unneeded memory. Returns the
 * number of unique elements.
 */
/* is the size difference enough that we should re-allocate the array? */
#define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
static int pidlist_uniq(pid_t **p, int length)
{
	int src, dest = 1;
	pid_t *list = *p;
	pid_t *newlist;

	/*
	 * we presume the 0th element is unique, so src starts at 1. trivial
	 * edge cases first; no work needs to be done for either
	 */
	if (length == 0 || length == 1)
		return length;
	/* src and dest walk down the list; dest counts unique elements */
	for (src = 1; src < length; src++) {
		/* find next unique element */
		while (list[src] == list[src-1]) {
			src++;
			if (src == length)
				goto after;
		}
		/* dest always points to where the next unique element goes */
		list[dest] = list[src];
		dest++;
	}
after:
	/*
	 * if the length difference is large enough, we want to allocate a
	 * smaller buffer to save memory. if this fails due to out of memory,
	 * we'll just stay with what we've got.
	 */
	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
		newlist = pidlist_resize(list, dest);
		if (newlist)
			*p = newlist;
	}
	return dest;
}
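
/*
 * Worked example: a sorted input of {1, 1, 2, 3, 3, 3} (length 6) is
 * compacted in place to {1, 2, 3, ...} and 3 is returned.  The buffer
 * is only shrunk when the count dropped by at least PAGE_SIZE entries;
 * note the macro compares element counts, not bytes.
 */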

static int cmppid(const void *a, const void *b)
{
	return *(pid_t *)a - *(pid_t *)b;
}

/*
 * find the appropriate pidlist for our purpose (given procs vs tasks)
 * returns with the lock on that pidlist already held, and takes care
 * of the use count, or returns NULL with no locks held if we're out of
 * memory.
 */
static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
						  enum cgroup_filetype type)
{
	struct cgroup_pidlist *l;
	/* don't need task_nsproxy() if we're looking at ourself */
	struct pid_namespace *ns = current->nsproxy->pid_ns;

	/*
	 * We can't drop the pidlist_mutex before taking the l->mutex in case
	 * the last ref-holder is trying to remove l from the list at the same
	 * time. Holding the pidlist_mutex precludes somebody taking whichever
	 * list we find out from under us - compare release_pid_array().
	 */
	mutex_lock(&cgrp->pidlist_mutex);
	list_for_each_entry(l, &cgrp->pidlists, links) {
		if (l->key.type == type && l->key.ns == ns) {
			/* make sure l doesn't vanish out from under us */
			down_write(&l->mutex);
			mutex_unlock(&cgrp->pidlist_mutex);
			return l;
		}
	}
	/* entry not found; create a new one */
	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
	if (!l) {
		mutex_unlock(&cgrp->pidlist_mutex);
		return l;
	}
	init_rwsem(&l->mutex);
	down_write(&l->mutex);
	l->key.type = type;
	l->key.ns = get_pid_ns(ns);
	l->use_count = 0; /* don't increment here */
	l->list = NULL;
	l->owner = cgrp;
	list_add(&l->links, &cgrp->pidlists);
	mutex_unlock(&cgrp->pidlist_mutex);
	return l;
}

/*
 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
 */
static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
			      struct cgroup_pidlist **lp)
{
	pid_t *array;
	int length;
	int pid, n = 0; /* used for populating the array */
	struct cgroup_iter it;
	struct task_struct *tsk;
	struct cgroup_pidlist *l;

	/*
	 * If cgroup gets more users after we read count, we won't have
	 * enough space - tough. This race is indistinguishable to the
	 * caller from the case that the additional cgroup users didn't
	 * show up until sometime later on.
	 */
	length = cgroup_task_count(cgrp);
	array = pidlist_allocate(length);
	if (!array)
		return -ENOMEM;
	/* now, populate the array */
	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		if (unlikely(n == length))
			break;
		/* get tgid or pid for procs or tasks file respectively */
		if (type == CGROUP_FILE_PROCS)
			pid = task_tgid_vnr(tsk);
		else
			pid = task_pid_vnr(tsk);
		if (pid > 0) /* make sure to only use valid results */
			array[n++] = pid;
	}
	cgroup_iter_end(cgrp, &it);
	length = n;
	/* now sort & (if procs) strip out duplicates */
	sort(array, length, sizeof(pid_t), cmppid, NULL);
	if (type == CGROUP_FILE_PROCS)
		length = pidlist_uniq(&array, length);
	l = cgroup_pidlist_find(cgrp, type);
	if (!l) {
		pidlist_free(array);
		return -ENOMEM;
	}
	/* store array, freeing old if necessary - lock already held */
	pidlist_free(l->list);
	l->list = array;
	l->length = length;
	l->use_count++;
	up_write(&l->mutex);
	*lp = l;
	return 0;
}

/**
 * cgroupstats_build - build and fill cgroupstats
 * @stats: cgroupstats to fill information into
 * @dentry: A dentry entry belonging to the cgroup for which stats have
 * been requested.
 *
 * Build and fill cgroupstats so that taskstats can export it to user
 * space.
 */
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
	int ret = -EINVAL;
	struct cgroup *cgrp;
	struct cgroup_iter it;
	struct task_struct *tsk;

	/*
	 * Validate dentry by checking the superblock operations,
	 * and make sure it's a directory.
	 */
	if (dentry->d_sb->s_op != &cgroup_ops ||
	    !S_ISDIR(dentry->d_inode->i_mode))
		goto err;

	ret = 0;
	cgrp = dentry->d_fsdata;

	cgroup_iter_start(cgrp, &it);
	while ((tsk = cgroup_iter_next(cgrp, &it))) {
		switch (tsk->state) {
		case TASK_RUNNING:
			stats->nr_running++;
			break;
		case TASK_INTERRUPTIBLE:
			stats->nr_sleeping++;
			break;
		case TASK_UNINTERRUPTIBLE:
			stats->nr_uninterruptible++;
			break;
		case TASK_STOPPED:
			stats->nr_stopped++;
			break;
		default:
			if (delayacct_is_task_waiting_on_io(tsk))
				stats->nr_io_wait++;
			break;
		}
	}
	cgroup_iter_end(cgrp, &it);

err:
	return ret;
}


/*
 * seq_file methods for the tasks/procs files. The seq_file position is the
 * next pid to display; the seq_file iterator is a pointer to the pid
 * in the cgroup->l->list array.
 */

static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
{
	/*
	 * Initially we receive a position value that corresponds to
	 * one more than the last pid shown (or 0 on the first call or
	 * after a seek to the start). Use a binary-search to find the
	 * next pid to display, if any
	 */
	struct cgroup_pidlist *l = s->private;
	int index = 0, pid = *pos;
	int *iter;

	down_read(&l->mutex);
	if (pid) {
		int end = l->length;

		while (index < end) {
			int mid = (index + end) / 2;
			if (l->list[mid] == pid) {
				index = mid;
				break;
			} else if (l->list[mid] <= pid)
				index = mid + 1;
			else
				end = mid;
		}
	}
	/* If we're off the end of the array, we're done */
	if (index >= l->length)
		return NULL;
	/* Update the abstract position to be the actual pid that we found */
	iter = l->list + index;
	*pos = *iter;
	return iter;
}

static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
	struct cgroup_pidlist *l = s->private;
	up_read(&l->mutex);
}

static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct cgroup_pidlist *l = s->private;
	pid_t *p = v;
	pid_t *end = l->list + l->length;
	/*
	 * Advance to the next pid in the array. If this goes off the
	 * end, we're done
	 */
	p++;
	if (p >= end) {
		return NULL;
	} else {
		*pos = *p;
		return p;
	}
}

static int cgroup_pidlist_show(struct seq_file *s, void *v)
{
	return seq_printf(s, "%d\n", *(int *)v);
}

/*
 * seq_operations functions for iterating on pidlists through seq_file -
 * independent of whether it's tasks or procs
 */
static const struct seq_operations cgroup_pidlist_seq_operations = {
	.start = cgroup_pidlist_start,
	.stop = cgroup_pidlist_stop,
	.next = cgroup_pidlist_next,
	.show = cgroup_pidlist_show,
};

static void cgroup_release_pid_array(struct cgroup_pidlist *l)
{
	/*
	 * the case where we're the last user of this particular pidlist will
	 * have us remove it from the cgroup's list, which entails taking the
	 * mutex. since in pidlist_find the pidlist->lock depends on cgroup->
	 * pidlist_mutex, we have to take pidlist_mutex first.
	 */
	mutex_lock(&l->owner->pidlist_mutex);
	down_write(&l->mutex);
	BUG_ON(!l->use_count);
	if (!--l->use_count) {
		/* we're the last user if refcount is 0; remove and free */
		list_del(&l->links);
		mutex_unlock(&l->owner->pidlist_mutex);
		pidlist_free(l->list);
		put_pid_ns(l->key.ns);
		up_write(&l->mutex);
		kfree(l);
		return;
	}
	mutex_unlock(&l->owner->pidlist_mutex);
	up_write(&l->mutex);
}

static int cgroup_pidlist_release(struct inode *inode, struct file *file)
{
	struct cgroup_pidlist *l;
	if (!(file->f_mode & FMODE_READ))
		return 0;
	/*
	 * the seq_file will only be initialized if the file was opened for
	 * reading; hence we check if it's not null only in that case.
	 */
	l = ((struct seq_file *)file->private_data)->private;
	cgroup_release_pid_array(l);
	return seq_release(inode, file);
}

static const struct file_operations cgroup_pidlist_operations = {
	.read = seq_read,
	.llseek = seq_lseek,
	.write = cgroup_file_write,
	.release = cgroup_pidlist_release,
};

/*
 * The following functions handle opens on a file that displays a pidlist
 * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
 * in the cgroup.
 */
/* helper function for the two below it */
static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
{
	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
	struct cgroup_pidlist *l;
	int retval;

	/* Nothing to do for write-only files */
	if (!(file->f_mode & FMODE_READ))
		return 0;

	/* have the array populated */
	retval = pidlist_array_load(cgrp, type, &l);
	if (retval)
		return retval;
	/* configure file information */
	file->f_op = &cgroup_pidlist_operations;

	retval = seq_open(file, &cgroup_pidlist_seq_operations);
	if (retval) {
		cgroup_release_pid_array(l);
		return retval;
	}
	((struct seq_file *)file->private_data)->private = l;
	return 0;
}
static int cgroup_tasks_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
}
static int cgroup_procs_open(struct inode *unused, struct file *file)
{
	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}

static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
					 struct cftype *cft)
{
	return notify_on_release(cgrp);
}

static int cgroup_write_notify_on_release(struct cgroup *cgrp,
					  struct cftype *cft,
					  u64 val)
{
	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
	if (val)
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	else
		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
	return 0;
}

/*
 * Unregister event and free resources.
 *
 * Gets called from workqueue.
 */
static void cgroup_event_remove(struct work_struct *work)
{
	struct cgroup_event *event = container_of(work, struct cgroup_event,
			remove);
	struct cgroup *cgrp = event->cgrp;

	event->cft->unregister_event(cgrp, event->cft, event->eventfd);

	eventfd_ctx_put(event->eventfd);
	kfree(event);
	dput(cgrp->dentry);
}

/*
 * Gets called on POLLHUP on eventfd when user closes it.
 *
 * Called with wqh->lock held and interrupts disabled.
 */
static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
		int sync, void *key)
{
	struct cgroup_event *event = container_of(wait,
			struct cgroup_event, wait);
	struct cgroup *cgrp = event->cgrp;
	unsigned long flags = (unsigned long)key;

	if (flags & POLLHUP) {
		__remove_wait_queue(event->wqh, &event->wait);
		spin_lock(&cgrp->event_list_lock);
		list_del(&event->list);
		spin_unlock(&cgrp->event_list_lock);
		/*
		 * We are in atomic context, but cgroup_event_remove() may
		 * sleep, so we have to call it in workqueue.
		 */
		schedule_work(&event->remove);
	}

	return 0;
}

static void cgroup_event_ptable_queue_proc(struct file *file,
		wait_queue_head_t *wqh, poll_table *pt)
{
	struct cgroup_event *event = container_of(pt,
			struct cgroup_event, pt);

	event->wqh = wqh;
	add_wait_queue(wqh, &event->wait);
}

/*
 * Parse input and register new cgroup event handler.
 *
 * Input must be in format '<event_fd> <control_fd> <args>'.
 * Interpretation of args is defined by control file implementation.
 */
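/*
 * Illustrative userspace sequence (a sketch; the choice of control file and
 * the lack of error handling are assumptions for brevity):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("memory.oom_control", O_RDONLY);
 *	int wfd = open("cgroup.event_control", O_WRONLY);
 *	uint64_t counter;
 *	char buf[32];
 *
 *	snprintf(buf, sizeof(buf), "%d %d", efd, cfd);
 *	write(wfd, buf, strlen(buf));
 *	read(efd, &counter, sizeof(counter));
 *
 * The write() registers the event; the read() then blocks until the control
 * file's implementation signals the eventfd.
 */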
static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
				      const char *buffer)
{
	struct cgroup_event *event = NULL;
	unsigned int efd, cfd;
	struct file *efile = NULL;
	struct file *cfile = NULL;
	char *endp;
	int ret;

	efd = simple_strtoul(buffer, &endp, 10);
	if (*endp != ' ')
		return -EINVAL;
	buffer = endp + 1;

	cfd = simple_strtoul(buffer, &endp, 10);
	if ((*endp != ' ') && (*endp != '\0'))
		return -EINVAL;
	buffer = endp + 1;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;
	event->cgrp = cgrp;
	INIT_LIST_HEAD(&event->list);
	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
	INIT_WORK(&event->remove, cgroup_event_remove);

	efile = eventfd_fget(efd);
	if (IS_ERR(efile)) {
		ret = PTR_ERR(efile);
		goto fail;
	}

	event->eventfd = eventfd_ctx_fileget(efile);
	if (IS_ERR(event->eventfd)) {
		ret = PTR_ERR(event->eventfd);
		goto fail;
	}

	cfile = fget(cfd);
	if (!cfile) {
		ret = -EBADF;
		goto fail;
	}

	/* the process needs read permission on the control file */
	ret = file_permission(cfile, MAY_READ);
	if (ret < 0)
		goto fail;

	event->cft = __file_cft(cfile);
	if (IS_ERR(event->cft)) {
		ret = PTR_ERR(event->cft);
		goto fail;
	}

	if (!event->cft->register_event || !event->cft->unregister_event) {
		ret = -EINVAL;
		goto fail;
	}

	ret = event->cft->register_event(cgrp, event->cft,
			event->eventfd, buffer);
	if (ret)
		goto fail;

	if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
		event->cft->unregister_event(cgrp, event->cft, event->eventfd);
		ret = 0;
		goto fail;
	}

	/*
	 * Events should be removed after rmdir of cgroup directory, but before
	 * destroying subsystem state objects. Let's take a reference to the
	 * cgroup directory dentry to do that.
	 */
	dget(cgrp->dentry);

	spin_lock(&cgrp->event_list_lock);
	list_add(&event->list, &cgrp->event_list);
	spin_unlock(&cgrp->event_list_lock);

	fput(cfile);
	fput(efile);

	return 0;

fail:
	if (cfile)
		fput(cfile);

	if (event && event->eventfd && !IS_ERR(event->eventfd))
		eventfd_ctx_put(event->eventfd);

	if (!IS_ERR_OR_NULL(efile))
		fput(efile);

	kfree(event);

	return ret;
}

static u64 cgroup_clone_children_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	return clone_children(cgrp);
}

static int cgroup_clone_children_write(struct cgroup *cgrp,
				       struct cftype *cft,
				       u64 val)
{
	if (val)
		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
	else
		clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
	return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */
/* for hysterical raisins, we can't put this on the older files */
#define CGROUP_FILE_GENERIC_PREFIX "cgroup."
static struct cftype files[] = {
	{
		.name = "tasks",
		.open = cgroup_tasks_open,
		.write_u64 = cgroup_tasks_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "procs",
		.open = cgroup_procs_open,
		.write_u64 = cgroup_procs_write,
		.release = cgroup_pidlist_release,
		.mode = S_IRUGO | S_IWUSR,
	},
	{
		.name = "notify_on_release",
		.read_u64 = cgroup_read_notify_on_release,
		.write_u64 = cgroup_write_notify_on_release,
	},
	{
		.name = CGROUP_FILE_GENERIC_PREFIX "event_control",
		.write_string = cgroup_write_event_control,
		.mode = S_IWUGO,
	},
	{
		.name = "cgroup.clone_children",
		.read_u64 = cgroup_clone_children_read,
		.write_u64 = cgroup_clone_children_write,
	},
};

static struct cftype cft_release_agent = {
	.name = "release_agent",
	.read_seq_string = cgroup_release_agent_show,
	.write_string = cgroup_release_agent_write,
	.max_write_len = PATH_MAX,
};

static int cgroup_populate_dir(struct cgroup *cgrp)
{
	int err;
	struct cgroup_subsys *ss;

	/* First clear out any existing files */
	cgroup_clear_directory(cgrp->dentry);

	err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
	if (err < 0)
		return err;

	if (cgrp == cgrp->top_cgroup) {
		if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
			return err;
	}

	for_each_subsys(cgrp->root, ss) {
		if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
			return err;
	}
	/* This cgroup is ready now */
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		/*
		 * Update id->css pointer and make this css visible from
		 * CSS ID functions. This pointer will be dereferenced
		 * from the RCU read side without locks.
		 */
		if (css->id)
			rcu_assign_pointer(css->id->css, css);
	}

	return 0;
}

static void init_cgroup_css(struct cgroup_subsys_state *css,
			    struct cgroup_subsys *ss,
			    struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	atomic_set(&css->refcnt, 1);
	css->flags = 0;
	css->id = NULL;
	if (cgrp == dummytop)
		set_bit(CSS_ROOT, &css->flags);
	BUG_ON(cgrp->subsys[ss->subsys_id]);
	cgrp->subsys[ss->subsys_id] = css;
}

static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
{
	/* We need to take each hierarchy_mutex in a consistent order */
	int i;

	/*
	 * No worry about a race with rebind_subsystems that might mess up the
	 * locking order, since both parties are under cgroup_mutex.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		if (ss->root == root)
			mutex_lock(&ss->hierarchy_mutex);
	}
}

static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
{
	int i;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		if (ss->root == root)
			mutex_unlock(&ss->hierarchy_mutex);
	}
}

/*
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			  mode_t mode)
{
	struct cgroup *cgrp;
	struct cgroupfs_root *root = parent->root;
	int err = 0;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;

	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups. This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	atomic_inc(&sb->s_active);

	mutex_lock(&cgroup_mutex);

	init_cgroup_housekeeping(cgrp);

	cgrp->parent = parent;
	cgrp->root = parent->root;
	cgrp->top_cgroup = parent->top_cgroup;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (clone_children(parent))
		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);

	for_each_subsys(root, ss) {
		struct cgroup_subsys_state *css = ss->create(ss, cgrp);

		if (IS_ERR(css)) {
			err = PTR_ERR(css);
			goto err_destroy;
		}
		init_cgroup_css(css, ss, cgrp);
		if (ss->use_id) {
			err = alloc_css_id(ss, parent, cgrp);
			if (err)
				goto err_destroy;
		}
		/* At error, ->destroy() callback has to free assigned ID. */
		if (clone_children(parent) && ss->post_clone)
			ss->post_clone(ss, cgrp);
	}

	cgroup_lock_hierarchy(root);
	list_add(&cgrp->sibling, &cgrp->parent->children);
	cgroup_unlock_hierarchy(root);
	root->number_of_cgroups++;

	err = cgroup_create_dir(cgrp, dentry, mode);
	if (err < 0)
		goto err_remove;

	/* The cgroup directory was pre-locked for us */
	BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));

	err = cgroup_populate_dir(cgrp);
	/* If err < 0, we have a half-filled directory - oh well ;) */

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

 err_remove:

	cgroup_lock_hierarchy(root);
	list_del(&cgrp->sibling);
	cgroup_unlock_hierarchy(root);
	root->number_of_cgroups--;

 err_destroy:

	for_each_subsys(root, ss) {
		if (cgrp->subsys[ss->subsys_id])
			ss->destroy(ss, cgrp);
	}

	mutex_unlock(&cgroup_mutex);

	/* Release the reference count that we took on the superblock */
	deactivate_super(sb);

	kfree(cgrp);
	return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

static int cgroup_has_css_refs(struct cgroup *cgrp)
{
	/* Check the reference count on each subsystem. Since we
	 * already established that there are no tasks in the
	 * cgroup, if the css refcount is also 1, then there should
	 * be no outstanding references, so the subsystem is safe to
	 * destroy. We scan across all subsystems rather than using
	 * the per-hierarchy linked list of mounted subsystems since
	 * we can be called via check_for_release() with no
	 * synchronization other than RCU, and the subsystem linked
	 * list isn't RCU-safe */
	int i;
	/*
	 * We won't need to lock the subsys array, because the subsystems
	 * we're concerned about aren't going anywhere since our cgroup root
	 * has a reference on them.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		struct cgroup_subsys_state *css;
		/* Skip subsystems not present or not in this hierarchy */
		if (ss == NULL || ss->root != cgrp->root)
			continue;
		css = cgrp->subsys[ss->subsys_id];
		/* When called from check_for_release() it's possible
		 * that by this point the cgroup has been removed
		 * and the css deleted. But a false-positive doesn't
		 * matter, since it can only happen if the cgroup
		 * has been deleted and hence no longer needs the
		 * release agent to be called anyway. */
		if (css && (atomic_read(&css->refcnt) > 1))
			return 1;
	}
	return 0;
}

/*
 * Atomically mark all (or else none) of the cgroup's CSS objects as
 * CSS_REMOVED. Return true on success, or false if the cgroup has
 * busy subsystems. Call with cgroup_mutex held
 */
static int cgroup_clear_css_refs(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	unsigned long flags;
	bool failed = false;
	local_irq_save(flags);
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		int refcnt;
		while (1) {
			/* We can only remove a CSS with a refcnt==1 */
			refcnt = atomic_read(&css->refcnt);
			if (refcnt > 1) {
				failed = true;
				goto done;
			}
			BUG_ON(!refcnt);
			/*
			 * Drop the refcnt to 0 while we check other
			 * subsystems. This will cause any racing
			 * css_tryget() to spin until we set the
			 * CSS_REMOVED bits or abort
			 */
			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
				break;
			cpu_relax();
		}
	}
 done:
	for_each_subsys(cgrp->root, ss) {
		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
		if (failed) {
			/*
			 * Restore old refcnt if we previously managed
			 * to clear it from 1 to 0
			 */
			if (!atomic_read(&css->refcnt))
				atomic_set(&css->refcnt, 1);
		} else {
			/* Commit the fact that the CSS is removed */
			set_bit(CSS_REMOVED, &css->flags);
		}
	}
	local_irq_restore(flags);
	return !failed;
}

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	struct cgroup *cgrp = dentry->d_fsdata;
	struct dentry *d;
	struct cgroup *parent;
	DEFINE_WAIT(wait);
	struct cgroup_event *event, *tmp;
	int ret;

	/* the vfs holds both inode->i_mutex already */
again:
	mutex_lock(&cgroup_mutex);
	if (atomic_read(&cgrp->count) != 0) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	if (!list_empty(&cgrp->children)) {
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	mutex_unlock(&cgroup_mutex);

	/*
	 * In general, a subsystem has no css->refcnt after pre_destroy(). But
	 * in racy cases, a subsystem may have to get css->refcnt after
	 * pre_destroy(), and that makes rmdir return -EBUSY too often. To
	 * avoid that, we use a waitqueue for cgroup's rmdir.
	 * CGRP_WAIT_ON_RMDIR is for synchronizing rmdir and the subsystem's
	 * reference count handling. Please see css_get/put and css_tryget()
	 * and the cgroup_wakeup_rmdir_waiter() implementation.
	 */
	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

	/*
	 * Call pre_destroy handlers of subsys. Notify subsystems
	 * that rmdir() request comes.
	 */
	ret = cgroup_call_pre_destroy(cgrp);
	if (ret) {
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		return ret;
	}

	mutex_lock(&cgroup_mutex);
	parent = cgrp->parent;
	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
	if (!cgroup_clear_css_refs(cgrp)) {
		mutex_unlock(&cgroup_mutex);
		/*
		 * Because someone may call cgroup_wakeup_rmdir_waiter() before
		 * prepare_to_wait(), we need to check this flag.
		 */
		if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
			schedule();
		finish_wait(&cgroup_rmdir_waitq, &wait);
		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
		if (signal_pending(current))
			return -EINTR;
		goto again;
	}
	/* No css_tryget() can succeed after this point. */
	finish_wait(&cgroup_rmdir_waitq, &wait);
	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);

	spin_lock(&release_list_lock);
	set_bit(CGRP_REMOVED, &cgrp->flags);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	spin_unlock(&release_list_lock);

	cgroup_lock_hierarchy(cgrp->root);
	/* delete this cgroup from parent->children */
	list_del_init(&cgrp->sibling);
	cgroup_unlock_hierarchy(cgrp->root);

	d = dget(cgrp->dentry);

	cgroup_d_remove_dir(d);
	dput(d);

	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);

	/*
	 * Unregister events and notify userspace.
	 * Notify userspace about cgroup removal only after rmdir of the
	 * cgroup directory, to avoid a race between userspace and kernelspace.
	 */
	spin_lock(&cgrp->event_list_lock);
	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
		list_del(&event->list);
		remove_wait_queue(event->wqh, &event->wait);
		eventfd_signal(event->eventfd, 1);
		schedule_work(&event->remove);
	}
	spin_unlock(&cgrp->event_list_lock);

	mutex_unlock(&cgroup_mutex);
	return 0;
}

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);

	/* Create the top cgroup state for this subsystem */
	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;
	css = ss->create(ss, dummytop);
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_cgroup_css(css, ss, dummytop);

	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set is in the subsystem's top cgroup. */
	init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];

	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	mutex_init(&ss->hierarchy_mutex);
	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
	ss->active = 1;

	/* this function shouldn't be used with modular subsystems, since they
	 * need to register a subsys_id, among other things */
	BUG_ON(ss->module);
}

/**
 * cgroup_load_subsys: load and register a modular subsystem at runtime
 * @ss: the subsystem to load
 *
 * This function should be called in a modular subsystem's initcall. If the
 * subsystem is built as a module, it will be assigned a new subsys_id and set
 * up for use. If the subsystem is built-in anyway, work is delegated to the
 * simpler cgroup_init_subsys.
 */
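/*
 * Illustrative module boilerplate for such an initcall (the "foo" subsystem
 * name is an assumption, not a real subsystem):
 *
 *	static int __init foo_cgroup_init(void)
 *	{
 *		return cgroup_load_subsys(&foo_subsys);
 *	}
 *	module_init(foo_cgroup_init);
 */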
int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
{
	int i;
	struct cgroup_subsys_state *css;

	/* check name and function validity */
	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
	    ss->create == NULL || ss->destroy == NULL)
		return -EINVAL;

	/*
	 * we don't support callbacks in modular subsystems. this check is
	 * before the ss->module check for consistency; a subsystem that could
	 * be a module should still have no callbacks even if the user isn't
	 * compiling it as one.
	 */
	if (ss->fork || ss->exit)
		return -EINVAL;

	/*
	 * an optionally modular subsystem is built-in: we want to do nothing,
	 * since cgroup_init_subsys will have already taken care of it.
	 */
	if (ss->module == NULL) {
		/* a few sanity checks */
		BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
		BUG_ON(subsys[ss->subsys_id] != ss);
		return 0;
	}

	/*
	 * need to register a subsys id before anything else - for example,
	 * init_cgroup_css needs it.
	 */
	mutex_lock(&cgroup_mutex);
	/* find the first empty slot in the array */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		if (subsys[i] == NULL)
			break;
	}
	if (i == CGROUP_SUBSYS_COUNT) {
		/* maximum number of subsystems already registered! */
		mutex_unlock(&cgroup_mutex);
		return -EBUSY;
	}
	/* assign ourselves the subsys_id */
	ss->subsys_id = i;
	subsys[i] = ss;

	/*
	 * no ss->create seems to need anything important in the ss struct, so
	 * this can happen first (i.e. before the rootnode attachment).
	 */
	css = ss->create(ss, dummytop);
	if (IS_ERR(css)) {
		/* failure case - need to deassign the subsys[] slot. */
		subsys[i] = NULL;
		mutex_unlock(&cgroup_mutex);
		return PTR_ERR(css);
	}

	list_add(&ss->sibling, &rootnode.subsys_list);
	ss->root = &rootnode;

	/* our new subsystem will be attached to the dummy hierarchy. */
	init_cgroup_css(css, ss, dummytop);
	/* init_idr must be after init_cgroup_css because it sets css->id. */
	if (ss->use_id) {
		int ret = cgroup_init_idr(ss, css);
		if (ret) {
			dummytop->subsys[ss->subsys_id] = NULL;
			ss->destroy(ss, dummytop);
			subsys[i] = NULL;
			mutex_unlock(&cgroup_mutex);
			return ret;
		}
	}

	/*
	 * Now we need to entangle the css into the existing css_sets. unlike
	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
	 * will need a new pointer to it; done by iterating the css_set_table.
	 * furthermore, modifying the existing css_sets will corrupt the hash
	 * table state, so each changed css_set will need its hash recomputed.
	 * this is all done under the css_set_lock.
	 */
	write_lock(&css_set_lock);
	for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
		struct css_set *cg;
		struct hlist_node *node, *tmp;
		struct hlist_head *bucket = &css_set_table[i], *new_bucket;

		hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) {
			/* skip entries that we already rehashed */
			if (cg->subsys[ss->subsys_id])
				continue;
			/* remove existing entry */
			hlist_del(&cg->hlist);
			/* set new value */
			cg->subsys[ss->subsys_id] = css;
			/* recompute hash and restore entry */
			new_bucket = css_set_hash(cg->subsys);
			hlist_add_head(&cg->hlist, new_bucket);
		}
	}
	write_unlock(&css_set_lock);

	mutex_init(&ss->hierarchy_mutex);
	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
	ss->active = 1;

	/* success! */
	mutex_unlock(&cgroup_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_load_subsys);

/**
 * cgroup_unload_subsys: unload a modular subsystem
 * @ss: the subsystem to unload
 *
 * This function should be called in a modular subsystem's exitcall. When this
 * function is invoked, the refcount on the subsystem's module will be 0, so
 * the subsystem will not be attached to any hierarchy.
 */
void cgroup_unload_subsys(struct cgroup_subsys *ss)
{
	struct cg_cgroup_link *link;
	struct hlist_head *hhead;

	BUG_ON(ss->module == NULL);

	/*
	 * we shouldn't be called if the subsystem is in use, and the use of
	 * try_module_get in parse_cgroupfs_options should ensure that it
	 * doesn't start being used while we're killing it off.
	 */
	BUG_ON(ss->root != &rootnode);

	mutex_lock(&cgroup_mutex);
	/* deassign the subsys_id */
	BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
	subsys[ss->subsys_id] = NULL;

	/* remove subsystem from rootnode's list of subsystems */
	list_del_init(&ss->sibling);

	/*
	 * disentangle the css from all css_sets attached to the dummytop. as
	 * in loading, we need to pay our respects to the hashtable gods.
	 */
	write_lock(&css_set_lock);
	list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;

		hlist_del(&cg->hlist);
		BUG_ON(!cg->subsys[ss->subsys_id]);
		cg->subsys[ss->subsys_id] = NULL;
		hhead = css_set_hash(cg->subsys);
		hlist_add_head(&cg->hlist, hhead);
	}
	write_unlock(&css_set_lock);

	/*
	 * remove the subsystem's css from the dummytop and free it - it needs
	 * to be freed before marking the slot as NULL, because ss->destroy
	 * needs the cgrp->subsys pointer to find its state. note that this
	 * also takes care of freeing the css_id.
	 */
	ss->destroy(ss, dummytop);
	dummytop->subsys[ss->subsys_id] = NULL;

	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unload_subsys);

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	int i;
	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cg_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	css_set_count = 1;
	init_cgroup_root(&rootnode);
	root_count = 1;
	init_task.cgroups = &init_css_set;

	init_css_set_link.cg = &init_css_set;
	init_css_set_link.cgrp = dummytop;
	list_add(&init_css_set_link.cgrp_link_list,
		 &rootnode.top_cgroup.css_sets);
	list_add(&init_css_set_link.cg_link_list,
		 &init_css_set.cg_links);

	for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
		INIT_HLIST_HEAD(&css_set_table[i]);

	/* at bootup time, we don't worry about modular subsystems */
	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];

		BUG_ON(!ss->name);
		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
		BUG_ON(!ss->create);
		BUG_ON(!ss->destroy);
		if (ss->subsys_id != i) {
			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
			       ss->name, ss->subsys_id);
			BUG();
		}

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	int err;
	int i;
	struct hlist_head *hhead;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;

	/* at bootup time, we don't worry about modular subsystems */
	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (!ss->early_init)
			cgroup_init_subsys(ss);
		if (ss->use_id)
			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
	}

	/* Add init_css_set to the hash table */
	hhead = css_set_hash(init_css_set.subsys);
	hlist_add_head(&init_css_set.hlist, hhead);
	BUG_ON(!init_root_id(&rootnode));

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway. No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */
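/*
 * Each line of output is "hierarchy_id:subsystem_list:path", with an extra
 * "name=" token for named hierarchies. An illustrative example (the
 * hierarchy ids and group path are assumptions):
 *
 *	$ cat /proc/self/cgroup
 *	2:cpu,cpuacct:/batch
 *	1:cpuset:/
 */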

/* TODO: Use a proper seq_file iterator */
static int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int count = 0;

		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(root, ss)
			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

static int cgroup_open(struct inode *inode, struct file *file)
{
	struct pid *pid = PROC_I(inode)->pid;
	return single_open(file, proc_cgroup_show, pid);
}

const struct file_operations proc_cgroup_operations = {
	.open = cgroup_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Display information about each subsystem and each hierarchy */
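/*
 * Output format, one row per subsystem; the data rows below are only an
 * illustration, actual values depend on the running system:
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		1		4		1
 *	cpu		2		12		1
 */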
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		if (ss == NULL)
			continue;
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);
	}
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - attach newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of forking parent process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct(). However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer. cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced cgroup group to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
	task_lock(current);
	child->cgroups = current->cgroups;
	get_css_set(child->cgroups);
	task_unlock(current);
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_fork_callbacks - run fork callbacks
 * @child: the new task
 *
 * Called on a new task very soon before adding it to the
 * tasklist. No need to take any locks since no-one can
 * be operating on this task.
 */
void cgroup_fork_callbacks(struct task_struct *child)
{
	if (need_forkexit_callback) {
		int i;
		/*
		 * forkexit callbacks are only supported for builtin
		 * subsystems, and the builtin section of the subsys array is
		 * immutable, so we don't need to lock the subsys array here.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss->fork)
				ss->fork(ss, child);
		}
	}
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary.
 * Has to be after the task is visible on the task list in case we race
 * with the first call to cgroup_iter_start() - to guarantee that the
 * new task ends up on its list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &child->cgroups->tasks);
		task_unlock(child);
		write_unlock(&css_set_lock);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems. Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference. This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe. The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented. But in this case,
 *    top_cgroup isn't going away, and either the task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or the task is a
 *    failed fork, never visible to cgroup_attach_task.
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	struct css_set *cg;
	int i;

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del_init(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cg = tsk->cgroups;
	tsk->cgroups = &init_css_set;

	if (run_callbacks && need_forkexit_callback) {
		/*
		 * modular subsystems can't use callbacks, so no need to lock
		 * the subsys array
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss->exit) {
				struct cgroup *old_cgrp =
					rcu_dereference_raw(cg->subsys[i])->cgroup;
				struct cgroup *cgrp = task_cgroup(tsk, i);
				ss->exit(ss, cgrp, old_cgrp, tsk);
			}
		}
	}
	task_unlock(tsk);

	if (cg)
		put_css_set_taskexit(cg);
}

/**
 * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
 * @cgrp: the cgroup in question
 * @task: the task in question
 *
 * See if @cgrp is a descendant of @task's cgroup in the appropriate
 * hierarchy.
 *
 * If we are sending in dummytop, then presumably we are creating
 * the top cgroup in the subsystem.
 *
 * Called only by the ns (nsproxy) cgroup.
 */
int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
{
	int ret;
	struct cgroup *target;

	if (cgrp == dummytop)
		return 1;

	target = task_cgroup_from_root(task, cgrp->root);
	while (cgrp != target && cgrp != cgrp->top_cgroup)
		cgrp = cgrp->parent;
	ret = (cgrp == target);
	return ret;
}

static void check_for_release(struct cgroup *cgrp)
{
	/* All of these checks rely on RCU to keep the cgroup
	 * structure alive */
	if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
	    && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
		/* Control Group is currently removable. If it's not
		 * already queued for a userspace notification, queue
		 * it now */
		int need_schedule_work = 0;
		spin_lock(&release_list_lock);
		if (!cgroup_is_removed(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/* Caller must verify that the css is not for root cgroup */
void __css_put(struct cgroup_subsys_state *css, int count)
{
	struct cgroup *cgrp = css->cgroup;
	int val;
	rcu_read_lock();
	val = atomic_sub_return(count, &css->refcnt);
	if (val == 1) {
		if (notify_on_release(cgrp)) {
			set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}
		cgroup_wakeup_rmdir_waiter(cgrp);
	}
	rcu_read_unlock();
	WARN_ON_ONCE(val < 1);
}
EXPORT_SYMBOL_GPL(__css_put);
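/*
 * Note on the refcount bias: init_cgroup_css() starts css->refcnt at 1, so a
 * live css with no external users sits at 1 rather than 0. That is why the
 * "last put" check above is val == 1, and why cgroup_clear_css_refs() treats
 * refcnt == 1 as removable.
 */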

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup. That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence. Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d. The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task. We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;

		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = pathbuf;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		spin_lock(&release_list_lock);
	}
	spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}

static int __init cgroup_disable(char *str)
{
	int i;
	char *token;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;
		/*
		 * cgroup_disable, being at boot time, can't know about module
		 * subsystems, so we don't worry about them.
		 */
		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];

			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
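/*
 * The parameter is a comma-separated list of built-in subsystem names on the
 * kernel command line; for example (the chosen subsystems are only an
 * illustration):
 *
 *	cgroup_disable=memory,cpuset
 */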

/*
 * Functions for CSS ID.
 */

/*
 * To get an ID other than 0, this should be called when
 * !cgroup_is_removed().
 */
unsigned short css_id(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	/*
	 * This css_id() can return a correct value when someone has a refcnt
	 * on this or this is under rcu_read_lock(). Once css->id is allocated,
	 * it's unchanged until freed.
	 */
	cssid = rcu_dereference_check(css->id,
			rcu_read_lock_held() || atomic_read(&css->refcnt));

	if (cssid)
		return cssid->id;
	return 0;
}
EXPORT_SYMBOL_GPL(css_id);

unsigned short css_depth(struct cgroup_subsys_state *css)
{
	struct css_id *cssid;

	cssid = rcu_dereference_check(css->id,
			rcu_read_lock_held() || atomic_read(&css->refcnt));

	if (cssid)
		return cssid->depth;
	return 0;
}
EXPORT_SYMBOL_GPL(css_depth);

/**
 * css_is_ancestor - test whether "root" css is an ancestor of "child"
 * @child: the css to be tested.
 * @root: the css supposed to be an ancestor of the child.
 *
 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
 * this function reads css->id, it uses rcu_dereference() and rcu_read_lock().
 * But, considering usual usage, the csses should be valid objects after test.
 * Assuming that the caller will do some action to the child if this returns
 * true, the caller must take "child"'s reference count.
 * If "child" is a valid object and this returns true, "root" is valid, too.
 */
bool css_is_ancestor(struct cgroup_subsys_state *child,
		     const struct cgroup_subsys_state *root)
{
	struct css_id *child_id;
	struct css_id *root_id;
	bool ret = true;

	rcu_read_lock();
	child_id = rcu_dereference(child->id);
	root_id = rcu_dereference(root->id);
	if (!child_id
	    || !root_id
	    || (child_id->depth < root_id->depth)
	    || (child_id->stack[root_id->depth] != root_id->id))
		ret = false;
	rcu_read_unlock();
	return ret;
}
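/*
 * The check above works because each css_id records the ids of all its
 * ancestors, indexed by depth (see alloc_css_id() below). With hypothetical
 * ids, a css at depth 2 whose ancestors have ids 1 (the root) and 5 might
 * carry stack = {1, 5, 9}; any css whose stack[1] == 5 is then a descendant
 * of the id-5 css, or that css itself.
 */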

void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
{
	struct css_id *id = css->id;
	/* When this is called before css_id initialization, id can be NULL */
	if (!id)
		return;

	BUG_ON(!ss->use_id);

	rcu_assign_pointer(id->css, NULL);
	rcu_assign_pointer(css->id, NULL);
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, id->id);
	spin_unlock(&ss->id_lock);
	kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);

/*
 * This is called by init or create(). Then, calls to this function are
 * always serialized (by cgroup_mutex at create()).
 */
static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
{
	struct css_id *newid;
	int myid, error, size;

	BUG_ON(!ss->use_id);

	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
	newid = kzalloc(size, GFP_KERNEL);
	if (!newid)
		return ERR_PTR(-ENOMEM);
	/* get id */
	if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
		error = -ENOMEM;
		goto err_out;
	}
	spin_lock(&ss->id_lock);
	/* Don't use 0; allocate an ID in the range 1-65535 */
	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
	spin_unlock(&ss->id_lock);

	/* Returns an error when there is no free space for a new ID. */
	if (error) {
		error = -ENOSPC;
		goto err_out;
	}
	if (myid > CSS_ID_MAX)
		goto remove_idr;

	newid->id = myid;
	newid->depth = depth;
	return newid;
remove_idr:
	error = -ENOSPC;
	spin_lock(&ss->id_lock);
	idr_remove(&ss->idr, myid);
	spin_unlock(&ss->id_lock);
err_out:
	kfree(newid);
	return ERR_PTR(error);
}

static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
					    struct cgroup_subsys_state *rootcss)
{
	struct css_id *newid;

	spin_lock_init(&ss->id_lock);
	idr_init(&ss->idr);

	newid = get_new_cssid(ss, 0);
	if (IS_ERR(newid))
		return PTR_ERR(newid);

	newid->stack[0] = newid->id;
	newid->css = rootcss;
	rootcss->id = newid;
	return 0;
}

static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
			struct cgroup *child)
{
	int subsys_id, i, depth = 0;
	struct cgroup_subsys_state *parent_css, *child_css;
	struct css_id *child_id, *parent_id;

	subsys_id = ss->subsys_id;
	parent_css = parent->subsys[subsys_id];
	child_css = child->subsys[subsys_id];
	parent_id = parent_css->id;
	depth = parent_id->depth + 1;

	child_id = get_new_cssid(ss, depth);
	if (IS_ERR(child_id))
		return PTR_ERR(child_id);

	for (i = 0; i < depth; i++)
		child_id->stack[i] = parent_id->stack[i];
	child_id->stack[depth] = child_id->id;
	/*
	 * child_id->css pointer will be set after this cgroup is available
	 * see cgroup_populate_dir()
	 */
	rcu_assign_pointer(child_css->id, child_id);

	return 0;
}

/**
 * css_lookup - lookup css by id
 * @ss: cgroup subsys to be looked into.
 * @id: the id
 *
 * Returns a pointer to the cgroup_subsys_state if there is a valid one with
 * the given id, NULL if not. Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
{
	struct css_id *cssid = NULL;

	BUG_ON(!ss->use_id);
	cssid = idr_find(&ss->idr, id);

	if (unlikely(!cssid))
		return NULL;

	return rcu_dereference(cssid->css);
}
EXPORT_SYMBOL_GPL(css_lookup);

/**
 * css_get_next - lookup next cgroup under specified hierarchy.
 * @ss: pointer to subsystem
 * @id: current position of iteration.
 * @root: pointer to css. search tree under this.
 * @foundid: position of found object.
 *
 * Search for the next css under the specified hierarchy of rootid. Calling
 * under rcu_read_lock() is necessary. Returns NULL if it reaches the end.
 */
struct cgroup_subsys_state *
css_get_next(struct cgroup_subsys *ss, int id,
	     struct cgroup_subsys_state *root, int *foundid)
{
	struct cgroup_subsys_state *ret = NULL;
	struct css_id *tmp;
	int tmpid;
	int rootid = css_id(root);
	int depth = css_depth(root);

	if (!rootid)
		return NULL;

	BUG_ON(!ss->use_id);
	/* fill start point for scan */
	tmpid = id;
	while (1) {
		/*
		 * scan next entry from bitmap(tree), tmpid is updated after
		 * idr_get_next().
		 */
		spin_lock(&ss->id_lock);
		tmp = idr_get_next(&ss->idr, &tmpid);
		spin_unlock(&ss->id_lock);

		if (!tmp)
			break;
		if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
			ret = rcu_dereference(tmp->css);
			if (ret) {
				*foundid = tmpid;
				break;
			}
		}
		/* continue to scan from next id */
		tmpid = tmpid + 1;
	}
	return ret;
}
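/*
 * Typical caller pattern (a sketch; the subsystem pointer and loop body are
 * assumptions):
 *
 *	int id = 1, found;
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	while ((css = css_get_next(ss, id, root, &found)) != NULL) {
 *		// ... act on css ...
 *		id = found + 1;
 *	}
 *	rcu_read_unlock();
 */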

/*
 * get corresponding css from file open on cgroupfs directory
 */
struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
{
	struct cgroup *cgrp;
	struct inode *inode;
	struct cgroup_subsys_state *css;

	inode = f->f_dentry->d_inode;
	/* check in cgroup filesystem dir */
	if (inode->i_op != &cgroup_dir_inode_operations)
		return ERR_PTR(-EBADF);

	if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
		return ERR_PTR(-EINVAL);

	/* get cgroup */
	cgrp = __d_cgrp(f->f_dentry);
	css = cgrp->subsys[id];
	return css ? css : ERR_PTR(-ENOENT);
}

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
						struct cgroup *cont)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
{
	kfree(cont->subsys[debug_subsys_id]);
}

static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
{
	return atomic_read(&cont->count);
}

static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
{
	return cgroup_task_count(cont);
}

static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup *cont,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&current->cgroups->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct cgroup *cont,
					 struct cftype *cft,
					 struct seq_file *seq)
{
	struct cg_cgroup_link *link;
	struct css_set *cg;

	read_lock(&css_set_lock);
	rcu_read_lock();
	cg = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		const char *name;

		if (c->dentry)
			name = c->dentry->d_name.name;
		else
			name = "?";
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name);
	}
	rcu_read_unlock();
	read_unlock(&css_set_lock);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct cgroup *cont,
				 struct cftype *cft,
				 struct seq_file *seq)
{
	struct cg_cgroup_link *link;

	read_lock(&css_set_lock);
	list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
		struct css_set *cg = link->cg;
		struct task_struct *task;
		int count = 0;
		seq_printf(seq, "css_set %p\n", cg);
		list_for_each_entry(task, &cg->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
				seq_puts(seq, "  ...\n");
				break;
			} else {
				seq_printf(seq, "  task %d\n",
					   task_pid_vnr(task));
			}
		}
	}
	read_unlock(&css_set_lock);
	return 0;
}

static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
}

static struct cftype debug_files[] = {
	{
		.name = "cgroup_refcount",
		.read_u64 = cgroup_refcount_read,
	},
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.read_seq_string = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.read_seq_string = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},
};

static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, debug_files,
				ARRAY_SIZE(debug_files));
}

struct cgroup_subsys debug_subsys = {
	.name = "debug",
	.create = debug_create,
	.destroy = debug_destroy,
	.populate = debug_populate,
	.subsys_id = debug_subsys_id,
};
#endif /* CONFIG_CGROUP_DEBUG */