/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT,
			    .cfq_leaf_weight = 2 * CFQ_WEIGHT_DEFAULT, };
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		kfree(blkg->pd[i]);

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations. Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state. If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match. Look up from the radix tree. Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree. The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

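/*
 * Illustrative sketch, not part of the upstream file: the usual fast-path
 * caller resolves the blkg for the blkcg a bio is charged to roughly as
 * follows; error handling is elided and example_pol is a hypothetical
 * policy descriptor:
 *
 *	rcu_read_lock();
 *	blkcg = bio_blkcg(bio);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		pd = blkg_to_pd(blkg, &example_pol);
 *	rcu_read_unlock();
 */
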
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -EINVAL;
		goto err_free_blkg;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_css;
		}
	}
	blkg = new_blkg;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -EINVAL;
			goto err_put_css;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
 * create one. blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg. This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

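/*
 * Illustrative sketch, not part of the upstream file: when the RCU fast
 * path above misses, a policy typically retries under the queue lock and
 * creates the blkg on demand.  Falling back to the root blkg on error
 * mirrors what blk-throttle and cfq do; example_pol is hypothetical:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (IS_ERR(blkg))
 *		blkg = q->root_blkg;
 *	pd = blkg_to_pd(blkg, &example_pol);
 *	spin_unlock_irq(q->queue_lock);
 *
 * The RCU read lock must still be held across the call, as documented
 * above.
 */
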
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
	}
	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock. If it's not pointing to @blkg now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid. For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);
	int i;

	/* tell policies that this one is being freed */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);
	}

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function is used by blk_queue_for_each_rl(). It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head. The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

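/*
 * Illustrative sketch, not part of the upstream file: __blk_queue_next_rl()
 * only exists to back the blk_queue_for_each_rl() iterator in block/blk.h.
 * With the queue lock held, waking every request_list of a queue looks
 * roughly like the loop in __blk_drain_queue():
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 */
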
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	/*
	 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
	 * which ends up putting cgroup's internal cgroup_tree_mutex under
	 * it; however, cgroup_tree_mutex is nested above cgroup file
	 * active protection and grabbing blkcg_pol_mutex from a cgroup
	 * file operation creates a possible circular dependency. cgroup
	 * internal locking is planned to go through further simplification
	 * and this issue should go away soon. For now, let's trylock
	 * blkcg_pol_mutex and restart the write on failure.
	 *
	 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
	 */
	if (!mutex_trylock(&blkcg_pol_mutex))
		return restart_syscall();
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates. This is a debug feature which shouldn't exist
	 * anyway. If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists. @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held. If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

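/*
 * Illustrative sketch, not part of the upstream file: a policy wires the
 * prfill helpers above into a cftype seq_file handler.  example_pol,
 * struct example_pd and its stat field are hypothetical; cfq-iosched and
 * blk-throttle follow this pattern:
 *
 *	static int example_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &example_pol,
 *				  offsetof(struct example_pd, stat), false);
 *		return 0;
 *	}
 */
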
/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * Collect the blkg_stat specified by @off from @pd and all its online
 * descendants and return the sum. The caller must be holding the queue
 * lock for online tests.
 */
u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_stat *stat = (void *)pos_pd + off;

		if (pos_blkg->online)
			sum += blkg_stat_read(stat);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

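/*
 * Illustrative sketch, not part of the upstream file: a hierarchical
 * ("_recursive") stat file is typically just a prfill callback that feeds
 * the sum from blkg_stat_recursive_sum() to __blkg_prfill_u64(), reusing
 * the same @off as the flat variant of the file:
 *
 *	static u64 example_prfill_stat_recursive(struct seq_file *sf,
 *						 struct blkg_policy_data *pd,
 *						 int off)
 *	{
 *		return __blkg_prfill_u64(sf, pd,
 *					 blkg_stat_recursive_sum(pd, off));
 *	}
 */
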
/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * Collect the blkg_rwstat specified by @off from @pd and all its online
 * descendants and return the sum. The caller must be holding the queue
 * lock for online tests.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off)
{
	struct blkcg_policy *pol = blkcg_policy[pd->plid];
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(pd->blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
		struct blkg_rwstat *rwstat = (void *)pos_pd + off;
		struct blkg_rwstat tmp;

		if (!pos_blkg->online)
			continue;

		tmp = blkg_rwstat_read(rwstat);

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			sum.cnt[i] += tmp.cnt[i];
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

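/*
 * Illustrative sketch, not part of the upstream file: the rwstat
 * counterpart mirrors the previous example, handing the summed struct to
 * __blkg_prfill_rwstat():
 *
 *	static u64 example_prfill_rwstat_recursive(struct seq_file *sf,
 *						   struct blkg_policy_data *pd,
 *						   int off)
 *	{
 *		struct blkg_rwstat sum = blkg_rwstat_recursive_sum(pd, off);
 *
 *		return __blkg_prfill_rwstat(sf, pd, &sum);
 *	}
 */
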
/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result. @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value. This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry. Do so after a
		 * short msleep(). It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update. This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

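/*
 * Illustrative sketch, not part of the upstream file: a per-device
 * configuration write handler pairs the two helpers above.  example_pol,
 * struct example_pd, pd_to_example() and the limit field are hypothetical
 * stand-ins for a real policy's types:
 *
 *	static ssize_t example_set_limit(struct kernfs_open_file *of,
 *					 char *buf, size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_pol, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		pd_to_example(blkg_to_pd(ctx.blkg, &example_pol))->limit = ctx.v;
 *
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */
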
struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and is responsible
 * for shooting down all blkgs associated with @css. blkgs should be
 * removed while holding both q and blkcg locks. As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;

	if (!parent_css) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs. As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used. Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		kfree(new_blkg);
		return PTR_ERR(blkg);
	}

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue(). Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg. If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue(). Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_subsys_state *css,
			    struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.legacy_cftypes = blkcg_files,
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q. Requires %GFP_KERNEL context. @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path. Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations. Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* count and allocate policy_data for all existing blkgs */
	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);
	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;
	spin_unlock_irq(q->queue_lock);

	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds. With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

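/*
 * Illustrative sketch, not part of the upstream file: a policy normally
 * activates itself per queue from its own init path and deactivates from
 * the matching teardown path (blk-throttle and cfq pair these calls in
 * their queue init/exit functions); example_pol is hypothetical:
 *
 *	ret = blkcg_activate_policy(q, &example_pol);
 *	if (ret)
 *		goto err_free;
 *	...
 *	blkcg_deactivate_policy(q, &example_pol);
 */
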
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q. Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_offline_fn)
			pol->pd_offline_fn(blkg);
		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core. Might sleep and @pol may be modified on
 * successful registration. Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
						  pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

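/*
 * Illustrative sketch, not part of the upstream file: a policy module
 * registers itself once with its per-blkg data size, callbacks and
 * cftypes; all names below are hypothetical:
 *
 *	static struct blkcg_policy example_pol = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.cftypes	= example_files,
 *		.pd_init_fn	= example_pd_init,
 *		.pd_exit_fn	= example_pd_exit,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_pol);
 *	}
 *	module_init(example_init);
 */
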
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol). Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);