/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include "blk-cgroup.h"
#include "blk.h"

#define MAX_KEY_LEN 100

static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root = { .cfq_weight = 2 * CFQ_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkcg_root);

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd = blkg->pd[i];

		if (!pd)
			continue;

		if (pol && pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(pd);
	}

	blk_exit_rl(&blkg->rl);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	blkg->refcnt = 1;

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = kzalloc_node(pol->pd_size, gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;

		/* invoke per-policy init */
		if (blkcg_policy_enabled(blkg->q, pol))
			pol->pd_init_fn(blkg);
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
				      struct request_queue *q)
{
	struct blkcg_gq *blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that we
	 * may not be holding queue_lock and thus are not sure whether
	 * @blkg from blkg_tree has already been removed or not, so we
	 * can't update hint to the lookup result.  Leave it to the caller.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q)
		return blkg;

	return NULL;
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q);
}
EXPORT_SYMBOL_GPL(blkg_lookup);

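/*
 * Illustrative usage sketch (not from the original file; the caller
 * shape is hypothetical): users pin the result with the RCU read lock
 * for as long as the blkg is used, and only touch group-local data.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg) {
 *		pd = blkg->pd[pol->plid];
 *		... read group-local data such as stats ...
 *	}
 *	rcu_read_unlock();
 */
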
/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_ATOMIC.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
					     struct request_queue *q,
					     struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	int ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* lookup and update hint on success, see __blkg_lookup() for details */
	blkg = __blkg_lookup(blkcg, q);
	if (blkg) {
		rcu_assign_pointer(blkcg->blkg_hint, blkg);
		goto out_free;
	}

	/* blkg holds a reference to blkcg */
	if (!css_tryget(&blkcg->css)) {
		blkg = ERR_PTR(-EINVAL);
		goto out_free;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_ATOMIC);
		if (unlikely(!new_blkg)) {
			blkg = ERR_PTR(-ENOMEM);
			goto out_put;
		}
	}
	blkg = new_blkg;

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);
	}
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	blkg = ERR_PTR(ret);
out_put:
	css_put(&blkcg->css);
out_free:
	blkg_free(new_blkg);
	return blkg;
}

struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
	return __blkg_lookup_create(blkcg, q, NULL);
}
EXPORT_SYMBOL_GPL(blkg_lookup_create);

static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	/*
	 * root blkg is destroyed.  Just clear the pointer since
	 * root_rl does not take reference on root blkg.
	 */
	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

static void blkg_rcu_free(struct rcu_head *rcu_head)
{
	blkg_free(container_of(rcu_head, struct blkcg_gq, rcu_head));
}

void __blkg_release(struct blkcg_gq *blkg)
{
	/* release the extra blkcg reference this blkg has been holding */
	css_put(&blkg->blkcg->css);

	/*
	 * A group is freed in RCU manner.  But having an RCU lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid.  For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an RCU read lock allows access
	 * only to values local to groups like group stats and group rate
	 * limits.
	 */
	call_rcu(&blkg->rcu_head, blkg_rcu_free);
}
EXPORT_SYMBOL_GPL(__blkg_release);

/*
 * The iterator function used by blk_queue_for_each_rl().  It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

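/*
 * Consumer sketch: blk_queue_for_each_rl() (defined in blk.h; shown
 * here from memory, as an assumption) starts at @q->root_rl and keeps
 * calling __blk_queue_next_rl() until it returns NULL:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		... drain or inspect each rl ...
 *
 * The walk visits root_rl first, then the rl embedded in each non-root
 * blkg on @q->blkg_list.
 */
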
static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
			     u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkcg_policy_enabled(blkg->q, pol) &&
			    pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

static const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data.  If @show_total is %true, the sum of the return
 * values from @prfill is printed with "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	struct hlist_node *n;
	u64 total = 0;

	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	spin_unlock_irq(&blkcg->lock);

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

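/*
 * Hedged example of the intended pattern: a policy wraps this helper in
 * its cftype read handler.  "example_policy" and "example_print_stat"
 * are placeholder names, not symbols from this file.
 *
 *	static int example_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				      struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &example_policy,
 *				  cft->private, false);
 *		return 0;
 *	}
 */
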
/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)rwstat->cnt[i]);

	v = rwstat->cnt[BLKG_RWSTAT_READ] + rwstat->cnt[BLKG_RWSTAT_WRITE];
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->v the new
 * value.  This function returns with RCU read lock and queue lock held and
 * must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	unsigned int major, minor;
	unsigned long long v;
	int part, ret;

	if (sscanf(input, "%u:%u %llu", &major, &minor, &v) != 3)
		return -EINVAL;

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk || part)
		return -EINVAL;

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EINVAL);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		put_disk(disk);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->v = v;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	put_disk(ctx->disk);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

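/*
 * Sketch of the documented prep/finish pairing, with hypothetical
 * "example_policy" and "example_pd()" accessors standing in for a real
 * policy's symbols:
 *
 *	static int example_set_limit(struct cgroup *cgrp, struct cftype *cft,
 *				     const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &example_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *
 *		example_pd(ctx.blkg)->limit = ctx.v;
 *		blkg_conf_finish(&ctx);
 *		return 0;
 *	}
 */
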
struct cftype blkcg_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_pre_destroy - cgroup pre_destroy callback
 * @cgroup: cgroup of interest
 *
 * This function is called when @cgroup is about to go away and is
 * responsible for shooting down all blkgs associated with @cgroup.
 * blkgs should be removed while holding both q and blkcg locks.  As
 * blkcg lock is nested inside q lock, this function performs reverse
 * double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static int blkcg_pre_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static void blkcg_destroy(struct cgroup *cgroup)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);

	if (blkcg != &blkcg_root)
		kfree(blkcg);
}

static struct cgroup_subsys_state *blkcg_create(struct cgroup *cgroup)
{
	static atomic64_t id_seq = ATOMIC64_INIT(0);
	struct blkcg *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkcg_root;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
	blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
done:
	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	might_sleep();

	return blk_throtl_init(q);
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkcg_create,
	.can_attach = blkcg_can_attach,
	.pre_destroy = blkcg_pre_destroy,
	.destroy = blkcg_destroy,
	.subsys_id = blkio_subsys_id,
	.base_cftypes = blkcg_files,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	LIST_HEAD(pds);
	struct blkcg_gq *blkg;
	struct blkg_policy_data *pd, *n;
	int cnt = 0, ret;
	bool preloaded;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	/* preallocations for root blkg */
	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	blk_queue_bypass_start(q);

	/* make sure the root blkg exists and count the existing blkgs */
	spin_lock_irq(q->queue_lock);

	rcu_read_lock();
	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		goto out_unlock;
	}
	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	list_for_each_entry(blkg, &q->blkg_list, q_node)
		cnt++;

	spin_unlock_irq(q->queue_lock);

	/* allocate policy_data for all existing blkgs */
	while (cnt--) {
		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
		if (!pd) {
			ret = -ENOMEM;
			goto out_free;
		}
		list_add_tail(&pd->alloc_node, &pds);
	}

	/*
	 * Install the allocated pds.  With @q bypassing, no new blkg
	 * should have been created while the queue lock was dropped.
	 */
	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		if (WARN_ON(list_empty(&pds))) {
			/* umm... this shouldn't happen, just abort */
			ret = -ENOMEM;
			goto out_unlock;
		}
		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
		list_del_init(&pd->alloc_node);

		/* grab blkcg lock too while installing @pd on @blkg */
		spin_lock(&blkg->blkcg->lock);

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pol->pd_init_fn(blkg);

		spin_unlock(&blkg->blkcg->lock);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;
out_unlock:
	spin_unlock_irq(q->queue_lock);
out_free:
	blk_queue_bypass_end(q);
	list_for_each_entry_safe(pd, n, &pds, alloc_node)
		kfree(pd);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

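/*
 * Usage sketch, assuming the conventions of in-tree policies: a policy
 * activates itself while setting up its per-queue state and deactivates
 * on teardown.  "example_policy" is a placeholder; error handling is
 * abbreviated.
 *
 *	ret = blkcg_activate_policy(q, &example_policy);
 *	if (ret)
 *		goto err_free_state;
 *	...
 *	blkcg_deactivate_policy(q, &example_policy);
 */
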
/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol)
{
	struct blkcg_gq *blkg;

	if (!blkcg_policy_enabled(q, pol))
		return;

	blk_queue_bypass_start(q);
	spin_lock_irq(q->queue_lock);

	__clear_bit(pol->plid, q->blkcg_pols);

	/* if no policy is left, no need for blkgs - shoot them down */
	if (bitmap_empty(q->blkcg_pols, BLKCG_MAX_POLS))
		blkg_destroy_all(q);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		/* grab blkcg lock too while removing @pd from @blkg */
		spin_lock(&blkg->blkcg->lock);

		if (pol->pd_exit_fn)
			pol->pd_exit_fn(blkg);

		kfree(blkg->pd[pol->plid]);
		blkg->pd[pol->plid] = NULL;

		spin_unlock(&blkg->blkcg->lock);
	}

	spin_unlock_irq(q->queue_lock);
	blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	int i, ret;

	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
		return -EINVAL;

	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto out_unlock;

	/* register and update blkgs */
	pol->plid = i;
	blkcg_policy[i] = pol;

	/* everything is in place, add intf files for the new policy */
	if (pol->cftypes)
		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
	ret = 0;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

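/*
 * Registration sketch for a hypothetical policy module; the struct
 * blkcg_policy fields referenced (pd_size, cftypes, pd_init_fn) match
 * the ones this file consumes:
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_pd),
 *		.cftypes	= example_files,
 *		.pd_init_fn	= example_pd_init,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		blkcg_policy_unregister(&example_policy);
 *	}
 */
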
/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	mutex_lock(&blkcg_pol_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->cftypes)
		cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);

	/* unregister and update blkgs */
	blkcg_policy[pol->plid] = NULL;
out_unlock:
	mutex_unlock(&blkcg_pol_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);