blkcg: consolidate blkg creation in blkcg_bio_issue_check()
include/linux/blk-cgroup.h
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
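
/*
 * Illustrative sketch (not part of this header): a policy typically wraps
 * blkg_policy_data at the start of a larger private struct, returns it
 * from its pd_alloc_fn() and recovers it with container_of().  The names
 * "my_pd" and "my_pd_alloc_fn" are hypothetical.
 *
 *	struct my_pd {
 *		struct blkg_policy_data pd;	// must be the first member
 *		u64 nr_dispatched;
 *	};
 *
 *	static struct blkg_policy_data *my_pd_alloc_fn(gfp_t gfp, int node)
 *	{
 *		struct my_pd *mpd = kzalloc_node(sizeof(*mpd), gfp, node);
 *
 *		return mpd ? &mpd->pd : NULL;
 *	}
 */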

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
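
/*
 * Illustrative sketch (hypothetical policy, not part of this header):
 * wiring methods into a blkcg_policy and registering it.  ->plid is
 * assigned by blkcg_policy_register().  kfree() pairs with the
 * kzalloc_node() in the my_pd sketch above.
 *
 *	static void my_pd_free_fn(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct my_pd, pd));
 *	}
 *
 *	static struct blkcg_policy my_policy = {
 *		.pd_alloc_fn	= my_pd_alloc_fn,
 *		.pd_free_fn	= my_pd_free_fn,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 */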

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);


static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
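
/*
 * Illustrative lookup pattern (sketch only): the lookup must run under
 * RCU, and the result is only stable within the read-side critical
 * section unless a reference is taken with blkg_get().
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(bio_blkcg(bio), q);
 *	if (blkg)
 *		blkg_get(blkg);	// optional: pin beyond the RCU section
 *	rcu_read_unlock();
 */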

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
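
/*
 * Illustrative sketch: combined with blkg_to_pd(), a policy embedding
 * blkg_policy_data (the hypothetical my_pd/my_policy above) can hop from
 * a blkg to its own private data:
 *
 *	static struct my_pd *blkg_to_my_pd(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &my_policy);
 *
 *		return pd ? container_of(pd, struct my_pd, pd) : NULL;
 *	}
 */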

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
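
/*
 * Illustrative usage (sketch only; the buffer size is an arbitrary
 * example value):
 *
 *	char path[128];
 *
 *	if (!blkg_path(blkg, path, sizeof(path)))
 *		pr_debug("blkg belongs to cgroup %s\n", path);
 */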

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
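
/*
 * Illustrative walk (sketch only): visit a blkg and every online
 * descendant under RCU, e.g. to propagate a configuration change.
 * my_update_one() is a hypothetical per-blkg helper.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, blkg)
 *		my_update_one(d_blkg);
 *	rcu_read_unlock();
 */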

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
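
/*
 * Illustrative request_list lifecycle (a sketch of how the helpers above
 * pair up; the request allocator does something along these lines):
 *
 *	rl = blk_get_rl(q, bio);	// under queue_lock
 *	// ... allocate rq from rl ...
 *	blk_rq_set_rl(rq, rl);
 *	// ... later, when rq is freed ...
 *	blk_put_rl(blk_rq_rl(rq));
 */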

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: loop cursor pointing to the current request_list
 * @q: request_queue of interest
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}
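
/*
 * Illustrative blkg_stat usage (sketch only; "nr_dispatched" is a
 * hypothetical per-policy counter):
 *
 *	struct blkg_stat nr_dispatched;
 *
 *	blkg_stat_init(&nr_dispatched);
 *	blkg_stat_add(&nr_dispatched, 1);	// updater side, serialized
 *	total = blkg_stat_read(&nr_dispatched);	// reader side, lockless
 */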

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
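
/*
 * Illustrative sketch: accounting a bio's size against a hypothetical
 * "serviced_bytes" counter by passing the bio's rw flags, whose
 * REQ_WRITE/REQ_SYNC bits select the buckets (bio->bi_rw carries these
 * flags on kernels of this vintage):
 *
 *	blkg_rwstat_add(&serviced_bytes, bio->bi_rw, bio->bi_iter.bi_size);
 */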

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q,
				  struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	rcu_read_unlock();
	return !throtl;
}
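
/*
 * Illustrative caller sketch (not the actual call site, which lives in
 * the bio submission path): a %false return means the bio was consumed
 * by the throttler and must not be dispatched further.
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return;
 */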

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */