#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */
#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};
struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};
struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the end.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
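
/*
 * Illustrative sketch (not part of this interface): a hypothetical policy
 * "foo" embedding blkg_policy_data in its per-blkg structure, as described
 * in the comment above, and converting back with container_of().  All
 * foo_* names here and below are made up for the example.
 */
struct foo_blkg_data {
	u64				weight;	/* policy-private state */
	struct blkg_policy_data		pd;	/* embedded member */
};

static inline struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
}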
/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated to it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
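
/*
 * Illustrative sketch: a pd_alloc_fn/pd_free_fn pair for the hypothetical
 * "foo" policy sketched above.  Assumes <linux/slab.h> for
 * kzalloc_node()/kfree().
 */
static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
{
	struct foo_blkg_data *fd;

	/* allocate the larger, policy-private structure ... */
	fd = kzalloc_node(sizeof(*fd), gfp, node);
	if (!fd)
		return NULL;
	/* ... but hand the embedded blkg_policy_data back to blkcg core */
	return &fd->pd;
}

static void foo_pd_free(struct blkg_policy_data *pd)
{
	kfree(pd_to_foo(pd));
}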
struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
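
/*
 * Illustrative sketch: registering the hypothetical "foo" policy from the
 * sketches above.  blkcg_policy_register() assigns ->plid on success; a
 * real policy would also wire up cftypes and the remaining callbacks.
 */
static struct blkcg_policy blkcg_policy_foo = {
	.pd_alloc_fn	= foo_pd_alloc,
	.pd_free_fn	= foo_pd_free,
};

static int __init foo_init(void)
{
	return blkcg_policy_register(&blkcg_policy_foo);
}

static void __exit foo_exit(void)
{
	blkcg_policy_unregister(&blkcg_policy_foo);
}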
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
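
/*
 * Illustrative sketch: a cgroup file write handler for the hypothetical
 * "foo" policy.  Assuming blkg_conf_prep() parses "MAJ:MIN v" from @buf,
 * pins the matching blkg in ctx.blkg and leaves the parsed value in ctx.v,
 * while blkg_conf_finish() drops the locks and references it acquired.
 * Uses css_to_blkcg()/blkg_to_pd(), defined later in this header.
 */
static ssize_t foo_set_weight(struct kernfs_open_file *of, char *buf,
			      size_t nbytes, loff_t off)
{
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct blkg_conf_ctx ctx;
	struct foo_blkg_data *fd;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;

	/* ctx.blkg is pinned until blkg_conf_finish() */
	fd = pd_to_foo(blkg_to_pd(ctx.blkg, &blkcg_policy_foo));
	fd->weight = ctx.v;

	blkg_conf_finish(&ctx);
	return nbytes;
}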
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, blkio_cgrp_id);
}
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
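
/*
 * Illustrative sketch: walking up the hierarchy with blkcg_parent(), e.g.
 * to test ancestry.  foo_blkcg_is_descendant() is a made-up helper.
 */
static bool foo_blkcg_is_descendant(struct blkcg *blkcg,
				    struct blkcg *ancestor)
{
	while (blkcg) {
		if (blkcg == ancestor)
			return true;
		blkcg = blkcg_parent(blkcg);
	}
	return false;
}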
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
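
/*
 * Illustrative sketch: a lookup under the required RCU read lock.  The
 * returned blkg is only stable within the RCU section unless a reference
 * is taken with blkg_get().
 */
static bool foo_task_has_blkg(struct request_queue *q)
{
	bool ret;

	rcu_read_lock();
	ret = blkg_lookup(task_blkcg(current), q) != NULL;
	rcu_read_unlock();
	return ret;
}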
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}
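
/*
 * Illustrative sketch: using blkg_path() to tag a diagnostic message with
 * the owning cgroup's path.  foo_dump_blkg() is a made-up helper.
 */
static void foo_dump_blkg(struct blkcg_gq *blkg)
{
	char path[128];

	if (blkg_path(blkg, path, sizeof(path)) < 0)
		return;
	pr_info("blkg for cgroup %s\n", path);
}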
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}
void __blkg_release_rcu(struct rcu_head *rcu);
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
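
/*
 * Illustrative sketch: counting the blkgs in @p_blkg's subtree with the
 * pre-order iterator.  The caller must hold the RCU read lock.
 */
static int foo_count_subtree(struct blkcg_gq *p_blkg)
{
	struct cgroup_subsys_state *pos_css;
	struct blkcg_gq *blkg;
	int n = 0;

	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg)
		n++;
	return n;
}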
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}
/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}
/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}
/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}
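
/*
 * Illustrative sketch: typical blkg_stat usage.  Writers are serialized by
 * the caller (e.g. under queue_lock); readers need no locking.
 */
static struct blkg_stat foo_bytes_stat;

static void foo_stat_demo(void)
{
	uint64_t total;

	blkg_stat_init(&foo_bytes_stat);
	blkg_stat_add(&foo_bytes_stat, 4096);	 /* writer side, serialized */
	total = blkg_stat_read(&foo_bytes_stat); /* lock-free reader */
	WARN_ON(total != 4096);
}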
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}
/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
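
/*
 * Illustrative sketch: accounting a 4k synchronous write in a blkg_rwstat
 * and reading back the read+write total.
 */
static struct blkg_rwstat foo_io_stat;

static void foo_rwstat_demo(void)
{
	blkg_rwstat_init(&foo_io_stat);
	blkg_rwstat_add(&foo_io_stat, REQ_WRITE | REQ_SYNC, 4096);
	WARN_ON(blkg_rwstat_total(&foo_io_stat) != 4096);
}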
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	rcu_read_unlock();
	return !throtl;
}
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */