/*
 * Interface for controlling IO bandwidth on a request queue
 *
 * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/blktrace_api.h>
#include "blk-cgroup.h"
/* Max dispatch from a group in 1 round */
static int throtl_grp_quantum = 8;

/* Total max dispatch from all groups in one round */
static int throtl_quantum = 32;

/* Throttling is performed over 100ms slice and after that slice is renewed */
static unsigned long throtl_slice = HZ/10;	/* 100 ms */

/* A workqueue to queue throttle related work */
static struct workqueue_struct *kthrotld_workqueue;
static void throtl_schedule_delayed_work(struct throtl_data *td,
					 unsigned long delay);
struct throtl_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned int count;
	unsigned long min_disptime;
};

#define THROTL_RB_ROOT	(struct throtl_rb_root) { .rb = RB_ROOT, .left = NULL, \
			.count = 0, .min_disptime = 0}

#define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
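/*
 * Per-cgroup, per-queue throttling state. One throtl_grp is created for
 * each (blkio cgroup, request queue) pair that issues throttled IO.
 */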
struct throtl_grp {
	/* List of throtl groups on the request queue*/
	struct hlist_node tg_node;

	/* active throtl group service_tree member */
	struct rb_node rb_node;

	/*
	 * Dispatch time in jiffies. This is the estimated time when group
	 * will unthrottle and is ready to dispatch more bio. It is used as
	 * key to sort active groups in service tree.
	 */
	unsigned long disptime;

	struct blkio_group blkg;
	atomic_t ref;
	unsigned int flags;

	/* Two lists for READ and WRITE */
	struct bio_list bio_lists[2];

	/* Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/* bytes per second rate limits */
	uint64_t bps[2];

	/* IOPS limits */
	unsigned int iops[2];

	/* Number of bytes dispatched in current slice */
	uint64_t bytes_disp[2];
	/* Number of bio's dispatched in current slice */
	unsigned int io_disp[2];

	/* When did we start a new slice */
	unsigned long slice_start[2];
	unsigned long slice_end[2];

	/* Some throttle limits got updated for the group */
	bool limits_changed;

	struct rcu_head rcu_head;
};
struct throtl_data
{
	/* List of throtl groups */
	struct hlist_head tg_list;

	/* service tree for active throtl groups */
	struct throtl_rb_root tg_service_tree;

	struct throtl_grp *root_tg;
	struct request_queue *queue;

	/* Total Number of queued bios on READ and WRITE lists */
	unsigned int nr_queued[2];

	/*
	 * number of total undestroyed groups
	 */
	unsigned int nr_undestroyed_grps;

	/* Work for dispatching throttled bios */
	struct delayed_work throtl_work;

	bool limits_changed;
};
enum tg_state_flags {
	THROTL_TG_FLAG_on_rr = 0,	/* on round-robin busy list */
};

#define THROTL_TG_FNS(name)						\
static inline void throtl_mark_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags |= (1 << THROTL_TG_FLAG_##name);			\
}									\
static inline void throtl_clear_tg_##name(struct throtl_grp *tg)	\
{									\
	(tg)->flags &= ~(1 << THROTL_TG_FLAG_##name);			\
}									\
static inline int throtl_tg_##name(const struct throtl_grp *tg)	\
{									\
	return ((tg)->flags & (1 << THROTL_TG_FLAG_##name)) != 0;	\
}

THROTL_TG_FNS(on_rr);
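/*
 * The macro expansion above generates throtl_mark_tg_on_rr(),
 * throtl_clear_tg_on_rr() and throtl_tg_on_rr(), the helpers used to track
 * whether a group is currently on the round-robin service tree.
 */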
#define throtl_log_tg(td, tg, fmt, args...)				\
	blk_add_trace_msg((td)->queue, "throtl %s " fmt,		\
				blkg_path(&(tg)->blkg), ##args);	\

#define throtl_log(td, fmt, args...)	\
	blk_add_trace_msg((td)->queue, "throtl " fmt, ##args)
static inline struct throtl_grp *tg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct throtl_grp, blkg);

	return NULL;
}
static inline unsigned int total_nr_queued(struct throtl_data *td)
{
	return td->nr_queued[0] + td->nr_queued[1];
}
static inline struct throtl_grp *throtl_ref_get_tg(struct throtl_grp *tg)
{
	atomic_inc(&tg->ref);
	return tg;
}
static void throtl_free_tg(struct rcu_head *head)
{
	struct throtl_grp *tg;

	tg = container_of(head, struct throtl_grp, rcu_head);
	free_percpu(tg->blkg.stats_cpu);
	kfree(tg);
}
static void throtl_put_tg(struct throtl_grp *tg)
{
	BUG_ON(atomic_read(&tg->ref) <= 0);
	if (!atomic_dec_and_test(&tg->ref))
		return;

	/*
	 * A group is freed in rcu manner. But having an rcu lock does not
	 * mean that one can access all the fields of blkg and assume these
	 * are valid. For example, don't try to follow throtl_data and
	 * request queue links.
	 *
	 * Having a reference to blkg under an rcu allows access to only
	 * values local to groups like group stats and group rate limits.
	 */
	call_rcu(&tg->rcu_head, throtl_free_tg);
}
static struct blkio_group *throtl_alloc_blkio_group(struct request_queue *q,
					struct blkio_cgroup *blkcg)
{
	struct throtl_grp *tg;

	tg = kzalloc_node(sizeof(*tg), GFP_ATOMIC, q->node);
	if (!tg)
		return NULL;

	INIT_HLIST_NODE(&tg->tg_node);
	RB_CLEAR_NODE(&tg->rb_node);
	bio_list_init(&tg->bio_lists[0]);
	bio_list_init(&tg->bio_lists[1]);
	tg->limits_changed = false;

	tg->bps[READ] = -1;
	tg->bps[WRITE] = -1;
	tg->iops[READ] = -1;
	tg->iops[WRITE] = -1;

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * request queue which will be dropped by either request queue
	 * exit or cgroup deletion path depending on who is exiting first.
	 */
	atomic_set(&tg->ref, 1);

	return &tg->blkg;
}
static void
__throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	struct backing_dev_info *bdi = &td->queue->backing_dev_info;
	unsigned int major, minor;

	if (!tg || tg->blkg.dev)
		return;

	/*
	 * Fill in device details for a group which might not have been
	 * filled at group creation time as queue was being instantiated
	 * and driver had not attached a device yet
	 */
	if (bdi->dev && dev_name(bdi->dev)) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		tg->blkg.dev = MKDEV(major, minor);
	}
}
/*
 * Should be called without queue lock held. Here the queue lock will be
 * taken rarely. It will be taken only once during the life time of a group
 * if need be.
 */
static void
throtl_tg_fill_dev_details(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!tg || tg->blkg.dev)
		return;

	spin_lock_irq(td->queue->queue_lock);
	__throtl_tg_fill_dev_details(td, tg);
	spin_unlock_irq(td->queue->queue_lock);
}
static void throtl_link_blkio_group(struct request_queue *q,
			struct blkio_group *blkg)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg = tg_of_blkg(blkg);

	__throtl_tg_fill_dev_details(td, tg);

	hlist_add_head(&tg->tg_node, &td->tg_list);
	td->nr_undestroyed_grps++;
}
264 throtl_grp
*throtl_lookup_tg(struct throtl_data
*td
, struct blkio_cgroup
*blkcg
)
266 struct throtl_grp
*tg
= NULL
;
269 * This is the common case when there are no blkio cgroups.
270 * Avoid lookup in this case
272 if (blkcg
== &blkio_root_cgroup
)
275 tg
= tg_of_blkg(blkg_lookup(blkcg
, td
->queue
,
276 BLKIO_POLICY_THROTL
));
278 __throtl_tg_fill_dev_details(td
, tg
);
static struct throtl_grp *throtl_lookup_create_tg(struct throtl_data *td,
						  struct blkio_cgroup *blkcg)
{
	struct request_queue *q = td->queue;
	struct throtl_grp *tg = NULL;

	/*
	 * This is the common case when there are no blkio cgroups.
	 * Avoid lookup in this case
	 */
	if (blkcg == &blkio_root_cgroup) {
		tg = td->root_tg;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_THROTL, false);

		/* if %NULL and @q is alive, fall back to root_tg */
		if (blkg)
			tg = tg_of_blkg(blkg);
		else if (!blk_queue_dead(q))
			tg = td->root_tg;
	}

	__throtl_tg_fill_dev_details(td, tg);
	return tg;
}
static struct throtl_grp *throtl_rb_first(struct throtl_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_tg(root->left);

	return NULL;
}
static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void throtl_rb_erase(struct rb_node *n, struct throtl_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
static void update_min_dispatch_time(struct throtl_rb_root *st)
{
	struct throtl_grp *tg;

	tg = throtl_rb_first(st);
	if (!tg)
		return;

	st->min_disptime = tg->disptime;
}
static void
tg_service_tree_add(struct throtl_rb_root *st, struct throtl_grp *tg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct throtl_grp *__tg;
	unsigned long key = tg->disptime;
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__tg = rb_entry_tg(parent);

		if (time_before(key, __tg->disptime))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &tg->rb_node;

	rb_link_node(&tg->rb_node, parent, node);
	rb_insert_color(&tg->rb_node, &st->rb);
}
static void __throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	tg_service_tree_add(st, tg);
	throtl_mark_tg_on_rr(tg);
	st->count++;
}

static void throtl_enqueue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (!throtl_tg_on_rr(tg))
		__throtl_enqueue_tg(td, tg);
}

static void __throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	throtl_rb_erase(&tg->rb_node, &td->tg_service_tree);
	throtl_clear_tg_on_rr(tg);
}

static void throtl_dequeue_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	if (throtl_tg_on_rr(tg))
		__throtl_dequeue_tg(td, tg);
}
static void throtl_schedule_next_dispatch(struct throtl_data *td)
{
	struct throtl_rb_root *st = &td->tg_service_tree;

	/*
	 * If there are more bios pending, schedule more work.
	 */
	if (!total_nr_queued(td))
		return;

	update_min_dispatch_time(st);

	if (time_before_eq(st->min_disptime, jiffies))
		throtl_schedule_delayed_work(td, 0);
	else
		throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
}
static inline void
throtl_start_new_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	tg->bytes_disp[rw] = 0;
	tg->io_disp[rw] = 0;
	tg->slice_start[rw] = jiffies;
	tg->slice_end[rw] = jiffies + throtl_slice;
	throtl_log_tg(td, tg, "[%c] new slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
static inline void throtl_set_slice_end(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
}

static inline void throtl_extend_slice(struct throtl_data *td,
		struct throtl_grp *tg, bool rw, unsigned long jiffy_end)
{
	tg->slice_end[rw] = roundup(jiffy_end, throtl_slice);
	throtl_log_tg(td, tg, "[%c] extend slice start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', tg->slice_start[rw],
			tg->slice_end[rw], jiffies);
}
/* Determine if previously allocated or extended slice is complete or not */
static bool
throtl_slice_used(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
		return 0;

	return 1;
}
/* Trim the used slices and adjust slice start accordingly */
static inline void
throtl_trim_slice(struct throtl_data *td, struct throtl_grp *tg, bool rw)
{
	unsigned long nr_slices, time_elapsed, io_trim;
	u64 bytes_trim, tmp;

	BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));

	/*
	 * If bps are unlimited (-1), then the time slice doesn't get
	 * renewed. Don't try to trim the slice if the slice is used. A new
	 * slice will start when appropriate.
	 */
	if (throtl_slice_used(td, tg, rw))
		return;

	/*
	 * A bio has been dispatched. Also adjust slice_end. It might happen
	 * that initially cgroup limit was very low resulting in high
	 * slice_end, but later limit was bumped up and bio was dispatched
	 * sooner, then we need to reduce slice_end. A high bogus slice_end
	 * is bad because it does not allow a new slice to start.
	 */
	throtl_set_slice_end(td, tg, rw, jiffies + throtl_slice);

	time_elapsed = jiffies - tg->slice_start[rw];

	nr_slices = time_elapsed / throtl_slice;

	if (!nr_slices)
		return;
	tmp = tg->bps[rw] * throtl_slice * nr_slices;
	do_div(tmp, HZ);
	bytes_trim = tmp;

	io_trim = (tg->iops[rw] * throtl_slice * nr_slices)/HZ;

	if (!bytes_trim && !io_trim)
		return;

	if (tg->bytes_disp[rw] >= bytes_trim)
		tg->bytes_disp[rw] -= bytes_trim;
	else
		tg->bytes_disp[rw] = 0;

	if (tg->io_disp[rw] >= io_trim)
		tg->io_disp[rw] -= io_trim;
	else
		tg->io_disp[rw] = 0;

	tg->slice_start[rw] += nr_slices * throtl_slice;

	throtl_log_tg(td, tg, "[%c] trim slice nr=%lu bytes=%llu io=%lu"
			" start=%lu end=%lu jiffies=%lu",
			rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
			tg->slice_start[rw], tg->slice_end[rw], jiffies);
}
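/*
 * Illustrative trim example (not from the original source; assumes HZ=1000,
 * so throtl_slice is 100 jiffies): with bps[rw] = 1048576 (1 MiB/s) and 250
 * jiffies elapsed since slice_start, nr_slices = 2 and bytes_trim =
 * 1048576 * 100 * 2 / 1000 = 209715, so roughly 200 KiB of already-accounted
 * dispatch is forgotten and slice_start moves forward by 200 jiffies.
 */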
static bool tg_with_in_iops_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned int io_allowed;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
	u64 tmp;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	/*
	 * jiffy_elapsed_rnd should not be a big value as minimum iops can be
	 * 1 then at max jiffy elapsed should be equivalent of 1 second as we
	 * will allow dispatch after 1 second and after that slice should
	 * have been trimmed.
	 */

	tmp = (u64)tg->iops[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);

	if (tmp > UINT_MAX)
		io_allowed = UINT_MAX;
	else
		io_allowed = tmp;

	if (tg->io_disp[rw] + 1 <= io_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	jiffy_wait = ((tg->io_disp[rw] + 1) * HZ)/tg->iops[rw] + 1;

	if (jiffy_wait > jiffy_elapsed)
		jiffy_wait = jiffy_wait - jiffy_elapsed;
	else
		jiffy_wait = 0;

	if (wait)
		*wait = jiffy_wait;
	return 0;
}
static bool tg_with_in_bps_limit(struct throtl_data *td, struct throtl_grp *tg,
		struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	u64 bytes_allowed, extra_bytes, tmp;
	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;

	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];

	/* Slice has just started. Consider one slice interval */
	if (!jiffy_elapsed)
		jiffy_elapsed_rnd = throtl_slice;

	jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, throtl_slice);

	tmp = tg->bps[rw] * jiffy_elapsed_rnd;
	do_div(tmp, HZ);
	bytes_allowed = tmp;

	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/* Calc approx time to dispatch */
	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

	if (!jiffy_wait)
		jiffy_wait = 1;

	/*
	 * This wait time is without taking into consideration the rounding
	 * up we did. Add that time also.
	 */
	jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
	if (wait)
		*wait = jiffy_wait;
	return 0;
}
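/*
 * Illustrative example (not from the original source; assumes HZ=1000 and
 * throtl_slice=100): with bps[rw] = 1048576 (1 MiB/s), bytes_disp = 0, 100
 * jiffies into the slice and a 256 KiB bio, bytes_allowed =
 * 1048576 * 100 / 1000 = 104857, extra_bytes = 262144 - 104857 = 157287 and
 * jiffy_wait = 157287 * 1000 / 1048576 = 150, i.e. the bio must wait about
 * 150ms before it fits within the configured rate.
 */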
static bool tg_no_rule_group(struct throtl_grp *tg, bool rw) {
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1)
		return 1;

	return 0;
}
/*
 * Returns whether one can dispatch a bio or not. Also returns approx number
 * of jiffies to wait before this bio is with-in IO rate and can be dispatched
 */
static bool tg_may_dispatch(struct throtl_data *td, struct throtl_grp *tg,
				struct bio *bio, unsigned long *wait)
{
	bool rw = bio_data_dir(bio);
	unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;

	/*
	 * Currently whole state machine of group depends on first bio
	 * queued in the group bio list. So one should not be calling
	 * this function with a different bio if there are other bios
	 * queued.
	 */
	BUG_ON(tg->nr_queued[rw] && bio != bio_list_peek(&tg->bio_lists[rw]));

	/* If tg->bps = -1, then BW is unlimited */
	if (tg->bps[rw] == -1 && tg->iops[rw] == -1) {
		if (wait)
			*wait = 0;
		return 1;
	}

	/*
	 * If previous slice expired, start a new one otherwise renew/extend
	 * existing slice to make sure it is at least throtl_slice interval
	 * long since now.
	 */
	if (throtl_slice_used(td, tg, rw))
		throtl_start_new_slice(td, tg, rw);
	else {
		if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
			throtl_extend_slice(td, tg, rw, jiffies + throtl_slice);
	}

	if (tg_with_in_bps_limit(td, tg, bio, &bps_wait)
	    && tg_with_in_iops_limit(td, tg, bio, &iops_wait)) {
		if (wait)
			*wait = 0;
		return 1;
	}

	max_wait = max(bps_wait, iops_wait);

	if (wait)
		*wait = max_wait;

	if (time_before(tg->slice_end[rw], jiffies + max_wait))
		throtl_extend_slice(td, tg, rw, jiffies + max_wait);

	return 0;
}
static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
{
	bool rw = bio_data_dir(bio);
	bool sync = rw_is_sync(bio->bi_rw);

	/* Charge the bio to the group */
	tg->bytes_disp[rw] += bio->bi_size;
	tg->io_disp[rw]++;

	blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, rw, sync);
}
static void throtl_add_bio_tg(struct throtl_data *td, struct throtl_grp *tg,
			struct bio *bio)
{
	bool rw = bio_data_dir(bio);

	bio_list_add(&tg->bio_lists[rw], bio);
	/* Take a bio reference on tg */
	throtl_ref_get_tg(tg);
	tg->nr_queued[rw]++;
	td->nr_queued[rw]++;
	throtl_enqueue_tg(td, tg);
}
static void tg_update_disptime(struct throtl_data *td, struct throtl_grp *tg)
{
	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
	struct bio *bio;

	if ((bio = bio_list_peek(&tg->bio_lists[READ])))
		tg_may_dispatch(td, tg, bio, &read_wait);

	if ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
		tg_may_dispatch(td, tg, bio, &write_wait);

	min_wait = min(read_wait, write_wait);
	disptime = jiffies + min_wait;

	/* Update dispatch time */
	throtl_dequeue_tg(td, tg);
	tg->disptime = disptime;
	throtl_enqueue_tg(td, tg);
}
static void tg_dispatch_one_bio(struct throtl_data *td, struct throtl_grp *tg,
				bool rw, struct bio_list *bl)
{
	struct bio *bio;

	bio = bio_list_pop(&tg->bio_lists[rw]);
	tg->nr_queued[rw]--;
	/* Drop bio reference on tg */
	throtl_put_tg(tg);

	BUG_ON(td->nr_queued[rw] <= 0);
	td->nr_queued[rw]--;

	throtl_charge_bio(tg, bio);
	bio_list_add(bl, bio);
	bio->bi_rw |= REQ_THROTTLED;

	throtl_trim_slice(td, tg, rw);
}
static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
				struct bio_list *bl)
{
	unsigned int nr_reads = 0, nr_writes = 0;
	unsigned int max_nr_reads = throtl_grp_quantum*3/4;
	unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
	struct bio *bio;

	/* Try to dispatch 75% READS and 25% WRITES */

	while ((bio = bio_list_peek(&tg->bio_lists[READ]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_reads++;

		if (nr_reads >= max_nr_reads)
			break;
	}

	while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))
		&& tg_may_dispatch(td, tg, bio, NULL)) {

		tg_dispatch_one_bio(td, tg, bio_data_dir(bio), bl);
		nr_writes++;

		if (nr_writes >= max_nr_writes)
			break;
	}

	return nr_reads + nr_writes;
}
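/*
 * With the default throtl_grp_quantum of 8, max_nr_reads works out to 6 and
 * max_nr_writes to 2, which gives the 75%/25% READ/WRITE split mentioned
 * above for a single group per dispatch round.
 */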
static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
{
	unsigned int nr_disp = 0;
	struct throtl_grp *tg;
	struct throtl_rb_root *st = &td->tg_service_tree;

	while (1) {
		tg = throtl_rb_first(st);

		if (!tg)
			break;

		if (time_before(jiffies, tg->disptime))
			break;

		throtl_dequeue_tg(td, tg);

		nr_disp += throtl_dispatch_tg(td, tg, bl);

		if (tg->nr_queued[0] || tg->nr_queued[1]) {
			tg_update_disptime(td, tg);
			throtl_enqueue_tg(td, tg);
		}

		if (nr_disp >= throtl_quantum)
			break;
	}

	return nr_disp;
}
static void throtl_process_limit_change(struct throtl_data *td)
{
	struct throtl_grp *tg;
	struct hlist_node *pos, *n;

	if (!td->limits_changed)
		return;

	xchg(&td->limits_changed, false);

	throtl_log(td, "limits changed");

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!tg->limits_changed)
			continue;

		if (!xchg(&tg->limits_changed, false))
			continue;

		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
			tg->iops[READ], tg->iops[WRITE]);

		/*
		 * Restart the slices for both READ and WRITES. It
		 * might happen that a group's limits are dropped
		 * suddenly and we don't want to account recently
		 * dispatched IO with the new low rate.
		 */
		throtl_start_new_slice(td, tg, 0);
		throtl_start_new_slice(td, tg, 1);

		if (throtl_tg_on_rr(tg))
			tg_update_disptime(td, tg);
	}
}
/* Dispatch throttled bios. Should be called without queue lock held. */
static int throtl_dispatch(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	unsigned int nr_disp = 0;
	struct bio_list bio_list_on_stack;
	struct bio *bio;
	struct blk_plug plug;

	spin_lock_irq(q->queue_lock);

	throtl_process_limit_change(td);

	if (!total_nr_queued(td))
		goto out;

	bio_list_init(&bio_list_on_stack);

	throtl_log(td, "dispatch nr_queued=%u read=%u write=%u",
			total_nr_queued(td), td->nr_queued[READ],
			td->nr_queued[WRITE]);

	nr_disp = throtl_select_dispatch(td, &bio_list_on_stack);

	if (nr_disp)
		throtl_log(td, "bios disp=%u", nr_disp);

	throtl_schedule_next_dispatch(td);
out:
	spin_unlock_irq(q->queue_lock);

	/*
	 * If we dispatched some requests, unplug the queue to make sure
	 * immediate dispatch
	 */
	if (nr_disp) {
		blk_start_plug(&plug);
		while((bio = bio_list_pop(&bio_list_on_stack)))
			generic_make_request(bio);
		blk_finish_plug(&plug);
	}
	return nr_disp;
}
void blk_throtl_work(struct work_struct *work)
{
	struct throtl_data *td = container_of(work, struct throtl_data,
					throtl_work.work);
	struct request_queue *q = td->queue;

	throtl_dispatch(q);
}
/* Call with queue lock held */
static void
throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
{
	struct delayed_work *dwork = &td->throtl_work;

	/* schedule work if limits changed even if no bio is queued */
	if (total_nr_queued(td) || td->limits_changed) {
		/*
		 * We might have a work scheduled to be executed in future.
		 * Cancel that and schedule a new one.
		 */
		__cancel_delayed_work(dwork);
		queue_delayed_work(kthrotld_workqueue, dwork, delay);
		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
				delay, jiffies);
	}
}
static void
throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&tg->tg_node));

	hlist_del_init(&tg->tg_node);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	throtl_put_tg(tg);
	td->nr_undestroyed_grps--;
}
static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
{
	struct hlist_node *pos, *n;
	struct throtl_grp *tg;
	bool empty = true;

	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
		if (!release_root && tg == td->root_tg)
			continue;

		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * the group too.
		 */
		if (!blkiocg_del_blkio_group(&tg->blkg))
			throtl_destroy_tg(td, tg);
		else
			empty = false;
	}
	return empty;
}
/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). @q is the rcu protected
 * pointer. That means @q is a valid request_queue pointer as long as we
 * are holding the rcu read lock.
 *
 * @q was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if queue was going away, cgroup deletion
 * path got to it first.
 */
void throtl_unlink_blkio_group(struct request_queue *q,
			       struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	throtl_destroy_tg(q->td, tg_of_blkg(blkg));
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static bool throtl_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * Clear tgs but leave the root one alone. This is necessary
	 * because root_tg is expected to be persistent and safe because
	 * blk-throtl can never be disabled while @q is alive. This is a
	 * kludge to prepare for unified blkg. This whole function will be
	 * removed soon.
	 */
	return throtl_release_tgs(q->td, false);
}
static void throtl_update_blkio_group_common(struct throtl_data *td,
				struct throtl_grp *tg)
{
	xchg(&tg->limits_changed, true);
	xchg(&td->limits_changed, true);
	/* Schedule a work now to process the limit change */
	throtl_schedule_delayed_work(td, 0);
}
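/*
 * Note: limits_changed is flipped with xchg() so the cgroup-file write path
 * can flag the change without holding the queue lock; throtl_process_limit_change()
 * later test-and-clears the flags with xchg() before applying the new limits.
 */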
/*
 * For all update functions, @q should be a valid pointer because these
 * update functions are called under blkcg_lock, that means, blkg is
 * valid and in turn @q is valid. queue exit path can not race because
 * of blkcg_lock.
 *
 * Can not take queue lock in update functions as queue lock under blkcg_lock
 * is not allowed. Under other paths we take blkcg_lock under queue_lock.
 */
static void throtl_update_blkio_group_read_bps(struct request_queue *q,
				struct blkio_group *blkg, u64 read_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[READ] = read_bps;
	throtl_update_blkio_group_common(q->td, tg);
}
static void throtl_update_blkio_group_write_bps(struct request_queue *q,
				struct blkio_group *blkg, u64 write_bps)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->bps[WRITE] = write_bps;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_read_iops(struct request_queue *q,
				struct blkio_group *blkg, unsigned int read_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[READ] = read_iops;
	throtl_update_blkio_group_common(q->td, tg);
}

static void throtl_update_blkio_group_write_iops(struct request_queue *q,
				struct blkio_group *blkg, unsigned int write_iops)
{
	struct throtl_grp *tg = tg_of_blkg(blkg);

	tg->iops[WRITE] = write_iops;
	throtl_update_blkio_group_common(q->td, tg);
}
static void throtl_shutdown_wq(struct request_queue *q)
{
	struct throtl_data *td = q->td;

	cancel_delayed_work_sync(&td->throtl_work);
}
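/*
 * Callback table exported to the blkio cgroup core: the core uses it to
 * allocate/link/unlink throttle groups and to push per-cgroup bps/iops limit
 * updates into blk-throttle. The policy is registered in throtl_init() below.
 */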
static struct blkio_policy_type blkio_policy_throtl = {
	.ops = {
		.blkio_alloc_group_fn = throtl_alloc_blkio_group,
		.blkio_link_group_fn = throtl_link_blkio_group,
		.blkio_unlink_group_fn = throtl_unlink_blkio_group,
		.blkio_clear_queue_fn = throtl_clear_queue,
		.blkio_update_group_read_bps_fn =
					throtl_update_blkio_group_read_bps,
		.blkio_update_group_write_bps_fn =
					throtl_update_blkio_group_write_bps,
		.blkio_update_group_read_iops_fn =
					throtl_update_blkio_group_read_iops,
		.blkio_update_group_write_iops_fn =
					throtl_update_blkio_group_write_iops,
	},
	.plid = BLKIO_POLICY_THROTL,
};
bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	struct throtl_data *td = q->td;
	struct throtl_grp *tg;
	bool rw = bio_data_dir(bio), update_disptime = true;
	struct blkio_cgroup *blkcg;
	bool throttled = false;

	if (bio->bi_rw & REQ_THROTTLED) {
		bio->bi_rw &= ~REQ_THROTTLED;
		goto out;
	}

	/*
	 * A throtl_grp pointer retrieved under rcu can be used to access
	 * basic fields like stats and io rates. If a group has no rules,
	 * just update the dispatch stats in lockless manner and return.
	 */
	rcu_read_lock();
	blkcg = task_blkio_cgroup(current);
	tg = throtl_lookup_tg(td, blkcg);
	if (tg) {
		throtl_tg_fill_dev_details(td, tg);

		if (tg_no_rule_group(tg, rw)) {
			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
						      rw, rw_is_sync(bio->bi_rw));
			goto out_unlock_rcu;
		}
	}

	/*
	 * Either group has not been allocated yet or it is not an unlimited
	 * IO group
	 */
	spin_lock_irq(q->queue_lock);
	tg = throtl_lookup_create_tg(td, blkcg);
	if (unlikely(!tg))
		goto out_unlock;

	if (tg->nr_queued[rw]) {
		/*
		 * There is already another bio queued in same dir. No
		 * need to update dispatch time.
		 */
		update_disptime = false;
		goto queue_bio;
	}

	/* Bio is with-in rate limit of group */
	if (tg_may_dispatch(td, tg, bio, NULL)) {
		throtl_charge_bio(tg, bio);

		/*
		 * We need to trim slice even when bios are not being queued
		 * otherwise it might happen that a bio is not queued for
		 * a long time and slice keeps on extending and trim is not
		 * called for a long time. Now if limits are reduced suddenly
		 * we take into account all the IO dispatched so far at the new
		 * low rate and newly queued IO gets a really long dispatch
		 * time.
		 *
		 * So keep on trimming slice even if bio is not queued.
		 */
		throtl_trim_slice(td, tg, rw);
		goto out_unlock;
	}

queue_bio:
	throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu"
			" iodisp=%u iops=%u queued=%d/%d",
			rw == READ ? 'R' : 'W',
			tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
			tg->io_disp[rw], tg->iops[rw],
			tg->nr_queued[READ], tg->nr_queued[WRITE]);

	throtl_add_bio_tg(q->td, tg, bio);
	throttled = true;

	if (update_disptime) {
		tg_update_disptime(td, tg);
		throtl_schedule_next_dispatch(td);
	}

out_unlock:
	spin_unlock_irq(q->queue_lock);
out_unlock_rcu:
	rcu_read_unlock();
out:
	return throttled;
}
/**
 * blk_throtl_drain - drain throttled bios
 * @q: request_queue to drain throttled bios for
 *
 * Dispatch all currently throttled bios on @q through ->make_request_fn().
 */
void blk_throtl_drain(struct request_queue *q)
	__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct throtl_data *td = q->td;
	struct throtl_rb_root *st = &td->tg_service_tree;
	struct throtl_grp *tg;
	struct bio_list bl;
	struct bio *bio;

	WARN_ON_ONCE(!queue_is_locked(q));

	bio_list_init(&bl);

	while ((tg = throtl_rb_first(st))) {
		throtl_dequeue_tg(td, tg);

		while ((bio = bio_list_peek(&tg->bio_lists[READ])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
		while ((bio = bio_list_peek(&tg->bio_lists[WRITE])))
			tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl);
	}
	spin_unlock_irq(q->queue_lock);

	while ((bio = bio_list_pop(&bl)))
		generic_make_request(bio);

	spin_lock_irq(q->queue_lock);
}
int blk_throtl_init(struct request_queue *q)
{
	struct throtl_data *td;
	struct blkio_group *blkg;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	INIT_HLIST_HEAD(&td->tg_list);
	td->tg_service_tree = THROTL_RB_ROOT;
	td->limits_changed = false;
	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);

	q->td = td;
	td->queue = q;

	/* alloc and init root group. */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_THROTL,
				  true);
	if (blkg)
		td->root_tg = tg_of_blkg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (!td->root_tg) {
		kfree(td);
		return -ENOMEM;
	}
	return 0;
}
void blk_throtl_exit(struct request_queue *q)
{
	struct throtl_data *td = q->td;
	bool wait = false;

	BUG_ON(!td);

	throtl_shutdown_wq(q);

	spin_lock_irq(q->queue_lock);
	throtl_release_tgs(td, true);

	/* If there are other groups */
	if (td->nr_undestroyed_grps > 0)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	/*
	 * Wait for tg->blkg->q accessors to exit their grace periods.
	 * Do this wait only if there are other undestroyed groups out
	 * there (other than root group). This can happen if cgroup deletion
	 * path claimed the responsibility of cleaning up a group before
	 * queue cleanup code get to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queue hundreds of times during scan/boot
	 * and synchronize_rcu() can take significant time and slow down boot.
	 */
	if (wait)
		synchronize_rcu();

	/*
	 * Just being safe to make sure after previous flush if some body did
	 * update limits through cgroup and another work got queued, cancel
	 * it.
	 */
	throtl_shutdown_wq(q);
}
void blk_throtl_release(struct request_queue *q)
{
	kfree(q->td);
}
static int __init throtl_init(void)
{
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	blkio_policy_register(&blkio_policy_throtl);
	return 0;
}

module_init(throtl_init);