/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk-cgroup.h"
#include <linux/genhd.h>

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
                              struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
                           struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)       (((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)           (((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)             ((val) & 0xffff)

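/*
 * Example, using names from this file: BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 * BLKIO_THROTL_read_bps_device) packs the policy id into bits 16-31 and the
 * per-policy file id into bits 0-15, so both can later be recovered from
 * cft->private via BLKIOFILE_POLICY() and BLKIOFILE_ATTR().
 */
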
struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
        .can_attach = blkiocg_can_attach,
        .attach = blkiocg_attach,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
        /* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
        .subsys_id = blkio_subsys_id,
#endif
        .use_id = 1,
        .module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
                                            struct blkio_policy_node *pn)
{
        list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
                                           struct blkio_group *blkg)
{
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

        if (blkg->plid == plid)
                return 1;

        return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
                                     struct blkio_policy_node *pn)
{
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
        list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
                         enum blkio_policy_id plid, int fileid)
{
        struct blkio_policy_node *pn;

        list_for_each_entry(pn, &blkcg->policy_list, node) {
                if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
                        return pn;
        }

        return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
        return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

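/* Map a task to its blkio_cgroup via the task's css for this subsystem. */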
struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
        return container_of(task_subsys_state(tsk, blkio_subsys_id),
                            struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {
                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;
                if (blkiop->ops.blkio_update_group_weight_fn)
                        blkiop->ops.blkio_update_group_weight_fn(blkg->key,
                                                                 blkg, weight);
        }
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
                                          int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {

                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_bps_device
                    && blkiop->ops.blkio_update_group_read_bps_fn)
                        blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
                                                                   blkg, bps);

                if (fileid == BLKIO_THROTL_write_bps_device
                    && blkiop->ops.blkio_update_group_write_bps_fn)
                        blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
                                                                    blkg, bps);
        }
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
                                           unsigned int iops, int fileid)
{
        struct blkio_policy_type *blkiop;

        list_for_each_entry(blkiop, &blkio_list, list) {

                /* If this policy does not own the blkg, do not send updates */
                if (blkiop->plid != blkg->plid)
                        continue;

                if (fileid == BLKIO_THROTL_read_iops_device
                    && blkiop->ops.blkio_update_group_read_iops_fn)
                        blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
                                                                    blkg, iops);

                if (fileid == BLKIO_THROTL_write_iops_device
                    && blkiop->ops.blkio_update_group_write_iops_fn)
                        blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
                                                                     blkg, iops);
        }
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
                           bool sync)
{
        if (direction)
                stat[BLKIO_STAT_WRITE] += add;
        else
                stat[BLKIO_STAT_READ] += add;
        if (sync)
                stat[BLKIO_STAT_SYNC] += add;
        else
                stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable depending on the request type.
 * Panics if the value is already zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
        if (direction) {
                BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
                stat[BLKIO_STAT_WRITE]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_READ] == 0);
                stat[BLKIO_STAT_READ]--;
        }
        if (sync) {
                BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
                stat[BLKIO_STAT_SYNC]--;
        } else {
                BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
                stat[BLKIO_STAT_ASYNC]--;
        }
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                            struct blkio_group *curr_blkg)
{
        if (blkio_blkg_waiting(&blkg->stats))
                return;
        if (blkg == curr_blkg)
                return;
        blkg->stats.start_group_wait_time = sched_clock();
        blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                stats->group_wait_time += now - stats->start_group_wait_time;
        blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
        unsigned long long now;

        if (!blkio_blkg_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                stats->empty_time += now - stats->start_empty_time;
        blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        BUG_ON(blkio_blkg_idling(&blkg->stats));
        blkg->stats.start_idle_time = sched_clock();
        blkio_mark_blkg_idling(&blkg->stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        unsigned long long now;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (blkio_blkg_idling(stats)) {
                now = sched_clock();
                if (time_after64(now, stats->start_idle_time))
                        stats->idle_time += now - stats->start_idle_time;
                blkio_clear_blkg_idling(stats);
        }
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        stats->avg_queue_size_sum +=
                stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
                stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
        stats->avg_queue_size_samples++;
        blkio_update_group_wait_time(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
        unsigned long flags;
        struct blkio_group_stats *stats;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;

        if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
            stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        /*
         * The group is already marked empty. This can happen if a cfqq got a
         * new request in the parent group and moved to this group while being
         * added to the service tree. Just ignore the event and move on.
         */
        if (blkio_blkg_empty(stats)) {
                spin_unlock_irqrestore(&blkg->stats_lock, flags);
                return;
        }

        stats->start_empty_time = sched_clock();
        blkio_mark_blkg_empty(stats);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
                                  unsigned long dequeue)
{
        blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
                                        struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif

void blkiocg_update_io_add_stats(struct blkio_group *blkg,
                                 struct blkio_group *curr_blkg, bool direction,
                                 bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
                       sync);
        blkio_end_empty_time(&blkg->stats);
        blkio_set_start_group_wait_time(blkg, curr_blkg);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
                                    bool direction, bool sync)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
                                 direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
                                   unsigned long unaccounted_time)
{
        unsigned long flags;

        spin_lock_irqsave(&blkg->stats_lock, flags);
        blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg->stats.unaccounted_time += unaccounted_time;
#endif
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * Should be called under rcu read lock or queue lock to make sure the blkg
 * pointer is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
                                   uint64_t bytes, bool direction, bool sync)
{
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /*
         * Disabling interrupts provides mutual exclusion between two
         * writes on the same cpu. It probably is not needed for 64bit.
         * Not optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(blkg->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        stats_cpu->sectors += bytes >> 9;
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
                       1, direction, sync);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
                       bytes, direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
        uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
        struct blkio_group_stats *stats;
        unsigned long flags;
        unsigned long long now = sched_clock();

        spin_lock_irqsave(&blkg->stats_lock, flags);
        stats = &blkg->stats;
        if (time_after64(now, io_start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
                               now - io_start_time, direction, sync);
        if (time_after64(io_start_time, start_time))
                blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
                               io_start_time - start_time, direction, sync);
        spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
                                    bool sync)
{
        struct blkio_group_stats_cpu *stats_cpu;
        unsigned long flags;

        /*
         * Disabling interrupts provides mutual exclusion between two
         * writes on the same cpu. It probably is not needed for 64bit.
         * Not optimizing that case yet.
         */
        local_irq_save(flags);

        stats_cpu = this_cpu_ptr(blkg->stats_cpu);

        u64_stats_update_begin(&stats_cpu->syncp);
        blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
                       direction, sync);
        u64_stats_update_end(&stats_cpu->syncp);
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from a sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
        /* Allocate memory for per cpu stats */
        blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
        if (!blkg->stats_cpu)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);

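/*
 * Link @blkg into @blkcg's blkg_list under blkcg->lock and record the key,
 * device and owning policy. The cgroup path is cached in blkg->path for
 * debug output.
 */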
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                             struct blkio_group *blkg, void *key, dev_t dev,
                             enum blkio_policy_id plid)
{
        unsigned long flags;

        spin_lock_irqsave(&blkcg->lock, flags);
        spin_lock_init(&blkg->stats_lock);
        rcu_assign_pointer(blkg->key, key);
        blkg->blkcg_id = css_id(&blkcg->css);
        hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
        blkg->plid = plid;
        spin_unlock_irqrestore(&blkcg->lock, flags);
        /* Need to take a css reference? */
        cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
        blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        hlist_del_init_rcu(&blkg->blkcg_node);
        blkg->blkcg_id = 0;
}

/*
 * Returns 0 if the blkio_group was still on the cgroup list. Otherwise
 * returns 1, indicating that the blkio_group was unhashed by the time we
 * got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
        struct blkio_cgroup *blkcg;
        unsigned long flags;
        struct cgroup_subsys_state *css;
        int ret = 1;

        rcu_read_lock();
        css = css_lookup(&blkio_subsys, blkg->blkcg_id);
        if (css) {
                blkcg = container_of(css, struct blkio_cgroup, css);
                spin_lock_irqsave(&blkcg->lock, flags);
                if (!hlist_unhashed(&blkg->blkcg_node)) {
                        __blkiocg_del_blkio_group(blkg);
                        ret = 0;
                }
                spin_unlock_irqrestore(&blkcg->lock, flags);
        }

        rcu_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        void *__key;

        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                __key = blkg->key;
                if (__key == key)
                        return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
        struct blkio_group_stats_cpu *stats_cpu;
        int i, j, k;
        /*
         * Note: On a 64 bit arch this should not be an issue. This has the
         * possibility of returning some inconsistent value on a 32bit arch,
         * as a 64bit update on 32bit is non atomic. Taking care of this
         * corner case makes the code very complicated, like sending IPIs to
         * cpus, taking care of stats of offline cpus etc.
         *
         * Resetting stats is more of a debug feature anyway and this is a
         * corner case, so don't complicate the code until and unless it
         * becomes a real issue.
         */
        for_each_possible_cpu(i) {
                stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
                stats_cpu->sectors = 0;
                for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
                        for (k = 0; k < BLKIO_STAT_TOTAL; k++)
                                stats_cpu->stat_arr_cpu[j][k] = 0;
        }
}

static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
        struct blkio_cgroup *blkcg;
        struct blkio_group *blkg;
        struct blkio_group_stats *stats;
        struct hlist_node *n;
        uint64_t queued[BLKIO_STAT_TOTAL];
        int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        bool idling, waiting, empty;
        unsigned long long now = sched_clock();
#endif

        blkcg = cgroup_to_blkio_cgroup(cgroup);
        spin_lock_irq(&blkcg->lock);
        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                spin_lock(&blkg->stats_lock);
                stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
                idling = blkio_blkg_idling(stats);
                waiting = blkio_blkg_waiting(stats);
                empty = blkio_blkg_empty(stats);
#endif
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
                memset(stats, 0, sizeof(struct blkio_group_stats));
                for (i = 0; i < BLKIO_STAT_TOTAL; i++)
                        stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
                if (idling) {
                        blkio_mark_blkg_idling(stats);
                        stats->start_idle_time = now;
                }
                if (waiting) {
                        blkio_mark_blkg_waiting(stats);
                        stats->start_group_wait_time = now;
                }
                if (empty) {
                        blkio_mark_blkg_empty(stats);
                        stats->start_empty_time = now;
                }
#endif
                spin_unlock(&blkg->stats_lock);

                /* Reset per cpu stats which don't take blkg->stats_lock */
                blkio_reset_stats_cpu(blkg);
        }

        spin_unlock_irq(&blkcg->lock);
        return 0;
}

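/*
 * Build the "major:minor [Read|Write|Sync|Async|Total]" key used when
 * filling the map-style stat files below.
 */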
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
                               int chars_left, bool diskname_only)
{
        snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
        chars_left -= strlen(str);
        if (chars_left <= 0) {
                printk(KERN_WARNING
                       "Possibly incorrect cgroup stat display format");
                return;
        }
        if (diskname_only)
                return;
        switch (type) {
        case BLKIO_STAT_READ:
                strlcat(str, " Read", chars_left);
                break;
        case BLKIO_STAT_WRITE:
                strlcat(str, " Write", chars_left);
                break;
        case BLKIO_STAT_SYNC:
                strlcat(str, " Sync", chars_left);
                break;
        case BLKIO_STAT_ASYNC:
                strlcat(str, " Async", chars_left);
                break;
        case BLKIO_STAT_TOTAL:
                strlcat(str, " Total", chars_left);
                break;
        default:
                strlcat(str, " Invalid", chars_left);
        }
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
                                struct cgroup_map_cb *cb, dev_t dev)
{
        blkio_get_key_name(0, dev, str, chars_left, true);
        cb->fill(cb, str, val);
        return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
                        enum stat_type_cpu type, enum stat_sub_type sub_type)
{
        int cpu;
        struct blkio_group_stats_cpu *stats_cpu;
        u64 val = 0, tval;

        for_each_possible_cpu(cpu) {
                unsigned int start;
                stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

                do {
                        start = u64_stats_fetch_begin(&stats_cpu->syncp);
                        if (type == BLKIO_STAT_CPU_SECTORS)
                                tval = stats_cpu->sectors;
                        else
                                tval = stats_cpu->stat_arr_cpu[type][sub_type];
                } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

                val += tval;
        }

        return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
                struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
        uint64_t disk_total, val;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_CPU_SECTORS) {
                val = blkio_read_stat_cpu(blkg, type, 0);
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
        }

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
             sub_type++) {
                blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
                val = blkio_read_stat_cpu(blkg, type, sub_type);
                cb->fill(cb, key_str, val);
        }

        disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
                     blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

        blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
                struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
        uint64_t disk_total;
        char key_str[MAX_KEY_LEN];
        enum stat_sub_type sub_type;

        if (type == BLKIO_STAT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        if (type == BLKIO_STAT_UNACCOUNTED_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.unaccounted_time, cb, dev);
        if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
                uint64_t sum = blkg->stats.avg_queue_size_sum;
                uint64_t samples = blkg->stats.avg_queue_size_samples;
                if (samples)
                        do_div(sum, samples);
                else
                        sum = 0;
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
        }
        if (type == BLKIO_STAT_GROUP_WAIT_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.group_wait_time, cb, dev);
        if (type == BLKIO_STAT_IDLE_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.idle_time, cb, dev);
        if (type == BLKIO_STAT_EMPTY_TIME)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.empty_time, cb, dev);
        if (type == BLKIO_STAT_DEQUEUE)
                return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
                                       blkg->stats.dequeue, cb, dev);
#endif

        for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
             sub_type++) {
                blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
                cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
        }
        disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
                     blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
        blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
        cb->fill(cb, key_str, disk_total);
        return disk_total;
}

static int blkio_check_dev_num(dev_t dev)
{
        int part = 0;
        struct gendisk *disk;

        disk = get_gendisk(dev, &part);
        if (!disk || part)
                return -ENODEV;

        return 0;
}

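/*
 * Parse a rule of the form "<major>:<minor> <value>" into @newpn. As a
 * hypothetical example, writing "8:16 1048576" to throttle.read_bps_device
 * would cap reads on device 8:16 at 1MB/s.
 */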
static int blkio_policy_parse_and_set(char *buf,
        struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
        char *s[4], *p, *major_s = NULL, *minor_s = NULL;
        int ret;
        unsigned long major, minor, temp;
        int i = 0;
        dev_t dev;
        u64 bps, iops;

        memset(s, 0, sizeof(s));

        while ((p = strsep(&buf, " ")) != NULL) {
                if (!*p)
                        continue;

                s[i++] = p;

                /* Prevent the input from having too many fields */
                if (i == 3)
                        break;
        }

        if (i != 2)
                return -EINVAL;

        p = strsep(&s[0], ":");
        if (p != NULL)
                major_s = p;
        else
                return -EINVAL;

        minor_s = s[0];
        if (!minor_s)
                return -EINVAL;

        ret = strict_strtoul(major_s, 10, &major);
        if (ret)
                return -EINVAL;

        ret = strict_strtoul(minor_s, 10, &minor);
        if (ret)
                return -EINVAL;

        dev = MKDEV(major, minor);

        ret = blkio_check_dev_num(dev);
        if (ret)
                return ret;

        newpn->dev = dev;

        if (s[1] == NULL)
                return -EINVAL;

        switch (plid) {
        case BLKIO_POLICY_PROP:
                ret = strict_strtoul(s[1], 10, &temp);
                if (ret || (temp < BLKIO_WEIGHT_MIN && temp > 0) ||
                    temp > BLKIO_WEIGHT_MAX)
                        return -EINVAL;

                newpn->plid = plid;
                newpn->fileid = fileid;
                newpn->val.weight = temp;
                break;
        case BLKIO_POLICY_THROTL:
                switch (fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                        ret = strict_strtoull(s[1], 10, &bps);
                        if (ret)
                                return -EINVAL;

                        newpn->plid = plid;
                        newpn->fileid = fileid;
                        newpn->val.bps = bps;
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        ret = strict_strtoull(s[1], 10, &iops);
                        if (ret)
                                return -EINVAL;

                        if (iops > THROTL_IOPS_MAX)
                                return -EINVAL;

                        newpn->plid = plid;
                        newpn->fileid = fileid;
                        newpn->val.iops = (unsigned int)iops;
                        break;
                }
                break;
        default:
                BUG();
        }

        return 0;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
                              dev_t dev)
{
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
                                      BLKIO_PROP_weight_device);
        if (pn)
                return pn->val.weight;
        else
                return blkcg->weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                      BLKIO_THROTL_read_bps_device);
        if (pn)
                return pn->val.bps;
        else
                return -1;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                      BLKIO_THROTL_write_bps_device);
        if (pn)
                return pn->val.bps;
        else
                return -1;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                      BLKIO_THROTL_read_iops_device);
        if (pn)
                return pn->val.iops;
        else
                return -1;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
        struct blkio_policy_node *pn;

        pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
                                      BLKIO_THROTL_write_iops_device);
        if (pn)
                return pn->val.iops;
        else
                return -1;
}

/* Checks whether the user asked to delete a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
        switch (pn->plid) {
        case BLKIO_POLICY_PROP:
                if (pn->val.weight == 0)
                        return 1;
                break;
        case BLKIO_POLICY_THROTL:
                switch (pn->fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                        if (pn->val.bps == 0)
                                return 1;
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        if (pn->val.iops == 0)
                                return 1;
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
                                     struct blkio_policy_node *newpn)
{
        switch (oldpn->plid) {
        case BLKIO_POLICY_PROP:
                oldpn->val.weight = newpn->val.weight;
                break;
        case BLKIO_POLICY_THROTL:
                switch (newpn->fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                        oldpn->val.bps = newpn->val.bps;
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        oldpn->val.iops = newpn->val.iops;
                }
                break;
        default:
                BUG();
        }
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
                struct blkio_group *blkg, struct blkio_policy_node *pn)
{
        unsigned int weight, iops;
        u64 bps;

        switch (pn->plid) {
        case BLKIO_POLICY_PROP:
                weight = pn->val.weight ? pn->val.weight :
                         blkcg->weight;
                blkio_update_group_weight(blkg, weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (pn->fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                        bps = pn->val.bps ? pn->val.bps : (-1);
                        blkio_update_group_bps(blkg, bps, pn->fileid);
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        iops = pn->val.iops ? pn->val.iops : (-1);
                        blkio_update_group_iops(blkg, iops, pn->fileid);
                        break;
                }
                break;
        default:
                BUG();
        }
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
                                          struct blkio_policy_node *pn)
{
        struct blkio_group *blkg;
        struct hlist_node *n;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                if (pn->dev != blkg->dev || pn->plid != blkg->plid)
                        continue;
                blkio_update_blkg_policy(blkcg, blkg, pn);
        }

        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
}

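/*
 * Write handler for the per-device rule files. A hypothetical example:
 * "echo 8:16 300 > blkio.weight_device" adds (or updates) a weight rule
 * for device 8:16, and writing a value of 0 deletes the rule again.
 */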
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
                              const char *buffer)
{
        int ret = 0;
        char *buf;
        struct blkio_policy_node *newpn, *pn;
        struct blkio_cgroup *blkcg;
        int keep_newpn = 0;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int fileid = BLKIOFILE_ATTR(cft->private);

        buf = kstrdup(buffer, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
        if (!newpn) {
                ret = -ENOMEM;
                goto free_buf;
        }

        ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
        if (ret)
                goto free_newpn;

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        spin_lock_irq(&blkcg->lock);

        pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
        if (!pn) {
                if (!blkio_delete_rule_command(newpn)) {
                        blkio_policy_insert_node(blkcg, newpn);
                        keep_newpn = 1;
                }
                spin_unlock_irq(&blkcg->lock);
                goto update_io_group;
        }

        if (blkio_delete_rule_command(newpn)) {
                blkio_policy_delete_node(pn);
                spin_unlock_irq(&blkcg->lock);
                goto update_io_group;
        }
        spin_unlock_irq(&blkcg->lock);

        blkio_update_policy_rule(pn, newpn);

update_io_group:
        blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
        if (!keep_newpn)
                kfree(newpn);
free_buf:
        kfree(buf);
        return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
        switch (pn->plid) {
        case BLKIO_POLICY_PROP:
                if (pn->fileid == BLKIO_PROP_weight_device)
                        seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
                                   MINOR(pn->dev), pn->val.weight);
                break;
        case BLKIO_POLICY_THROTL:
                switch (pn->fileid) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                        seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
                                   MINOR(pn->dev), pn->val.bps);
                        break;
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
                                   MINOR(pn->dev), pn->val.iops);
                        break;
                }
                break;
        default:
                BUG();
        }
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
                struct blkio_cgroup *blkcg, struct seq_file *m)
{
        struct blkio_policy_node *pn;

        if (!list_empty(&blkcg->policy_list)) {
                spin_lock_irq(&blkcg->lock);
                list_for_each_entry(pn, &blkcg->policy_list, node) {
                        if (!pn_matches_cftype(cft, pn))
                                continue;
                        blkio_print_policy_node(m, pn);
                }
                spin_unlock_irq(&blkcg->lock);
        }
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
                             struct seq_file *m)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight_device:
                        blkio_read_policy_node_files(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_read_bps_device:
                case BLKIO_THROTL_write_bps_device:
                case BLKIO_THROTL_read_iops_device:
                case BLKIO_THROTL_write_iops_device:
                        blkio_read_policy_node_files(cft, blkcg, m);
                        return 0;
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
                struct cftype *cft, struct cgroup_map_cb *cb,
                enum stat_type type, bool show_total, bool pcpu)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        uint64_t cgroup_total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
                if (blkg->dev) {
                        if (!cftype_blkg_same_policy(cft, blkg))
                                continue;
                        if (pcpu)
                                cgroup_total += blkio_get_stat_cpu(blkg, cb,
                                                blkg->dev, type);
                        else {
                                spin_lock_irq(&blkg->stats_lock);
                                cgroup_total += blkio_get_stat(blkg, cb,
                                                blkg->dev, type);
                                spin_unlock_irq(&blkg->stats_lock);
                        }
                }
        }
        if (show_total)
                cb->fill(cb, "Total", cgroup_total);
        rcu_read_unlock();
        return 0;
}

/* All map-type cgroup files are serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_TIME, 0, 0);
                case BLKIO_PROP_sectors:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SECTORS, 0, 1);
                case BLKIO_PROP_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_PROP_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                case BLKIO_PROP_io_service_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_SERVICE_TIME, 1, 0);
                case BLKIO_PROP_io_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_WAIT_TIME, 1, 0);
                case BLKIO_PROP_io_merged:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_MERGED, 1, 1);
                case BLKIO_PROP_io_queued:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
                case BLKIO_PROP_unaccounted_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
                case BLKIO_PROP_dequeue:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_DEQUEUE, 0, 0);
                case BLKIO_PROP_avg_queue_size:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
                case BLKIO_PROP_group_wait_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
                case BLKIO_PROP_idle_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_IDLE_TIME, 0, 0);
                case BLKIO_PROP_empty_time:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
                default:
                        BUG();
                }
                break;
        case BLKIO_POLICY_THROTL:
                switch (name) {
                case BLKIO_THROTL_io_service_bytes:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
                case BLKIO_THROTL_io_serviced:
                        return blkio_read_blkg_stats(blkcg, cft, cb,
                                                BLKIO_STAT_CPU_SERVICED, 1, 1);
                default:
                        BUG();
                }
                break;
        default:
                BUG();
        }

        return 0;
}

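/*
 * Update the cgroup-wide default weight. Groups with a per-device
 * blkio.weight_device rule keep their override; all others are switched
 * to the new default.
 */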
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
        struct blkio_group *blkg;
        struct hlist_node *n;
        struct blkio_policy_node *pn;

        if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
                return -EINVAL;

        spin_lock(&blkio_list_lock);
        spin_lock_irq(&blkcg->lock);
        blkcg->weight = (unsigned int)val;

        hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
                pn = blkio_policy_search_node(blkcg, blkg->dev,
                                BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
                if (pn)
                        continue;

                blkio_update_group_weight(blkg, blkcg->weight);
        }
        spin_unlock_irq(&blkcg->lock);
        spin_unlock(&blkio_list_lock);
        return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return (u64)blkcg->weight;
                }
                break;
        default:
                BUG();
        }
        return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
        struct blkio_cgroup *blkcg;
        enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
        int name = BLKIOFILE_ATTR(cft->private);

        blkcg = cgroup_to_blkio_cgroup(cgrp);

        switch (plid) {
        case BLKIO_POLICY_PROP:
                switch (name) {
                case BLKIO_PROP_weight:
                        return blkio_weight_write(blkcg, val);
                }
                break;
        default:
                BUG();
        }

        return 0;
}

struct cftype blkio_files[] = {
        {
                .name = "weight_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_weight_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "weight",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_weight),
                .read_u64 = blkiocg_file_read_u64,
                .write_u64 = blkiocg_file_write_u64,
        },
        {
                .name = "time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "sectors",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_sectors),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_service_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_io_service_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_io_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_merged",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_io_merged),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "io_queued",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_io_queued),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "reset_stats",
                .write_u64 = blkiocg_reset_stats,
        },
#ifdef CONFIG_BLK_DEV_THROTTLING
        {
                .name = "throttle.read_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                             BLKIO_THROTL_read_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_bps_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                             BLKIO_THROTL_write_bps_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.read_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                             BLKIO_THROTL_read_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.write_iops_device",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                             BLKIO_THROTL_write_iops_device),
                .read_seq_string = blkiocg_file_read,
                .write_string = blkiocg_file_write,
                .max_write_len = 256,
        },
        {
                .name = "throttle.io_service_bytes",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                             BLKIO_THROTL_io_service_bytes),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "throttle.io_serviced",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
                                             BLKIO_THROTL_io_serviced),
                .read_map = blkiocg_file_read_map,
        },
#endif /* CONFIG_BLK_DEV_THROTTLING */

#ifdef CONFIG_DEBUG_BLK_CGROUP
        {
                .name = "avg_queue_size",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_avg_queue_size),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "group_wait_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_group_wait_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "idle_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_idle_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "empty_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_empty_time),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "dequeue",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_dequeue),
                .read_map = blkiocg_file_read_map,
        },
        {
                .name = "unaccounted_time",
                .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
                                             BLKIO_PROP_unaccounted_time),
                .read_map = blkiocg_file_read_map,
        },
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        return cgroup_add_files(cgroup, subsys, blkio_files,
                                ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        unsigned long flags;
        struct blkio_group *blkg;
        void *key;
        struct blkio_policy_type *blkiop;
        struct blkio_policy_node *pn, *pntmp;

        rcu_read_lock();
        do {
                spin_lock_irqsave(&blkcg->lock, flags);

                if (hlist_empty(&blkcg->blkg_list)) {
                        spin_unlock_irqrestore(&blkcg->lock, flags);
                        break;
                }

                blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
                                   blkcg_node);
                key = rcu_dereference(blkg->key);
                __blkiocg_del_blkio_group(blkg);

                spin_unlock_irqrestore(&blkcg->lock, flags);

                /*
                 * This blkio_group is being unlinked as the associated cgroup
                 * is going away. Let all the IO controlling policies know
                 * about this event.
                 */
                spin_lock(&blkio_list_lock);
                list_for_each_entry(blkiop, &blkio_list, list) {
                        if (blkiop->plid != blkg->plid)
                                continue;
                        blkiop->ops.blkio_unlink_group_fn(key, blkg);
                }
                spin_unlock(&blkio_list_lock);
        } while (1);

        list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
                blkio_policy_delete_node(pn);
                kfree(pn);
        }

        free_css_id(&blkio_subsys, &blkcg->css);
        rcu_read_unlock();
        if (blkcg != &blkio_root_cgroup)
                kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
        struct blkio_cgroup *blkcg;
        struct cgroup *parent = cgroup->parent;

        if (!parent) {
                blkcg = &blkio_root_cgroup;
                goto done;
        }

        blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
        if (!blkcg)
                return ERR_PTR(-ENOMEM);

        blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
        spin_lock_init(&blkcg->lock);
        INIT_HLIST_HEAD(&blkcg->blkg_list);

        INIT_LIST_HEAD(&blkcg->policy_list);
        return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
                              struct cgroup *cgroup, struct task_struct *tsk,
                              bool threadgroup)
{
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                ret = -EINVAL;
        task_unlock(tsk);

        return ret;
}

static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
                           struct cgroup *prev, struct task_struct *tsk,
                           bool threadgroup)
{
        struct io_context *ioc;

        task_lock(tsk);
        ioc = tsk->io_context;
        if (ioc)
                ioc->cgroup_changed = 1;
        task_unlock(tsk);
}

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_add_tail(&blkiop->list, &blkio_list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
        spin_lock(&blkio_list_lock);
        list_del_init(&blkiop->list);
        spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

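/*
 * Module init/exit: register the "blkio" subsystem with the cgroup core.
 * cgroup_load_subsys()/cgroup_unload_subsys() are used (rather than static
 * registration) so the controller can be built as a module, matching the
 * .module = THIS_MODULE entry in blkio_subsys above.
 */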
static int __init init_cgroup_blkio(void)
{
        return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
        cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");