blktrace: port to tracepoints
1 /*
2 * Block device elevator/IO-scheduler.
3 *
4 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 *
6 * 30042000 Jens Axboe <axboe@kernel.dk> :
7 *
8 * Split the elevator a bit so that it is possible to choose a different
9 * one or even write a new "plug in". There are three pieces:
10 * - elevator_fn, inserts a new request in the queue list
11 * - elevator_merge_fn, decides whether a new buffer can be merged with
12 * an existing request
13 * - elevator_dequeue_fn, called when a request is taken off the active list
14 *
15 * 20082000 Dave Jones <davej@suse.de> :
16 * Removed tests for max-bomb-segments, which was breaking elvtune
17 * when run without -bN
18 *
19 * Jens:
20 * - Rework again to work with bio instead of buffer_heads
21 * - dropped the bi_dev comparisons, partition handling is correct now
22 * - completely modularize elevator setup and teardown
23 *
24 */
25 #include <linux/kernel.h>
26 #include <linux/fs.h>
27 #include <linux/blkdev.h>
28 #include <linux/elevator.h>
29 #include <linux/bio.h>
30 #include <linux/module.h>
31 #include <linux/slab.h>
32 #include <linux/init.h>
33 #include <linux/compiler.h>
34 #include <linux/delay.h>
35 #include <linux/blktrace_api.h>
36 #include <trace/block.h>
37 #include <linux/hash.h>
38 #include <linux/uaccess.h>
39
40 #include "blk.h"
41
42 static DEFINE_SPINLOCK(elv_list_lock);
43 static LIST_HEAD(elv_list);
44
45 /*
46 * Merge hash stuff.
47 */
48 static const int elv_hash_shift = 6;
49 #define ELV_HASH_BLOCK(sec) ((sec) >> 3)
50 #define ELV_HASH_FN(sec) \
51 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
52 #define ELV_HASH_ENTRIES (1 << elv_hash_shift)
53 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
54 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
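
/*
 * Worked example (illustrative numbers only): a request with
 * ->sector == 1000 and ->nr_sectors == 8 hashes under
 * rq_hash_key() == 1008, i.e. the sector right past its end.  An
 * incoming bio with bi_sector == 1008 is therefore a potential back
 * merge: elv_rqhash_find(q, 1008) picks the bucket via
 * hash_long(1008 >> 3, elv_hash_shift) and walks the chain until a
 * request whose end sector is exactly 1008 is found.
 */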
55
56 /*
57 * Query the io scheduler to see if a bio issued by the current process
58 * may be merged with rq.
59 */
60 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
61 {
62 struct request_queue *q = rq->q;
63 elevator_t *e = q->elevator;
64
65 if (e->ops->elevator_allow_merge_fn)
66 return e->ops->elevator_allow_merge_fn(q, rq, bio);
67
68 return 1;
69 }
70
71 /*
72 * can we safely merge with this request?
73 */
74 int elv_rq_merge_ok(struct request *rq, struct bio *bio)
75 {
76 if (!rq_mergeable(rq))
77 return 0;
78
79 /*
80 * Don't merge file system requests and discard requests
81 */
82 if (bio_discard(bio) != bio_discard(rq->bio))
83 return 0;
84
85 /*
86 * different data direction or already started, don't merge
87 */
88 if (bio_data_dir(bio) != rq_data_dir(rq))
89 return 0;
90
91 /*
92 * must be same device and not a special request
93 */
94 if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
95 return 0;
96
97 /*
98 * only merge an integrity-protected bio into a likewise protected rq
99 */
100 if (bio_integrity(bio) != blk_integrity_rq(rq))
101 return 0;
102
103 if (!elv_iosched_allow_merge(rq, bio))
104 return 0;
105
106 return 1;
107 }
108 EXPORT_SYMBOL(elv_rq_merge_ok);
109
110 static inline int elv_try_merge(struct request *__rq, struct bio *bio)
111 {
112 int ret = ELEVATOR_NO_MERGE;
113
114 /*
115 * we can merge and the sequence is ok, check which type of merge is possible
116 */
117 if (elv_rq_merge_ok(__rq, bio)) {
118 if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
119 ret = ELEVATOR_BACK_MERGE;
120 else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
121 ret = ELEVATOR_FRONT_MERGE;
122 }
123
124 return ret;
125 }
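
/*
 * Worked example (illustrative numbers only): for a mergeable request
 * covering sectors 100..107 (->sector == 100, ->nr_sectors == 8), a
 * bio with bi_sector == 108 is a back merge since 100 + 8 == 108,
 * while a 4-sector bio with bi_sector == 96 is a front merge since
 * 100 - 4 == 96.
 */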
126
127 static struct elevator_type *elevator_find(const char *name)
128 {
129 struct elevator_type *e;
130
131 list_for_each_entry(e, &elv_list, list) {
132 if (!strcmp(e->elevator_name, name))
133 return e;
134 }
135
136 return NULL;
137 }
138
139 static void elevator_put(struct elevator_type *e)
140 {
141 module_put(e->elevator_owner);
142 }
143
144 static struct elevator_type *elevator_get(const char *name)
145 {
146 struct elevator_type *e;
147
148 spin_lock(&elv_list_lock);
149
150 e = elevator_find(name);
151 if (!e) {
152 char elv[ELV_NAME_MAX + strlen("-iosched")];
153
154 spin_unlock(&elv_list_lock);
155
156 if (!strcmp(name, "anticipatory"))
157 sprintf(elv, "as-iosched");
158 else
159 sprintf(elv, "%s-iosched", name);
160
161 request_module("%s", elv);
162 spin_lock(&elv_list_lock);
163 e = elevator_find(name);
164 }
165
166 if (e && !try_module_get(e->elevator_owner))
167 e = NULL;
168
169 spin_unlock(&elv_list_lock);
170
171 return e;
172 }
173
174 static void *elevator_init_queue(struct request_queue *q,
175 struct elevator_queue *eq)
176 {
177 return eq->ops->elevator_init_fn(q);
178 }
179
180 static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
181 void *data)
182 {
183 q->elevator = eq;
184 eq->elevator_data = data;
185 }
186
187 static char chosen_elevator[16];
188
189 static int __init elevator_setup(char *str)
190 {
191 /*
192 * Be backwards-compatible with previous kernels, so users
193 * won't get the wrong elevator.
194 */
195 if (!strcmp(str, "as"))
196 strcpy(chosen_elevator, "anticipatory");
197 else
198 strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
199 return 1;
200 }
201
202 __setup("elevator=", elevator_setup);
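
/*
 * Usage example: booting with "elevator=deadline" makes deadline the
 * default io scheduler for all queues, overriding
 * CONFIG_DEFAULT_IOSCHED; "elevator=as" is rewritten to "anticipatory"
 * by elevator_setup() above for backwards compatibility.
 */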
203
204 static struct kobj_type elv_ktype;
205
206 static elevator_t *elevator_alloc(struct request_queue *q,
207 struct elevator_type *e)
208 {
209 elevator_t *eq;
210 int i;
211
212 eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
213 if (unlikely(!eq))
214 goto err;
215
216 eq->ops = &e->ops;
217 eq->elevator_type = e;
218 kobject_init(&eq->kobj, &elv_ktype);
219 mutex_init(&eq->sysfs_lock);
220
221 eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
222 GFP_KERNEL, q->node);
223 if (!eq->hash)
224 goto err;
225
226 for (i = 0; i < ELV_HASH_ENTRIES; i++)
227 INIT_HLIST_HEAD(&eq->hash[i]);
228
229 return eq;
230 err:
231 kfree(eq);
232 elevator_put(e);
233 return NULL;
234 }
235
236 static void elevator_release(struct kobject *kobj)
237 {
238 elevator_t *e = container_of(kobj, elevator_t, kobj);
239
240 elevator_put(e->elevator_type);
241 kfree(e->hash);
242 kfree(e);
243 }
244
245 int elevator_init(struct request_queue *q, char *name)
246 {
247 struct elevator_type *e = NULL;
248 struct elevator_queue *eq;
249 int ret = 0;
250 void *data;
251
252 INIT_LIST_HEAD(&q->queue_head);
253 q->last_merge = NULL;
254 q->end_sector = 0;
255 q->boundary_rq = NULL;
256
257 if (name) {
258 e = elevator_get(name);
259 if (!e)
260 return -EINVAL;
261 }
262
263 if (!e && *chosen_elevator) {
264 e = elevator_get(chosen_elevator);
265 if (!e)
266 printk(KERN_ERR "I/O scheduler %s not found\n",
267 chosen_elevator);
268 }
269
270 if (!e) {
271 e = elevator_get(CONFIG_DEFAULT_IOSCHED);
272 if (!e) {
273 printk(KERN_ERR
274 "Default I/O scheduler not found. " \
275 "Using noop.\n");
276 e = elevator_get("noop");
277 }
278 }
279
280 eq = elevator_alloc(q, e);
281 if (!eq)
282 return -ENOMEM;
283
284 data = elevator_init_queue(q, eq);
285 if (!data) {
286 kobject_put(&eq->kobj);
287 return -ENOMEM;
288 }
289
290 elevator_attach(q, eq, data);
291 return ret;
292 }
293 EXPORT_SYMBOL(elevator_init);
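
/*
 * Typical call site (a sketch, not part of this file): blk_init_queue()
 * passes a NULL name, i.e. elevator_init(q, NULL), so a newly set up
 * request queue gets the boot-time "elevator=" choice or, failing that,
 * CONFIG_DEFAULT_IOSCHED; drivers normally never pick a scheduler by
 * name themselves.
 */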
294
295 void elevator_exit(elevator_t *e)
296 {
297 mutex_lock(&e->sysfs_lock);
298 if (e->ops->elevator_exit_fn)
299 e->ops->elevator_exit_fn(e);
300 e->ops = NULL;
301 mutex_unlock(&e->sysfs_lock);
302
303 kobject_put(&e->kobj);
304 }
305 EXPORT_SYMBOL(elevator_exit);
306
307 static void elv_activate_rq(struct request_queue *q, struct request *rq)
308 {
309 elevator_t *e = q->elevator;
310
311 if (e->ops->elevator_activate_req_fn)
312 e->ops->elevator_activate_req_fn(q, rq);
313 }
314
315 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
316 {
317 elevator_t *e = q->elevator;
318
319 if (e->ops->elevator_deactivate_req_fn)
320 e->ops->elevator_deactivate_req_fn(q, rq);
321 }
322
323 static inline void __elv_rqhash_del(struct request *rq)
324 {
325 hlist_del_init(&rq->hash);
326 }
327
328 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
329 {
330 if (ELV_ON_HASH(rq))
331 __elv_rqhash_del(rq);
332 }
333
334 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
335 {
336 elevator_t *e = q->elevator;
337
338 BUG_ON(ELV_ON_HASH(rq));
339 hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
340 }
341
342 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
343 {
344 __elv_rqhash_del(rq);
345 elv_rqhash_add(q, rq);
346 }
347
348 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
349 {
350 elevator_t *e = q->elevator;
351 struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
352 struct hlist_node *entry, *next;
353 struct request *rq;
354
355 hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
356 BUG_ON(!ELV_ON_HASH(rq));
357
358 if (unlikely(!rq_mergeable(rq))) {
359 __elv_rqhash_del(rq);
360 continue;
361 }
362
363 if (rq_hash_key(rq) == offset)
364 return rq;
365 }
366
367 return NULL;
368 }
369
370 /*
371 * RB-tree support functions for insertion/lookup/removal of requests
372 * in a sorted RB tree.
373 */
374 struct request *elv_rb_add(struct rb_root *root, struct request *rq)
375 {
376 struct rb_node **p = &root->rb_node;
377 struct rb_node *parent = NULL;
378 struct request *__rq;
379
380 while (*p) {
381 parent = *p;
382 __rq = rb_entry(parent, struct request, rb_node);
383
384 if (rq->sector < __rq->sector)
385 p = &(*p)->rb_left;
386 else if (rq->sector > __rq->sector)
387 p = &(*p)->rb_right;
388 else
389 return __rq;
390 }
391
392 rb_link_node(&rq->rb_node, parent, p);
393 rb_insert_color(&rq->rb_node, root);
394 return NULL;
395 }
396 EXPORT_SYMBOL(elv_rb_add);
397
398 void elv_rb_del(struct rb_root *root, struct request *rq)
399 {
400 BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
401 rb_erase(&rq->rb_node, root);
402 RB_CLEAR_NODE(&rq->rb_node);
403 }
404 EXPORT_SYMBOL(elv_rb_del);
405
406 struct request *elv_rb_find(struct rb_root *root, sector_t sector)
407 {
408 struct rb_node *n = root->rb_node;
409 struct request *rq;
410
411 while (n) {
412 rq = rb_entry(n, struct request, rb_node);
413
414 if (sector < rq->sector)
415 n = n->rb_left;
416 else if (sector > rq->sector)
417 n = n->rb_right;
418 else
419 return rq;
420 }
421
422 return NULL;
423 }
424 EXPORT_SYMBOL(elv_rb_find);
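
/*
 * Usage sketch (hedged, loosely modelled on a sort-based io scheduler
 * such as deadline; sort_list is an illustrative name for an rb_root
 * kept in the scheduler's elevator_data):
 *
 *	elv_rb_add(&sort_list, rq);	when a request is queued
 *	__rq = elv_rb_find(&sort_list, bio->bi_sector + bio_sectors(bio));
 *					front merge probe: look for a
 *					request starting where the bio
 *					would end
 *	elv_rb_del(&sort_list, rq);	when the request is dispatched
 *					or merged away
 */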
425
426 /*
427 * Insert rq into dispatch queue of q. Queue lock must be held on
428 * entry. rq is sorted into the dispatch queue. To be used by
429 * specific elevators.
430 */
431 void elv_dispatch_sort(struct request_queue *q, struct request *rq)
432 {
433 sector_t boundary;
434 struct list_head *entry;
435 int stop_flags;
436
437 if (q->last_merge == rq)
438 q->last_merge = NULL;
439
440 elv_rqhash_del(q, rq);
441
442 q->nr_sorted--;
443
444 boundary = q->end_sector;
445 stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
446 list_for_each_prev(entry, &q->queue_head) {
447 struct request *pos = list_entry_rq(entry);
448
449 if (blk_discard_rq(rq) != blk_discard_rq(pos))
450 break;
451 if (rq_data_dir(rq) != rq_data_dir(pos))
452 break;
453 if (pos->cmd_flags & stop_flags)
454 break;
455 if (rq->sector >= boundary) {
456 if (pos->sector < boundary)
457 continue;
458 } else {
459 if (pos->sector >= boundary)
460 break;
461 }
462 if (rq->sector >= pos->sector)
463 break;
464 }
465
466 list_add(&rq->queuelist, entry);
467 }
468 EXPORT_SYMBOL(elv_dispatch_sort);
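
/*
 * Worked example (illustrative sectors): with q->end_sector == 1000,
 * the loop above keeps requests at or beyond the boundary first, in
 * ascending order, followed by requests below it, also ascending,
 * e.g. 1000, 1024, 4096, 8, 64 - one upward sweep from the boundary
 * that then wraps around to the low sectors.
 */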
469
470 /*
471 * Insert rq into dispatch queue of q. Queue lock must be held on
472 * entry. rq is added to the back of the dispatch queue. To be used by
473 * specific elevators.
474 */
475 void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
476 {
477 if (q->last_merge == rq)
478 q->last_merge = NULL;
479
480 elv_rqhash_del(q, rq);
481
482 q->nr_sorted--;
483
484 q->end_sector = rq_end_sector(rq);
485 q->boundary_rq = rq;
486 list_add_tail(&rq->queuelist, &q->queue_head);
487 }
488 EXPORT_SYMBOL(elv_dispatch_add_tail);
489
490 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
491 {
492 elevator_t *e = q->elevator;
493 struct request *__rq;
494 int ret;
495
496 /*
497 * First try one-hit cache.
498 */
499 if (q->last_merge) {
500 ret = elv_try_merge(q->last_merge, bio);
501 if (ret != ELEVATOR_NO_MERGE) {
502 *req = q->last_merge;
503 return ret;
504 }
505 }
506
507 if (blk_queue_nomerges(q))
508 return ELEVATOR_NO_MERGE;
509
510 /*
511 * See if our hash lookup can find a potential backmerge.
512 */
513 __rq = elv_rqhash_find(q, bio->bi_sector);
514 if (__rq && elv_rq_merge_ok(__rq, bio)) {
515 *req = __rq;
516 return ELEVATOR_BACK_MERGE;
517 }
518
519 if (e->ops->elevator_merge_fn)
520 return e->ops->elevator_merge_fn(q, req, bio);
521
522 return ELEVATOR_NO_MERGE;
523 }
524
525 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
526 {
527 elevator_t *e = q->elevator;
528
529 if (e->ops->elevator_merged_fn)
530 e->ops->elevator_merged_fn(q, rq, type);
531
532 if (type == ELEVATOR_BACK_MERGE)
533 elv_rqhash_reposition(q, rq);
534
535 q->last_merge = rq;
536 }
537
538 void elv_merge_requests(struct request_queue *q, struct request *rq,
539 struct request *next)
540 {
541 elevator_t *e = q->elevator;
542
543 if (e->ops->elevator_merge_req_fn)
544 e->ops->elevator_merge_req_fn(q, rq, next);
545
546 elv_rqhash_reposition(q, rq);
547 elv_rqhash_del(q, next);
548
549 q->nr_sorted--;
550 q->last_merge = rq;
551 }
552
553 void elv_requeue_request(struct request_queue *q, struct request *rq)
554 {
555 /*
556 * it already went through dequeue, we need to decrement the
557 * in_flight count again
558 */
559 if (blk_account_rq(rq)) {
560 q->in_flight--;
561 if (blk_sorted_rq(rq))
562 elv_deactivate_rq(q, rq);
563 }
564
565 rq->cmd_flags &= ~REQ_STARTED;
566
567 elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
568 }
569
570 static void elv_drain_elevator(struct request_queue *q)
571 {
572 static int printed;
573 while (q->elevator->ops->elevator_dispatch_fn(q, 1))
574 ;
575 if (q->nr_sorted == 0)
576 return;
577 if (printed++ < 10) {
578 printk(KERN_ERR "%s: forced dispatching is broken "
579 "(nr_sorted=%u), please report this\n",
580 q->elevator->elevator_type->elevator_name, q->nr_sorted);
581 }
582 }
583
584 void elv_insert(struct request_queue *q, struct request *rq, int where)
585 {
586 struct list_head *pos;
587 unsigned ordseq;
588 int unplug_it = 1;
589
590 trace_block_rq_insert(q, rq);
591
592 rq->q = q;
593
594 switch (where) {
595 case ELEVATOR_INSERT_FRONT:
596 rq->cmd_flags |= REQ_SOFTBARRIER;
597
598 list_add(&rq->queuelist, &q->queue_head);
599 break;
600
601 case ELEVATOR_INSERT_BACK:
602 rq->cmd_flags |= REQ_SOFTBARRIER;
603 elv_drain_elevator(q);
604 list_add_tail(&rq->queuelist, &q->queue_head);
605 /*
606 * We kick the queue here for the following reasons.
607 * - The elevator might have returned NULL previously
608 * to delay requests and is returning them now. As the
609 * queue wasn't empty before this request, ll_rw_blk
610 * won't run the queue on return, resulting in a hang.
611 * - Usually, back inserted requests won't be merged
612 * with anything. There's no point in delaying queue
613 * processing.
614 */
615 blk_remove_plug(q);
616 blk_start_queueing(q);
617 break;
618
619 case ELEVATOR_INSERT_SORT:
620 BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
621 rq->cmd_flags |= REQ_SORTED;
622 q->nr_sorted++;
623 if (rq_mergeable(rq)) {
624 elv_rqhash_add(q, rq);
625 if (!q->last_merge)
626 q->last_merge = rq;
627 }
628
629 /*
630 * Some ioscheds (cfq) run q->request_fn directly, so
631 * rq cannot be accessed after calling
632 * elevator_add_req_fn.
633 */
634 q->elevator->ops->elevator_add_req_fn(q, rq);
635 break;
636
637 case ELEVATOR_INSERT_REQUEUE:
638 /*
639 * If ordered flush isn't in progress, we do front
640 * insertion; otherwise, requests should be requeued
641 * in ordseq order.
642 */
643 rq->cmd_flags |= REQ_SOFTBARRIER;
644
645 /*
646 * Most requeues happen because of a busy condition;
647 * don't force an unplug of the queue in that case.
648 */
649 unplug_it = 0;
650
651 if (q->ordseq == 0) {
652 list_add(&rq->queuelist, &q->queue_head);
653 break;
654 }
655
656 ordseq = blk_ordered_req_seq(rq);
657
658 list_for_each(pos, &q->queue_head) {
659 struct request *pos_rq = list_entry_rq(pos);
660 if (ordseq <= blk_ordered_req_seq(pos_rq))
661 break;
662 }
663
664 list_add_tail(&rq->queuelist, pos);
665 break;
666
667 default:
668 printk(KERN_ERR "%s: bad insertion point %d\n",
669 __func__, where);
670 BUG();
671 }
672
673 if (unplug_it && blk_queue_plugged(q)) {
674 int nrq = q->rq.count[READ] + q->rq.count[WRITE]
675 - q->in_flight;
676
677 if (nrq >= q->unplug_thresh)
678 __generic_unplug_device(q);
679 }
680 }
681
682 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
683 int plug)
684 {
685 if (q->ordcolor)
686 rq->cmd_flags |= REQ_ORDERED_COLOR;
687
688 if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
689 /*
690 * toggle ordered color
691 */
692 if (blk_barrier_rq(rq))
693 q->ordcolor ^= 1;
694
695 /*
696 * barriers implicitly indicate back insertion
697 */
698 if (where == ELEVATOR_INSERT_SORT)
699 where = ELEVATOR_INSERT_BACK;
700
701 /*
702 * this request is a scheduling boundary, update
703 * end_sector
704 */
705 if (blk_fs_request(rq) || blk_discard_rq(rq)) {
706 q->end_sector = rq_end_sector(rq);
707 q->boundary_rq = rq;
708 }
709 } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
710 where == ELEVATOR_INSERT_SORT)
711 where = ELEVATOR_INSERT_BACK;
712
713 if (plug)
714 blk_plug_device(q);
715
716 elv_insert(q, rq, where);
717 }
718 EXPORT_SYMBOL(__elv_add_request);
719
720 void elv_add_request(struct request_queue *q, struct request *rq, int where,
721 int plug)
722 {
723 unsigned long flags;
724
725 spin_lock_irqsave(q->queue_lock, flags);
726 __elv_add_request(q, rq, where, plug);
727 spin_unlock_irqrestore(q->queue_lock, flags);
728 }
729 EXPORT_SYMBOL(elv_add_request);
730
731 static inline struct request *__elv_next_request(struct request_queue *q)
732 {
733 struct request *rq;
734
735 while (1) {
736 while (!list_empty(&q->queue_head)) {
737 rq = list_entry_rq(q->queue_head.next);
738 if (blk_do_ordered(q, &rq))
739 return rq;
740 }
741
742 if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
743 return NULL;
744 }
745 }
746
747 struct request *elv_next_request(struct request_queue *q)
748 {
749 struct request *rq;
750 int ret;
751
752 while ((rq = __elv_next_request(q)) != NULL) {
753 /*
754 * Kill the empty barrier placeholder, the driver must
755 * never see it.
756 */
757 if (blk_empty_barrier(rq)) {
758 __blk_end_request(rq, 0, blk_rq_bytes(rq));
759 continue;
760 }
761 if (!(rq->cmd_flags & REQ_STARTED)) {
762 /*
763 * This is the first time the device driver
764 * sees this request (possibly after
765 * requeueing). Notify IO scheduler.
766 */
767 if (blk_sorted_rq(rq))
768 elv_activate_rq(q, rq);
769
770 /*
771 * just mark it as started even if we don't start
772 * it; a request that has been delayed should
773 * not be passed by new incoming requests
774 */
775 rq->cmd_flags |= REQ_STARTED;
776 trace_block_rq_issue(q, rq);
777 }
778
779 if (!q->boundary_rq || q->boundary_rq == rq) {
780 q->end_sector = rq_end_sector(rq);
781 q->boundary_rq = NULL;
782 }
783
784 if (rq->cmd_flags & REQ_DONTPREP)
785 break;
786
787 if (q->dma_drain_size && rq->data_len) {
788 /*
789 * make sure space for the drain appears; we
790 * know we can do this because max_hw_segments
791 * has been adjusted to be one fewer than the
792 * device can handle
793 */
794 rq->nr_phys_segments++;
795 }
796
797 if (!q->prep_rq_fn)
798 break;
799
800 ret = q->prep_rq_fn(q, rq);
801 if (ret == BLKPREP_OK) {
802 break;
803 } else if (ret == BLKPREP_DEFER) {
804 /*
805 * the request may have been (partially) prepped.
806 * we need to keep this request in the front to
807 * avoid resource deadlock. REQ_STARTED will
808 * prevent other fs requests from passing this one.
809 */
810 if (q->dma_drain_size && rq->data_len &&
811 !(rq->cmd_flags & REQ_DONTPREP)) {
812 /*
813 * remove the space for the drain we added
814 * so that we don't add it again
815 */
816 --rq->nr_phys_segments;
817 }
818
819 rq = NULL;
820 break;
821 } else if (ret == BLKPREP_KILL) {
822 rq->cmd_flags |= REQ_QUIET;
823 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
824 } else {
825 printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
826 break;
827 }
828 }
829
830 return rq;
831 }
832 EXPORT_SYMBOL(elv_next_request);
833
834 void elv_dequeue_request(struct request_queue *q, struct request *rq)
835 {
836 BUG_ON(list_empty(&rq->queuelist));
837 BUG_ON(ELV_ON_HASH(rq));
838
839 list_del_init(&rq->queuelist);
840
841 /*
842 * the time frame between a request being removed from the lists
843 * and when it is freed is accounted as io that is in progress on
844 * the driver side.
845 */
846 if (blk_account_rq(rq))
847 q->in_flight++;
848
849 /*
850 * We are now handing the request to the hardware; add the
851 * timeout handler.
852 */
853 blk_add_timer(rq);
854 }
855 EXPORT_SYMBOL(elv_dequeue_request);
856
857 int elv_queue_empty(struct request_queue *q)
858 {
859 elevator_t *e = q->elevator;
860
861 if (!list_empty(&q->queue_head))
862 return 0;
863
864 if (e->ops->elevator_queue_empty_fn)
865 return e->ops->elevator_queue_empty_fn(q);
866
867 return 1;
868 }
869 EXPORT_SYMBOL(elv_queue_empty);
870
871 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
872 {
873 elevator_t *e = q->elevator;
874
875 if (e->ops->elevator_latter_req_fn)
876 return e->ops->elevator_latter_req_fn(q, rq);
877 return NULL;
878 }
879
880 struct request *elv_former_request(struct request_queue *q, struct request *rq)
881 {
882 elevator_t *e = q->elevator;
883
884 if (e->ops->elevator_former_req_fn)
885 return e->ops->elevator_former_req_fn(q, rq);
886 return NULL;
887 }
888
889 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
890 {
891 elevator_t *e = q->elevator;
892
893 if (e->ops->elevator_set_req_fn)
894 return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
895
896 rq->elevator_private = NULL;
897 return 0;
898 }
899
900 void elv_put_request(struct request_queue *q, struct request *rq)
901 {
902 elevator_t *e = q->elevator;
903
904 if (e->ops->elevator_put_req_fn)
905 e->ops->elevator_put_req_fn(rq);
906 }
907
908 int elv_may_queue(struct request_queue *q, int rw)
909 {
910 elevator_t *e = q->elevator;
911
912 if (e->ops->elevator_may_queue_fn)
913 return e->ops->elevator_may_queue_fn(q, rw);
914
915 return ELV_MQUEUE_MAY;
916 }
917
918 void elv_abort_queue(struct request_queue *q)
919 {
920 struct request *rq;
921
922 while (!list_empty(&q->queue_head)) {
923 rq = list_entry_rq(q->queue_head.next);
924 rq->cmd_flags |= REQ_QUIET;
925 trace_block_rq_abort(q, rq);
926 __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
927 }
928 }
929 EXPORT_SYMBOL(elv_abort_queue);
930
931 void elv_completed_request(struct request_queue *q, struct request *rq)
932 {
933 elevator_t *e = q->elevator;
934
935 /*
936 * request is released from the driver, io must be done
937 */
938 if (blk_account_rq(rq)) {
939 q->in_flight--;
940 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
941 e->ops->elevator_completed_req_fn(q, rq);
942 }
943
944 /*
945 * Check if the queue is waiting for fs requests to be
946 * drained for flush sequence.
947 */
948 if (unlikely(q->ordseq)) {
949 struct request *first_rq = list_entry_rq(q->queue_head.next);
950 if (q->in_flight == 0 &&
951 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
952 blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
953 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
954 blk_start_queueing(q);
955 }
956 }
957 }
958
959 #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
960
961 static ssize_t
962 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
963 {
964 elevator_t *e = container_of(kobj, elevator_t, kobj);
965 struct elv_fs_entry *entry = to_elv(attr);
966 ssize_t error;
967
968 if (!entry->show)
969 return -EIO;
970
971 mutex_lock(&e->sysfs_lock);
972 error = e->ops ? entry->show(e, page) : -ENOENT;
973 mutex_unlock(&e->sysfs_lock);
974 return error;
975 }
976
977 static ssize_t
978 elv_attr_store(struct kobject *kobj, struct attribute *attr,
979 const char *page, size_t length)
980 {
981 elevator_t *e = container_of(kobj, elevator_t, kobj);
982 struct elv_fs_entry *entry = to_elv(attr);
983 ssize_t error;
984
985 if (!entry->store)
986 return -EIO;
987
988 mutex_lock(&e->sysfs_lock);
989 error = e->ops ? entry->store(e, page, length) : -ENOENT;
990 mutex_unlock(&e->sysfs_lock);
991 return error;
992 }
993
994 static struct sysfs_ops elv_sysfs_ops = {
995 .show = elv_attr_show,
996 .store = elv_attr_store,
997 };
998
999 static struct kobj_type elv_ktype = {
1000 .sysfs_ops = &elv_sysfs_ops,
1001 .release = elevator_release,
1002 };
1003
1004 int elv_register_queue(struct request_queue *q)
1005 {
1006 elevator_t *e = q->elevator;
1007 int error;
1008
1009 error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
1010 if (!error) {
1011 struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
1012 if (attr) {
1013 while (attr->attr.name) {
1014 if (sysfs_create_file(&e->kobj, &attr->attr))
1015 break;
1016 attr++;
1017 }
1018 }
1019 kobject_uevent(&e->kobj, KOBJ_ADD);
1020 }
1021 return error;
1022 }
1023
1024 static void __elv_unregister_queue(elevator_t *e)
1025 {
1026 kobject_uevent(&e->kobj, KOBJ_REMOVE);
1027 kobject_del(&e->kobj);
1028 }
1029
1030 void elv_unregister_queue(struct request_queue *q)
1031 {
1032 if (q)
1033 __elv_unregister_queue(q->elevator);
1034 }
1035
1036 void elv_register(struct elevator_type *e)
1037 {
1038 char *def = "";
1039
1040 spin_lock(&elv_list_lock);
1041 BUG_ON(elevator_find(e->elevator_name));
1042 list_add_tail(&e->list, &elv_list);
1043 spin_unlock(&elv_list_lock);
1044
1045 if (!strcmp(e->elevator_name, chosen_elevator) ||
1046 (!*chosen_elevator &&
1047 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
1048 def = " (default)";
1049
1050 printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
1051 def);
1052 }
1053 EXPORT_SYMBOL_GPL(elv_register);
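
/*
 * Registration sketch (hedged; "myiosched" and its callbacks are
 * hypothetical, the shape follows the in-tree noop scheduler):
 *
 *	static struct elevator_type elevator_myiosched = {
 *		.ops = {
 *			.elevator_dispatch_fn	 = myiosched_dispatch,
 *			.elevator_add_req_fn	 = myiosched_add_request,
 *			.elevator_queue_empty_fn = myiosched_queue_empty,
 *			.elevator_init_fn	 = myiosched_init_queue,
 *			.elevator_exit_fn	 = myiosched_exit_queue,
 *		},
 *		.elevator_name	= "myiosched",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init myiosched_init(void)
 *	{
 *		elv_register(&elevator_myiosched);
 *		return 0;
 *	}
 *	module_init(myiosched_init);
 */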
1054
1055 void elv_unregister(struct elevator_type *e)
1056 {
1057 struct task_struct *g, *p;
1058
1059 /*
1060 * Iterate over every thread in the system to remove the io contexts.
1061 */
1062 if (e->ops.trim) {
1063 read_lock(&tasklist_lock);
1064 do_each_thread(g, p) {
1065 task_lock(p);
1066 if (p->io_context)
1067 e->ops.trim(p->io_context);
1068 task_unlock(p);
1069 } while_each_thread(g, p);
1070 read_unlock(&tasklist_lock);
1071 }
1072
1073 spin_lock(&elv_list_lock);
1074 list_del_init(&e->list);
1075 spin_unlock(&elv_list_lock);
1076 }
1077 EXPORT_SYMBOL_GPL(elv_unregister);
1078
1079 /*
1080 * switch to new_e io scheduler. be careful not to introduce deadlocks -
1081 * we don't free the old io scheduler before we have allocated what we
1082 * need for the new one. this way we have a chance of going back to the old
1083 * one if the new one fails init for some reason.
1084 */
1085 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
1086 {
1087 elevator_t *old_elevator, *e;
1088 void *data;
1089
1090 /*
1091 * Allocate new elevator
1092 */
1093 e = elevator_alloc(q, new_e);
1094 if (!e)
1095 return 0;
1096
1097 data = elevator_init_queue(q, e);
1098 if (!data) {
1099 kobject_put(&e->kobj);
1100 return 0;
1101 }
1102
1103 /*
1104 * Turn on BYPASS and drain all requests w/ elevator private data
1105 */
1106 spin_lock_irq(q->queue_lock);
1107
1108 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
1109
1110 elv_drain_elevator(q);
1111
1112 while (q->rq.elvpriv) {
1113 blk_start_queueing(q);
1114 spin_unlock_irq(q->queue_lock);
1115 msleep(10);
1116 spin_lock_irq(q->queue_lock);
1117 elv_drain_elevator(q);
1118 }
1119
1120 /*
1121 * Remember old elevator.
1122 */
1123 old_elevator = q->elevator;
1124
1125 /*
1126 * attach and start new elevator
1127 */
1128 elevator_attach(q, e, data);
1129
1130 spin_unlock_irq(q->queue_lock);
1131
1132 __elv_unregister_queue(old_elevator);
1133
1134 if (elv_register_queue(q))
1135 goto fail_register;
1136
1137 /*
1138 * finally exit old elevator and turn off BYPASS.
1139 */
1140 elevator_exit(old_elevator);
1141 spin_lock_irq(q->queue_lock);
1142 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1143 spin_unlock_irq(q->queue_lock);
1144
1145 blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
1146
1147 return 1;
1148
1149 fail_register:
1150 /*
1151 * switch failed, exit the new io scheduler and reattach the old
1152 * one again (along with re-adding the sysfs dir)
1153 */
1154 elevator_exit(e);
1155 q->elevator = old_elevator;
1156 elv_register_queue(q);
1157
1158 spin_lock_irq(q->queue_lock);
1159 queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
1160 spin_unlock_irq(q->queue_lock);
1161
1162 return 0;
1163 }
1164
1165 ssize_t elv_iosched_store(struct request_queue *q, const char *name,
1166 size_t count)
1167 {
1168 char elevator_name[ELV_NAME_MAX];
1169 struct elevator_type *e;
1170
1171 strlcpy(elevator_name, name, sizeof(elevator_name));
1172 strstrip(elevator_name);
1173
1174 e = elevator_get(elevator_name);
1175 if (!e) {
1176 printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
1177 return -EINVAL;
1178 }
1179
1180 if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
1181 elevator_put(e);
1182 return count;
1183 }
1184
1185 if (!elevator_switch(q, e))
1186 printk(KERN_ERR "elevator: switch to %s failed\n",
1187 elevator_name);
1188 return count;
1189 }
1190
1191 ssize_t elv_iosched_show(struct request_queue *q, char *name)
1192 {
1193 elevator_t *e = q->elevator;
1194 struct elevator_type *elv = e->elevator_type;
1195 struct elevator_type *__e;
1196 int len = 0;
1197
1198 spin_lock(&elv_list_lock);
1199 list_for_each_entry(__e, &elv_list, list) {
1200 if (!strcmp(elv->elevator_name, __e->elevator_name))
1201 len += sprintf(name+len, "[%s] ", elv->elevator_name);
1202 else
1203 len += sprintf(name+len, "%s ", __e->elevator_name);
1204 }
1205 spin_unlock(&elv_list_lock);
1206
1207 len += sprintf(len+name, "\n");
1208 return len;
1209 }
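
/*
 * Usage example: the store/show pair above backs the per-queue
 * "scheduler" sysfs attribute, so a runtime switch is simply
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * and reading the file lists all registered schedulers with the active
 * one in brackets (sda is only an example device).
 */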
1210
1211 struct request *elv_rb_former_request(struct request_queue *q,
1212 struct request *rq)
1213 {
1214 struct rb_node *rbprev = rb_prev(&rq->rb_node);
1215
1216 if (rbprev)
1217 return rb_entry_rq(rbprev);
1218
1219 return NULL;
1220 }
1221 EXPORT_SYMBOL(elv_rb_former_request);
1222
1223 struct request *elv_rb_latter_request(struct request_queue *q,
1224 struct request *rq)
1225 {
1226 struct rb_node *rbnext = rb_next(&rq->rb_node);
1227
1228 if (rbnext)
1229 return rb_entry_rq(rbnext);
1230
1231 return NULL;
1232 }
1233 EXPORT_SYMBOL(elv_rb_latter_request);