block/deadline-iosched.c
/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

static const int deadline_hash_shift = 5;
#define DL_HASH_BLOCK(sec)      ((sec) >> 3)
#define DL_HASH_FN(sec)         (hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
#define DL_HASH_ENTRIES         (1 << deadline_hash_shift)
#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)    list_entry((ptr), struct deadline_rq, hash)
#define ON_HASH(drq)            (drq)->on_hash
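
/*
 * Note: the hash indexes requests by their *end* sector (rq_hash_key), so a
 * bio whose start sector equals a hashed key can be found for a back merge
 * without walking the sort tree. DL_HASH_BLOCK coarsens the key to 8-sector
 * blocks before hashing into the 1 << deadline_hash_shift bucket chains.
 */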
35
36 struct deadline_data {
37 /*
38 * run time data
39 */
40
41 /*
42 * requests (deadline_rq s) are present on both sort_list and fifo_list
43 */
44 struct rb_root sort_list[2];
45 struct list_head fifo_list[2];
46
47 /*
48 * next in sort order. read, write or both are NULL
49 */
50 struct deadline_rq *next_drq[2];
51 struct list_head *hash; /* request hash */
52 unsigned int batching; /* number of sequential requests made */
53 sector_t last_sector; /* head position */
54 unsigned int starved; /* times reads have starved writes */
55
56 /*
57 * settings that change how the i/o scheduler behaves
58 */
59 int fifo_expire[2];
60 int fifo_batch;
61 int writes_starved;
62 int front_merges;
63
64 mempool_t *drq_pool;
65 };

/*
 * per-request data.
 */
struct deadline_rq {
        /*
         * rbtree index, key is the starting offset
         */
        struct rb_node rb_node;
        sector_t rb_key;

        struct request *request;

        /*
         * request hash, key is the ending offset (for back merge lookup)
         */
        struct list_head hash;
        char on_hash;

        /*
         * expire fifo
         */
        struct list_head fifo;
        unsigned long expires;
};
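
/*
 * One of these hangs off each struct request via rq->elevator_private (see
 * RQ_DATA below); it is allocated from drq_pool in deadline_set_request()
 * and handed back in deadline_put_request().
 */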

static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);

static kmem_cache_t *drq_pool;

#define RQ_DATA(rq)     ((struct deadline_rq *) (rq)->elevator_private)

/*
 * the back merge hash support functions
 */
static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
{
        drq->on_hash = 0;
        list_del_init(&drq->hash);
}

static inline void deadline_del_drq_hash(struct deadline_rq *drq)
{
        if (ON_HASH(drq))
                __deadline_del_drq_hash(drq);
}

static inline void
deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;

        BUG_ON(ON_HASH(drq));

        drq->on_hash = 1;
        list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
}

/*
 * move hot entry to front of chain
 */
static inline void
deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct request *rq = drq->request;
        struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];

        if (ON_HASH(drq) && drq->hash.prev != head) {
                list_del(&drq->hash);
                list_add(&drq->hash, head);
        }
}
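
/*
 * A request that was just merged against is likely to be merged against
 * again soon, so moving it to the head of its bucket chain keeps the common
 * deadline_find_drq_hash() lookup short (a simple move-to-front scheme).
 */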

static struct request *
deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
{
        struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
        struct list_head *entry, *next = hash_list->next;

        while ((entry = next) != hash_list) {
                struct deadline_rq *drq = list_entry_hash(entry);
                struct request *__rq = drq->request;

                next = entry->next;

                BUG_ON(!ON_HASH(drq));

                if (!rq_mergeable(__rq)) {
                        __deadline_del_drq_hash(drq);
                        continue;
                }

                if (rq_hash_key(__rq) == offset)
                        return __rq;
        }

        return NULL;
}
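
/*
 * Note the lazy pruning above: requests that have become non-mergeable are
 * only dropped from the hash when a lookup happens to walk over them, which
 * is why "next" is sampled before the current entry may be deleted.
 */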

/*
 * rb tree support functions
 */
#define RB_EMPTY(root)          ((root)->rb_node == NULL)
#define ON_RB(node)             (rb_parent(node) != node)
#define RB_CLEAR(node)          (rb_set_parent(node, node))
#define rb_entry_drq(node)      rb_entry((node), struct deadline_rq, rb_node)
#define DRQ_RB_ROOT(dd, drq)    (&(dd)->sort_list[rq_data_dir((drq)->request)])
#define rq_rb_key(rq)           (rq)->sector

static struct deadline_rq *
__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
        struct rb_node *parent = NULL;
        struct deadline_rq *__drq;

        while (*p) {
                parent = *p;
                __drq = rb_entry_drq(parent);

                if (drq->rb_key < __drq->rb_key)
                        p = &(*p)->rb_left;
                else if (drq->rb_key > __drq->rb_key)
                        p = &(*p)->rb_right;
                else
                        return __drq;
        }

        rb_link_node(&drq->rb_node, parent, p);
        return NULL;
}

static void
deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        struct deadline_rq *__alias;

        drq->rb_key = rq_rb_key(drq->request);

retry:
        __alias = __deadline_add_drq_rb(dd, drq);
        if (!__alias) {
                rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
                return;
        }

        deadline_move_request(dd, __alias);
        goto retry;
}
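
/*
 * The sort tree cannot hold two requests with the same starting sector
 * (rb_key), so when __deadline_add_drq_rb() finds an alias it is moved
 * straight to the dispatch queue and the insert is retried.
 */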

static inline void
deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);

        if (dd->next_drq[data_dir] == drq) {
                struct rb_node *rbnext = rb_next(&drq->rb_node);

                dd->next_drq[data_dir] = NULL;
                if (rbnext)
                        dd->next_drq[data_dir] = rb_entry_drq(rbnext);
        }

        BUG_ON(!ON_RB(&drq->rb_node));
        rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
        RB_CLEAR(&drq->rb_node);
}

static struct request *
deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;
        struct deadline_rq *drq;

        while (n) {
                drq = rb_entry_drq(n);

                if (sector < drq->rb_key)
                        n = n->rb_left;
                else if (sector > drq->rb_key)
                        n = n->rb_right;
                else
                        return drq->request;
        }

        return NULL;
}

/*
 * deadline_find_first_drq finds the first (lowest sector numbered) request
 * for the specified data_dir. Used to sweep back to the start of the disk
 * (1-way elevator) after we process the last (highest sector) request.
 */
static struct deadline_rq *
deadline_find_first_drq(struct deadline_data *dd, int data_dir)
{
        struct rb_node *n = dd->sort_list[data_dir].rb_node;

        for (;;) {
                if (n->rb_left == NULL)
                        return rb_entry_drq(n);

                n = n->rb_left;
        }
}

/*
 * add drq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        const int data_dir = rq_data_dir(drq->request);

        deadline_add_drq_rb(dd, drq);
        /*
         * set expire time (reads and writes each have their own fifo
         * deadline) and add to fifo list
         */
        drq->expires = jiffies + dd->fifo_expire[data_dir];
        list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);

        if (rq_mergeable(rq))
                deadline_add_drq_hash(dd, drq);
}

/*
 * remove rq from rbtree, fifo, and hash
 */
static void deadline_remove_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&drq->fifo);
        deadline_del_drq_rb(dd, drq);
        deadline_del_drq_hash(drq);
}

static int
deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *__rq;
        int ret;

        /*
         * see if the merge hash can satisfy a back merge
         */
        __rq = deadline_find_drq_hash(dd, bio->bi_sector);
        if (__rq) {
                BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);

                if (elv_rq_merge_ok(__rq, bio)) {
                        ret = ELEVATOR_BACK_MERGE;
                        goto out;
                }
        }

        /*
         * check for front merge
         */
        if (dd->front_merges) {
                sector_t rb_key = bio->bi_sector + bio_sectors(bio);

                __rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
                if (__rq) {
                        BUG_ON(rb_key != rq_rb_key(__rq));

                        if (elv_rq_merge_ok(__rq, bio)) {
                                ret = ELEVATOR_FRONT_MERGE;
                                goto out;
                        }
                }
        }

        return ELEVATOR_NO_MERGE;
out:
        if (ret)
                deadline_hot_drq_hash(dd, RQ_DATA(__rq));
        *req = __rq;
        return ret;
}
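
/*
 * Back merge: the bio starts exactly where a queued request ends, so the
 * hash (keyed by end sector) finds the candidate. Front merge: the bio ends
 * exactly where a request starts, so the sort tree (keyed by start sector)
 * is probed with bio->bi_sector + bio_sectors(bio).
 */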

static void deadline_merged_request(request_queue_t *q, struct request *req)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);

        /*
         * hash always needs to be repositioned, key is end sector
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }
}

static void
deadline_merged_requests(request_queue_t *q, struct request *req,
                         struct request *next)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(req);
        struct deadline_rq *dnext = RQ_DATA(next);

        BUG_ON(!drq);
        BUG_ON(!dnext);

        /*
         * reposition drq (this is the merged request) in hash, and in rbtree
         * in case of a front merge
         */
        deadline_del_drq_hash(drq);
        deadline_add_drq_hash(dd, drq);

        if (rq_rb_key(req) != drq->rb_key) {
                deadline_del_drq_rb(dd, drq);
                deadline_add_drq_rb(dd, drq);
        }

        /*
         * if dnext expires before drq, assign its expire time to drq
         * and move into dnext position (dnext will be deleted) in fifo
         */
        if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
                if (time_before(dnext->expires, drq->expires)) {
                        list_move(&drq->fifo, &dnext->fifo);
                        drq->expires = dnext->expires;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
{
        request_queue_t *q = drq->request->q;

        deadline_remove_request(q, drq->request);
        elv_dispatch_add_tail(q, drq->request);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
{
        const int data_dir = rq_data_dir(drq->request);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        dd->next_drq[READ] = NULL;
        dd->next_drq[WRITE] = NULL;

        if (rbnext)
                dd->next_drq[data_dir] = rb_entry_drq(rbnext);

        dd->last_sector = drq->request->sector + drq->request->nr_sectors;

        /*
         * take it off the sort and fifo list, move
         * to dispatch queue
         */
        deadline_move_to_dispatch(dd, drq);
}

#define list_entry_fifo(ptr)    list_entry((ptr), struct deadline_rq, fifo)

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the
 * fifo, 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * drq is expired!
         */
        if (time_after(jiffies, drq->expires))
                return 1;

        return 0;
}
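
/*
 * time_after() does the comparison with jiffies wraparound in mind, and only
 * the oldest entry (the fifo head) needs checking, since requests are queued
 * in arrival order.
 */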

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(request_queue_t *q, int force)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        const int reads = !list_empty(&dd->fifo_list[READ]);
        const int writes = !list_empty(&dd->fifo_list[WRITE]);
        struct deadline_rq *drq;
        int data_dir;

        /*
         * batches are currently reads XOR writes
         */
        if (dd->next_drq[WRITE])
                drq = dd->next_drq[WRITE];
        else
                drq = dd->next_drq[READ];

        if (drq) {
                /* we have a "next request" */

                if (dd->last_sector != drq->request->sector)
                        /* end the batch on a non sequential request */
                        dd->batching += dd->fifo_batch;

                if (dd->batching < dd->fifo_batch)
                        /* we are still entitled to batch */
                        goto dispatch_request;
        }

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY(&dd->sort_list[READ]));

                if (writes && (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return 0;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        if (deadline_check_fifo(dd, data_dir)) {
                /* An expired request exists - satisfy it */
                dd->batching = 0;
                drq = list_entry_fifo(dd->fifo_list[data_dir].next);

        } else if (dd->next_drq[data_dir]) {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                drq = dd->next_drq[data_dir];
        } else {
                /*
                 * The last req was the other direction or we have run out of
                 * higher-sectored requests. Go back to the lowest sectored
                 * request (1 way elevator) and start a new batch.
                 */
                dd->batching = 0;
                drq = deadline_find_first_drq(dd, data_dir);
        }

dispatch_request:
        /*
         * drq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, drq);

        return 1;
}
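
/*
 * Worked example of the batching arithmetic: with the default fifo_batch of
 * 16, up to 16 consecutive sequential requests are dispatched before the
 * deadlines are consulted again; a single non-sequential "next request"
 * bumps dd->batching past the limit and forces direction/fifo re-selection.
 */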

static int deadline_queue_empty(request_queue_t *q)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        return list_empty(&dd->fifo_list[WRITE])
                && list_empty(&dd->fifo_list[READ]);
}

static struct request *
deadline_former_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbprev = rb_prev(&drq->rb_node);

        if (rbprev)
                return rb_entry_drq(rbprev)->request;

        return NULL;
}

static struct request *
deadline_latter_request(request_queue_t *q, struct request *rq)
{
        struct deadline_rq *drq = RQ_DATA(rq);
        struct rb_node *rbnext = rb_next(&drq->rb_node);

        if (rbnext)
                return rb_entry_drq(rbnext)->request;

        return NULL;
}

static void deadline_exit_queue(elevator_t *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        mempool_destroy(dd->drq_pool);
        kfree(dd->hash);
        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
        struct deadline_data *dd;
        int i;

        if (!drq_pool)
                return NULL;

        dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd)
                return NULL;
        memset(dd, 0, sizeof(*dd));

        dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!dd->hash) {
                kfree(dd);
                return NULL;
        }

        dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                           mempool_free_slab, drq_pool, q->node);
        if (!dd->drq_pool) {
                kfree(dd->hash);
                kfree(dd);
                return NULL;
        }

        for (i = 0; i < DL_HASH_ENTRIES; i++)
                INIT_LIST_HEAD(&dd->hash[i]);

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        return dd;
}

static void deadline_put_request(request_queue_t *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq = RQ_DATA(rq);

        mempool_free(drq, dd->drq_pool);
        rq->elevator_private = NULL;
}

static int
deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
                     gfp_t gfp_mask)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        struct deadline_rq *drq;

        drq = mempool_alloc(dd->drq_pool, gfp_mask);
        if (drq) {
                memset(drq, 0, sizeof(*drq));
                RB_CLEAR(&drq->rb_node);
                drq->request = rq;

                INIT_LIST_HEAD(&drq->hash);
                drq->on_hash = 0;

                INIT_LIST_HEAD(&drq->fifo);

                rq->elevator_private = drq;
                return 0;
        }

        return 1;
}

/*
 * sysfs parts below
 */
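
/*
 * These tunables appear under /sys/block/<dev>/queue/iosched/ once the
 * deadline scheduler is active for a device. For example (the device name
 * is illustrative):
 *
 *      # cat /sys/block/sda/queue/iosched/read_expire
 *      500
 *      # echo 250 > /sys/block/sda/queue/iosched/read_expire
 *
 * The two expire values are shown and stored in milliseconds (converted
 * to/from jiffies via the __CONV flag below); the others are plain counts.
 */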

static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
        return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(elevator_t *e, char *page)                        \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)   \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        int ret = deadline_var_store(&__data, (page), count);           \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return ret;                                                     \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION
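
/*
 * Stored values are clamped to [MIN, MAX]; front_merges is effectively a
 * boolean (0..1), and the expire stores convert the user-supplied
 * milliseconds back into jiffies.
 */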

#define DD_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
                                      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

static struct elevator_type iosched_deadline = {
        .ops = {
                .elevator_merge_fn =            deadline_merge,
                .elevator_merged_fn =           deadline_merged_request,
                .elevator_merge_req_fn =        deadline_merged_requests,
                .elevator_dispatch_fn =         deadline_dispatch_requests,
                .elevator_add_req_fn =          deadline_add_request,
                .elevator_queue_empty_fn =      deadline_queue_empty,
                .elevator_former_req_fn =       deadline_former_request,
                .elevator_latter_req_fn =       deadline_latter_request,
                .elevator_set_req_fn =          deadline_set_request,
                .elevator_put_req_fn =          deadline_put_request,
                .elevator_init_fn =             deadline_init_queue,
                .elevator_exit_fn =             deadline_exit_queue,
        },

        .elevator_attrs = deadline_attrs,
        .elevator_name = "deadline",
        .elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
        int ret;

        drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
                                     0, 0, NULL, NULL);

        if (!drq_pool)
                return -ENOMEM;

        ret = elv_register(&iosched_deadline);
        if (ret)
                kmem_cache_destroy(drq_pool);

        return ret;
}

static void __exit deadline_exit(void)
{
        kmem_cache_destroy(drq_pool);
        elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");