/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
static unsigned int debug_lvl;
module_param(log_stats, uint, 0644);
module_param(debug_lvl, uint, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct blkif_st		*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helper to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index into pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

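/*
 * Worked example (illustrative only): for the third pending_req in the pool
 * (req - blkbk->pending_reqs == 2) and segment 3, the page index is
 * 2 * 11 + 3 = 25, i.e. blkbk->pending_pages[25] backs that segment.
 */
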
#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

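/*
 * Note on usage: pending_page() expands to a bare member access and is
 * therefore invoked through the pool pointer, as in
 * blkbk->pending_page(req, seg) (see vaddr() above), whereas
 * pending_handle() already names blkbk itself and is used standalone.
 */
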
static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
			 int operation)
{
	struct vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
		goto out;

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

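/*
 * For example (illustrative): a write to a vbd exported read-only, or any
 * request whose final sector lies past vbd_sz(vbd), is rejected here and
 * never reaches the underlying block device.
 */
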
static void vbd_resize(struct blkif_st *blkif)
{
	struct vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
	       blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	printk(KERN_INFO "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printk(KERN_WARNING "Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		printk(KERN_WARNING "Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		printk(KERN_WARNING "Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		printk(KERN_WARNING "Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct blkif_st *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct blkif_st *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | f %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_f_req  = 0;
}

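/*
 * Illustrative output (thread name and counts assumed, not taken from a real
 * run): with log_stats enabled this prints roughly every 10 seconds of
 * activity, e.g.
 *   blkback.1.xvda: oo   0 | rd  120 | wr  340 | f    2
 */
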
int xen_blkif_schedule(void *arg)
{
	struct blkif_st *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for %lx\n",
			       (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->nr_segments;
	int ret = 0;

	/*
	 * Set up map[..] with the PFN of the page in our domain, together
	 * with the corresponding grant reference, for each segment.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		/* A non-READ op only reads the guest's pages: map read-only. */
		if (pending_req->operation != BLKIF_OP_READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, pending_req->blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
				       blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for %lx (ret: %d)\n",
			       (unsigned long)map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		/*
		 * The low bits of buf carry the byte offset of first_sect
		 * within the mapped page; the rest is the bus address.
		 */
		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}

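/*
 * Example of the pendcnt accounting (illustrative): a request carved into
 * three bios starts with pendcnt == 3; the first two completions only
 * decrement it, and the third performs the unmap and queues the response.
 */
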
/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
			blkif->st_f_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
		default:
			/*
			 * A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest.
			 */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		operation = WRITE_FLUSH;
		/*
		 * The frontend likes to set this to -1, which vbd_translate
		 * is unhappy about.
		 */
		req->u.rw.sector_number = 0;
		break;
	case BLKIF_OP_WRITE_BARRIER:
	default:
		/* Should never get here. */
		operation = 0; /* make gcc happy */
		goto fail_response;
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/* The corresponding xen_blkif_put is done in __end_block_io_op. */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	/*
	 * Set pendcnt to the number of bios we are about to submit, so the
	 * final completion can detect when all of them are done.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_FLUSH)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	for (i = 0; i < (nbio-1); i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					       xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					       mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					       mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	/* Zero the whole array, not just the first pointer's worth. */
	memset(blkbk->pending_reqs, 0,
	       xen_blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");