/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"
#define WRITE_BARRIER	(REQ_WRITE | REQ_FLUSH | REQ_FUA)
/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats;
static int debug_lvl;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct blkif_st		*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};
#define BLKBACK_INVALID_HANDLE (~0)
struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};
static struct xen_blkbk *blkbk;
/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index in the pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]
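
/*
 * Worked example of the indexing above, assuming the default of 64
 * requests: segment 2 of blkbk->pending_reqs[5] lands at index
 * 5 * 11 + 2 == 57, i.e. blkbk->pending_pages[57], and pending_handle()
 * below uses the very same index into pending_grant_handles.
 */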
static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}
#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])
static int do_block_io_op(struct blkif_st *blkif);
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 struct pending_req *pending_req);
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st);
/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}
/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}
/*
 * Routines for managing virtual block devices (vbds).
 */
static int vbd_translate(struct phys_req *req, struct blkif_st *blkif,
			 int operation)
{
	struct vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
		goto out;

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
static void vbd_resize(struct blkif_st *blkif)
{
	struct vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	printk(KERN_INFO "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	printk(KERN_INFO "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		printk(KERN_WARNING "Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		printk(KERN_WARNING "Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		printk(KERN_WARNING "Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		printk(KERN_WARNING "Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}
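
/*
 * Note on the transaction above: xenbus_transaction_end() returning
 * -EAGAIN means another writer raced with us, so the whole update is
 * simply replayed from the 'again' label until it commits cleanly.
 */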
/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct blkif_st *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
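
/*
 * The flow, for reference: the frontend kicks its event channel, Xen
 * delivers it to us as this IRQ, and the handler only marks the blkif as
 * having work and wakes the per-device kernel thread running
 * xen_blkif_schedule(); all ring processing happens in thread context,
 * never in the interrupt itself.
 */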
/*
 * SCHEDULER FUNCTIONS
 */
static void print_stats(struct blkif_st *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_br_req = 0;
}
int xen_blkif_schedule(void *arg)
{
	struct blkif_st *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}
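
/*
 * The two wait_event_interruptible() calls above gate on different
 * resources: the first sleeps until the frontend has queued work (or the
 * thread is asked to stop), the second until at least one pending_req is
 * free, so a guest cannot spin this thread while the pool is exhausted.
 */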
/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			printk(KERN_ALERT "Failed to remove M2P override for "
			       "%lx\n", (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}
static int xen_blkbk_map(struct blkif_request *req, struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->nr_segments;
	int ret = 0;

	/*
	 * Fill out preq.nr_sects with proper amount of sectors, and setup
	 * assign map[..] with the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		if (pending_req->operation != BLKIF_OP_READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref, pending_req->blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			printk(KERN_ALERT "Failed to install M2P override for"
			       " %lx (ret: %d)\n", (unsigned long)
			       map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}
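
/*
 * Mapping is a two-step dance: the GNTTABOP_map_grant_ref hypercall makes
 * the foreign frame appear at vaddr(pending_req, i), and m2p_add_override()
 * then patches the kernel's M2P tables so that translations such as
 * virt_to_page() on that address keep working while the grant is mapped.
 */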
/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */
static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}
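
/*
 * Each bio submitted for a request carries the owning pending_req in
 * bi_private, and pendcnt is preset to the number of bios in flight, so
 * whichever completion decrements it to zero is the one that unmaps the
 * grants and queues the response.
 */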
/*
 * Function to copy from the ring buffer the 'struct blkif_request'
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct blkif_st *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
			blkif->st_rd_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_WRITE_BARRIER:
			blkif->st_br_req++;
			/* fall through */
		case BLKIF_OP_WRITE:
			blkif->st_wr_req++;
			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			free_req(pending_req);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}
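
/*
 * The consumer index is published (req_cons = ++rc) before the request is
 * dispatched so that make_response(), which compares rsp_prod_pvt against
 * req_cons, sees a consistent view of how much of the ring we have
 * already consumed.
 */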
/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static void dispatch_rw_block_io(struct blkif_st *blkif,
				 struct blkif_request *req,
				 struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;

	switch (req->operation) {
	case BLKIF_OP_READ:
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		operation = WRITE_BARRIER;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%u)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number | (int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/* The corresponding xen_blkif_put is done in __end_block_io_op. */
	xen_blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a barrier. */
	if (!bio) {
		BUG_ON(operation != WRITE_BARRIER);
		bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	/*
	 * We set it once, up front, so that the last submit_bio does not
	 * have to call atomic_inc.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	blk_finish_plug(&plug);
	/* Let the I/Os go.. */

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_BARRIER)
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	for (i = 0; i < (nbio-1); i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return;
}
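
/*
 * The bio_add_page() loop above deliberately packs consecutive segments
 * into one bio for as long as the block layer will accept them; only when
 * bio_add_page() returns 0 (bio full or discontiguous) is a fresh bio
 * allocated, sized for the nseg - i segments still to be placed.
 */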
/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct blkif_st *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
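
/*
 * Two independent notifications can result from one response: 'notify'
 * (from RING_PUSH_RESPONSES_AND_CHECK_NOTIFY) kicks the frontend's event
 * channel, while 'more_to_do' re-arms our own thread because further
 * requests were already sitting on the ring.
 */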
static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		printk(KERN_ALERT "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       xen_blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	printk(KERN_ERR "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);
MODULE_LICENSE("Dual BSD/GPL");