/*
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/module.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>		/* added: pfn_to_mfn()/virt_to_mfn() used below */

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct blk_shadow {
	struct blkif_request req;
	unsigned long request;
	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

static struct block_device_operations xlvbd_block_fops;

#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
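/*
 * Note: __RING_SIZE() rounds the entry count down to a power of two so the
 * free-running producer/consumer indices can be masked cheaply; assuming
 * the usual 4 KiB page, this works out to 32 slots in the shared ring.
 */
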
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	struct xenbus_device *xbdev;
	dev_t dev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	unsigned long shadow_free;
	unsigned long feature_barrier;

	/*
	 * The number of people holding this device open.  We won't allow a
	 * hot-unplug unless this is 0.
	 */
	int users;
};

static DEFINE_SPINLOCK(blkif_io_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
#define GRANT_INVALID_REF	0
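/*
 * Rough upper bound on in-flight data, assuming the canonical blkif limit
 * of 11 segments per request and 32 ring slots: 11 * 32 = 352 segments of
 * up to one 4 KiB page each, i.e. a little under 1.4 MB outstanding.
 * GRANT_INVALID_REF works as a "no grant" marker for ring_ref on the
 * assumption that the grant-table code never hands reference 0 to drivers.
 */
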
#define PARTS_PER_DISK 16

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define DEV_NAME	"xvd"	/* name in /dev */
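/*
 * Worked example of the encoding above: the low byte of the backend's
 * virtual-device value is the minor number.  Minor 0 is the whole disk
 * "xvda" (PARTS_PER_DISK minors are reserved for its partitions); minor 17
 * is disk 'b', partition 1, i.e. /dev/xvdb1.
 */
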
/* Information about our VBDs. */
static LIST_HEAD(vbds_list);
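
/*
 * The shadow array doubles as a free list: a free slot's req.id stores the
 * index of the next free slot, and shadow_free is the head index.
 * Allocation pops the head; completion pushes the slot back.  The
 * 0x0fffffee value below is only a poison pattern so that reads from a
 * freed slot's id stand out when debugging.
 */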
static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free > BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.id;
	info->shadow[free].req.id = 0x0fffffee; /* debug */
	return free;
}

static void add_id_to_freelist(struct blkfront_info *info,
			       unsigned long id)
{
	info->shadow[id].req.id  = info->shadow_free;
	info->shadow[id].request = 0;
	info->shadow_free = id;
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

/*
 * blkif_queue_request
 *
 * request block io
 *
 * id: for guest use only.
 * operation: BLKIF_OP_{READ,WRITE,PROBE}
 * buffer: buffer to read/write into. this should be a
 *   virtual address in the guest os.
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	unsigned long buffer_mfn;
	struct blkif_request *ring_req;
	struct bio *bio;
	struct bio_vec *bvec;
	int idx;
	unsigned long id;
	unsigned int fsect, lsect;
	int ref;
	grant_ref_t gref_head;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (gnttab_alloc_grant_references(
		BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
		gnttab_request_free_callback(
			&info->callback,
			blkif_restart_queue_callback,
			info,
			BLKIF_MAX_SEGMENTS_PER_REQUEST);
		return 1;
	}

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = (unsigned long)req;

	ring_req->id = id;
	ring_req->sector_number = (blkif_sector_t)req->sector;
	ring_req->handle = info->handle;

	ring_req->operation = rq_data_dir(req) ?
		BLKIF_OP_WRITE : BLKIF_OP_READ;
	if (blk_barrier_rq(req))
		ring_req->operation = BLKIF_OP_WRITE_BARRIER;

	ring_req->nr_segments = 0;
	rq_for_each_bio (bio, req) {
		bio_for_each_segment (bvec, bio, idx) {
			BUG_ON(ring_req->nr_segments
			       == BLKIF_MAX_SEGMENTS_PER_REQUEST);
			buffer_mfn = pfn_to_mfn(page_to_pfn(bvec->bv_page));
			fsect = bvec->bv_offset >> 9;
			lsect = fsect + (bvec->bv_len >> 9) - 1;
			/* install a grant reference. */
			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(
					ref,
					info->xbdev->otherend_id,
					buffer_mfn,
					rq_data_dir(req));

			info->shadow[id].frame[ring_req->nr_segments] =
				mfn_to_pfn(buffer_mfn);

			ring_req->seg[ring_req->nr_segments] =
					(struct blkif_request_segment) {
						.gref       = ref,
						.first_sect = fsect,
						.last_sect  = lsect };

			ring_req->nr_segments++;
		}
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	gnttab_free_grant_references(gref_head);

	return 0;
}

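/*
 * Push the requests queued above into the shared ring and, only when the
 * backend may actually be waiting, kick it over the event channel:
 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() checks the producer index against
 * the backend's advertised req_event to suppress needless interrupts.
 */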
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

/*
 * do_blkif_request
 *  read a block; request is in a request queue
 */
static void do_blkif_request(request_queue_t *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = elv_next_request(rq)) != NULL) {
		info = req->rq_disk->private_data;
		if (!blk_fs_request(req)) {
			end_request(req, 0);
			continue;
		}

		if (RING_FULL(&info->ring))
			goto wait;

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%li) buffer:%p [%s]\n",
			 req, req->cmd, (unsigned long)req->sector,
			 req->current_nr_sectors,
			 req->nr_sectors, req->buffer,
			 rq_data_dir(req) ? "write" : "read");

		blkdev_dequeue_request(req);
		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

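/*
 * The queue limits configured below guarantee that any request the block
 * layer hands us can be expressed in a single blkif ring slot: at most
 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments (11 in the canonical blkif ABI),
 * each no larger than a page and never straddling a page boundary.
 */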
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
{
	struct request_queue *rq;

	rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
	if (rq == NULL)
		return -1;

	elevator_init(rq, "noop");

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_hardsect_size(rq, sector_size);
	blk_queue_max_sectors(rq, 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
	blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	gd->queue = rq;

	return 0;
}

static int xlvbd_barrier(struct blkfront_info *info)
{
	int err;

	err = blk_queue_ordered(info->rq,
				info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE,
				NULL);
	if (err)
		return err;

	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_barrier ? "enabled" : "disabled");
	return 0;
}

static int xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity,
			       int vdevice, u16 vdisk_info, u16 sector_size,
			       struct blkfront_info *info)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err = -ENODEV;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((minor % PARTS_PER_DISK) == 0)
		nr_minors = PARTS_PER_DISK;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto out;

	if (nr_minors > 1)
		sprintf(gd->disk_name, "%s%c", DEV_NAME,
			'a' + minor / PARTS_PER_DISK);
	else
		sprintf(gd->disk_name, "%s%c%d", DEV_NAME,
			'a' + minor / PARTS_PER_DISK,
			minor % PARTS_PER_DISK);

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size)) {
		del_gendisk(gd);
		goto out;
	}

	info->rq = gd->queue;

	if (info->feature_barrier)
		xlvbd_barrier(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	info->gd = gd;

	return 0;

 out:
	return err;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&blkif_io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);
	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&blkif_io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

static void blkif_completion(struct blk_shadow *s)
{
	int i;
	for (i = 0; i < s->req.nr_segments; i++)
		gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL);
}

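/*
 * Response handling: each response id indexes the shadow slot that
 * remembers the originating struct request and its grant references, so
 * completion can revoke the grants, recycle the slot and finish the
 * request.  A barrier op failing with EOPNOTSUPP disables barriers rather
 * than failing the write.
 */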
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int uptodate;

	spin_lock_irqsave(&blkif_io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&blkif_io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;
		int ret;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id = bret->id;
		req = (struct request *)info->shadow[id].request;

		blkif_completion(&info->shadow[id]);

		add_id_to_freelist(info, id);

		uptodate = (bret->status == BLKIF_RSP_OKAY);
		switch (bret->operation) {
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
				       info->gd->disk_name);
				uptodate = -EOPNOTSUPP;
				info->feature_barrier = 0;
				xlvbd_barrier(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			ret = end_that_request_first(req, uptodate,
						     req->hard_nr_sectors);
			BUG_ON(ret);
			end_that_request_last(req, uptodate);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&blkif_io_lock, flags);

	return IRQ_HANDLED;
}

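/*
 * Allocate the shared ring page and an event channel, then hand both to
 * the backend: the page as a grant reference and the channel by number,
 * both published in xenstore by talk_to_backend() below.
 */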
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt,
					IRQF_SAMPLE_RANDOM, "blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

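/*
 * Device bring-up follows the usual xenbus handshake: probe allocates the
 * ring and event channel, publishes them and switches to Initialised; once
 * the backend reports Connected (with sector count, sector size and flags
 * in its xenstore directory), blkfront_connect() creates the gendisk.
 */
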
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading virtual-device");
		return err;
	}

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	info->xbdev = dev;
	info->vdevice = vdevice;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev->dev.driver_data = info;

	err = talk_to_backend(dev, info);
	if (err) {
		kfree(info);
		dev->dev.driver_data = NULL;
		return err;
	}

	return 0;
}

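/*
 * Recovery relies on the shadow copies kept by blkif_queue_request(): the
 * new ring starts empty, so every request that was in flight is re-issued
 * from its shadow entry, with a fresh id and re-established grant
 * references.
 */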
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct blkif_request *req;
	struct blk_shadow *copy;
	int j;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmalloc(sizeof(info->shadow), GFP_KERNEL);
	if (!copy)
		return -ENOMEM;
	memcpy(copy, info->shadow, sizeof(info->shadow));

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;

	/* Stage 3: Find pending requests and requeue them. */
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (copy[i].request == 0)
			continue;

		/* Grab a request slot and copy shadow state into it. */
		req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
		*req = copy[i].req;

		/* We get a new request id, and must reset the shadow state. */
		req->id = get_id_from_freelist(info);
		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));

		/* Rewrite any grant references invalidated by susp/resume. */
		for (j = 0; j < req->nr_segments; j++)
			gnttab_grant_foreign_access_ref(
				req->seg[j].gref,
				info->xbdev->otherend_id,
				pfn_to_mfn(info->shadow[req->id].frame[j]),
				rq_data_dir((struct request *)
					    info->shadow[req->id].request));
		info->shadow[req->id].req = *req;

		info->ring.req_prod_pvt++;
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&blkif_io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Send off requeued requests */
	flush_requests(info);

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	spin_unlock_irq(&blkif_io_lock);

	return 0;
}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_backend(dev, info);
	if (info->connected == BLKIF_STATE_SUSPENDED && !err)
		err = blkif_recover(info);

	return err;
}

/*
 * Invoked when the backend is finally 'ready' (and has produced the details
 * about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int binfo;
	int err;

	if ((info->connected == BLKIF_STATE_CONNECTED) ||
	    (info->connected == BLKIF_STATE_SUSPENDED))
		return;

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%lu", &info->feature_barrier,
			    NULL);
	if (err)
		info->feature_barrier = 0;

	err = xlvbd_alloc_gendisk(BLKIF_MINOR(info->vdevice),
				  sectors, info->vdevice,
				  binfo, sector_size, info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&blkif_io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&blkif_io_lock);

	add_disk(info->gd);
}

/*
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void blkfront_closing(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;
	unsigned long flags;

	dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename);

	if (info->rq == NULL)
		goto out;

	spin_lock_irqsave(&blkif_io_lock, flags);

	del_gendisk(info->gd);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&blkif_io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_scheduled_work();

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

 out:
	xenbus_frontend_closed(dev);
}

/*
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev->dev.driver_data;
	struct block_device *bd;

	dev_dbg(&dev->dev, "blkfront:backend_changed.\n");

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosing:
		bd = bdget(info->dev);
		if (bd == NULL)
			xenbus_dev_fatal(dev, -ENODEV, "bdget failed");

		mutex_lock(&bd->bd_mutex);
		if (info->users > 0)
			xenbus_dev_error(dev, -EBUSY,
					 "Device in use; refusing to close");
		else
			blkfront_closing(dev);
		mutex_unlock(&bd->bd_mutex);
		bdput(bd);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename);

	blkif_free(info, 0);

	kfree(info);

	return 0;
}

static int blkif_open(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users++;
	return 0;
}

static int blkif_release(struct inode *inode, struct file *filep)
{
	struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
	info->users--;
	if (info->users == 0) {
		/* Check whether we have been instructed to close.  We will
		   have ignored this request initially, as the device was
		   still mounted. */
		struct xenbus_device *dev = info->xbdev;
		enum xenbus_state state = xenbus_read_driver_state(dev->otherend);

		if (state == XenbusStateClosing)
			blkfront_closing(dev);
	}
	return 0;
}

static struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
};

static struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = backend_changed,
};

static int __init xlblk_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	return xenbus_register_frontend(&blkfront);
}
module_init(xlblk_init);

static void xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);