/*
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	unsigned long pfn;
	struct list_head node;
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests; the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */
static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum number of segments in indirect requests (default is 32)");
#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
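/*
 * Sizing note (assumes the common 4 KiB PAGE_SIZE): __CONST_RING_SIZE()
 * rounds the number of request slots that fit in one shared page down to a
 * power of two, which works out to 32 outstanding requests per ring here.
 */
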
/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info {
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	int ring_ref;
	struct blkif_front_ring ring;
	unsigned int evtchn, irq;
	struct request_queue *rq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_RING_SIZE];
	struct list_head grants;
	struct list_head indirect_pages;
	unsigned int persistent_gnts_c;
	unsigned long shadow_free;
	unsigned int feature_flush;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	unsigned int max_indirect_segments;
	int is_ready;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
	(BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
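/*
 * Rough worked example (assuming BLKIF_MAX_SEGMENTS_PER_REQUEST is 11, as in
 * the Xen blkif interface header, and a 32-entry ring): up to 11 * 32 = 352
 * data segments can be in flight without indirect descriptors.
 */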
#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK	256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

#define SEGS_PER_INDIRECT_FRAME \
	(PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
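/*
 * Worked example (assuming 4 KiB pages and an 8-byte
 * struct blkif_request_segment): SEGS_PER_INDIRECT_FRAME is 512, so
 * INDIRECT_GREFS(512) == 1 and INDIRECT_GREFS(513) == 2.
 */
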
static int blkfront_setup_indirect(struct blkfront_info *info);

static int get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE);
	info->shadow_free = info->shadow[free].req.u.rw.id;
	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_info *info,
			      unsigned long id)
{
	if (info->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (info->shadow[id].request == NULL)
		return -EINVAL;
	info->shadow[id].req.u.rw.id = info->shadow_free;
	info->shadow[id].request = NULL;
	info->shadow_free = id;
	return 0;
}

static int fill_grant_buffer(struct blkfront_info *info, int num)
{
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while (i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->pfn = page_to_pfn(granted_page);
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &info->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
				 &info->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(pfn_to_page(gnt_list_entry->pfn));
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long pfn,
			       struct blkfront_info *info)
{
	struct grant *gnt_list_entry;
	unsigned long buffer_mfn;

	BUG_ON(list_empty(&info->grants));
	gnt_list_entry = list_first_entry(&info->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF) {
		info->persistent_gnts_c--;
		return gnt_list_entry;
	}

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		BUG_ON(!pfn);
		gnt_list_entry->pfn = pfn;
	}
	buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
	gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
					info->xbdev->otherend_id,
					buffer_mfn, 0);
	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	return names[op] ?: "reserved";
}

static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors, minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_info *info = (struct blkfront_info *)arg;
	schedule_work(&info->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req)
{
	struct blkfront_info *info = req->rq_disk->private_data;
	struct blkif_request *ring_req;
	unsigned long id;
	unsigned int fsect, lsect;
	int i, ref, n;
	struct blkif_request_segment *segments = NULL;

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts;
	grant_ref_t gref_head;
	struct grant *gnt_list_entry = NULL;
	struct scatterlist *sg;
	int nseg, max_grefs;

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	max_grefs = req->nr_phys_segments;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);

	/* Check if we have enough grants to allocate a request */
	if (info->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = 1;
		if (gnttab_alloc_grant_references(
		    max_grefs - info->persistent_gnts_c,
		    &gref_head) < 0) {
			gnttab_request_free_callback(
				&info->callback,
				blkif_restart_queue_callback,
				info,
				max_grefs);
			return 1;
		}
	} else
		new_persistent_gnts = 0;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
	id = get_id_from_freelist(info);
	info->shadow[id].request = req;

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
		ring_req->operation = BLKIF_OP_DISCARD;
		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
		ring_req->u.discard.id = id;
		ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
		if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
			ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
		else
			ring_req->u.discard.flag = 0;
	} else {
		BUG_ON(info->max_indirect_segments == 0 &&
		       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
		BUG_ON(info->max_indirect_segments &&
		       req->nr_phys_segments > info->max_indirect_segments);
		nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
		ring_req->u.rw.id = id;
		if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			/*
			 * The indirect operation can only be a BLKIF_OP_READ or
			 * BLKIF_OP_WRITE
			 */
			BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
			ring_req->operation = BLKIF_OP_INDIRECT;
			ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.indirect.handle = info->handle;
			ring_req->u.indirect.nr_segments = nseg;
		} else {
			ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
			ring_req->u.rw.handle = info->handle;
			ring_req->operation = rq_data_dir(req) ?
				BLKIF_OP_WRITE : BLKIF_OP_READ;
			if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
				/*
				 * Ideally we can do an unordered flush-to-disk.
				 * In case the backend only supports barriers,
				 * use that.  A barrier request is a superset
				 * of FUA, so we can implement it the same way.
				 * (It's also a FLUSH+FUA, since it is
				 * guaranteed ordered WRT previous writes.)
				 */
				switch (info->feature_flush &
					((REQ_FLUSH|REQ_FUA))) {
				case REQ_FLUSH|REQ_FUA:
					ring_req->operation =
						BLKIF_OP_WRITE_BARRIER;
					break;
				case REQ_FLUSH:
					ring_req->operation =
						BLKIF_OP_FLUSH_DISKCACHE;
					break;
				default:
					ring_req->operation = 0;
				}
			}
			ring_req->u.rw.nr_segments = nseg;
		}
		for_each_sg(info->shadow[id].sg, sg, nseg, i) {
			fsect = sg->offset >> 9;
			lsect = fsect + (sg->length >> 9) - 1;

			if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
			    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
				unsigned long uninitialized_var(pfn);

				if (segments)
					kunmap_atomic(segments);

				n = i / SEGS_PER_INDIRECT_FRAME;
				if (!info->feature_persistent) {
					struct page *indirect_page;

					/* Fetch a pre-allocated page to use for indirect grefs */
					BUG_ON(list_empty(&info->indirect_pages));
					indirect_page = list_first_entry(&info->indirect_pages,
									 struct page, lru);
					list_del(&indirect_page->lru);
					pfn = page_to_pfn(indirect_page);
				}
				gnt_list_entry = get_grant(&gref_head, pfn, info);
				info->shadow[id].indirect_grants[n] = gnt_list_entry;
				segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
			}

			gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
			ref = gnt_list_entry->gref;

			info->shadow[id].grants_used[i] = gnt_list_entry;

			if (rq_data_dir(req) && info->feature_persistent) {
				char *bvec_data;
				void *shared_data;

				BUG_ON(sg->offset + sg->length > PAGE_SIZE);

				shared_data = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
				bvec_data = kmap_atomic(sg_page(sg));

				/*
				 * this does not wipe data stored outside the
				 * range sg->offset..sg->offset+sg->length.
				 * Therefore, blkback *could* see data from
				 * previous requests. This is OK as long as
				 * persistent grants are shared with just one
				 * domain. It may need refactoring if this
				 * changes.
				 */
				memcpy(shared_data + sg->offset,
				       bvec_data   + sg->offset,
				       sg->length);

				kunmap_atomic(bvec_data);
				kunmap_atomic(shared_data);
			}
			if (ring_req->operation != BLKIF_OP_INDIRECT) {
				ring_req->u.rw.seg[i] =
						(struct blkif_request_segment) {
							.gref       = ref,
							.first_sect = fsect,
							.last_sect  = lsect };
			} else {
				n = i % SEGS_PER_INDIRECT_FRAME;
				segments[n] =
					(struct blkif_request_segment) {
							.gref       = ref,
							.first_sect = fsect,
							.last_sect  = lsect };
			}
		}
		if (segments)
			kunmap_atomic(segments);
	}

	info->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	info->shadow[id].req = *ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(gref_head);

	return 0;
}

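/*
 * Layout reminder (informal): a direct request carries its segment
 * descriptors inline in the ring slot (u.rw.seg[]), while an indirect
 * request only carries grant references to separate descriptor pages
 * (u.indirect.indirect_grefs[]), which is what lets nr_segments exceed
 * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */
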
static inline void flush_requests(struct blkfront_info *info)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);

	if (notify)
		notify_remote_via_irq(info->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return ((req->cmd_type != REQ_TYPE_FS) ||
		((req->cmd_flags & REQ_FLUSH) &&
		 !(info->feature_flush & REQ_FLUSH)) ||
		((req->cmd_flags & REQ_FUA) &&
		 !(info->feature_flush & REQ_FUA)));
}

/*
 * read a block; request is in a request queue
 */
static void do_blkif_request(struct request_queue *rq)
{
	struct blkfront_info *info = NULL;
	struct request *req;
	int queued;

	pr_debug("Entered do_blkif_request\n");

	queued = 0;

	while ((req = blk_peek_request(rq)) != NULL) {
		info = req->rq_disk->private_data;

		if (RING_FULL(&info->ring))
			goto wait;

		blk_start_request(req);

		if (blkif_request_flush_invalid(req, info)) {
			__blk_end_request_all(req, -EOPNOTSUPP);
			continue;
		}

		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
			 "(%u/%u) [%s]\n",
			 req, req->cmd, (unsigned long)blk_rq_pos(req),
			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
			 rq_data_dir(req) ? "write" : "read");

		if (blkif_queue_request(req)) {
			blk_requeue_request(rq, req);
wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}

		queued++;
	}

	if (queued != 0)
		flush_requests(info);
}

static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size,
				unsigned int segments)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	rq = blk_init_queue(do_blkif_request, &info->io_lock);
	if (rq == NULL)
		return -1;

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_physical_block_size(rq, physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

static const char *flush_info(unsigned int feature_flush)
{
	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
	case REQ_FLUSH|REQ_FUA:
		return "barrier: enabled;";
	case REQ_FLUSH:
		return "flush diskcache: enabled;";
	default:
		return "barrier or flush: disabled;";
	}
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info->feature_flush),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;

	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
	case XEN_IDE0_MAJOR:
		*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = ((*minor / 64) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_IDE1_MAJOR:
		*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
		*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
			EMULATED_HD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK0_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK1_MAJOR:
	case XEN_SCSI_DISK2_MAJOR:
	case XEN_SCSI_DISK3_MAJOR:
	case XEN_SCSI_DISK4_MAJOR:
	case XEN_SCSI_DISK5_MAJOR:
	case XEN_SCSI_DISK6_MAJOR:
	case XEN_SCSI_DISK7_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XEN_SCSI_DISK8_MAJOR:
	case XEN_SCSI_DISK9_MAJOR:
	case XEN_SCSI_DISK10_MAJOR:
	case XEN_SCSI_DISK11_MAJOR:
	case XEN_SCSI_DISK12_MAJOR:
	case XEN_SCSI_DISK13_MAJOR:
	case XEN_SCSI_DISK14_MAJOR:
	case XEN_SCSI_DISK15_MAJOR:
		*offset = (*minor / PARTS_PER_DISK) +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
			EMULATED_SD_DISK_NAME_OFFSET;
		*minor = *minor +
			((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
			EMULATED_SD_DISK_MINOR_OFFSET;
		break;
	case XENVBD_MAJOR:
		*offset = *minor / PARTS_PER_DISK;
		break;
	default:
		printk(KERN_WARNING "blkfront: your disk configuration is "
		       "incorrect, please use an xvd device instead\n");
		return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

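/*
 * Worked example: an offset of 0 encodes as "a" (giving "xvda" once the
 * DEV_NAME prefix is prepended by the caller), 25 encodes as "z", and 26
 * rolls over to "aa" ("xvdaa"), so names keep growing alphabetically past
 * 26 disks.
 */
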
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name"
					"from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
				 info->max_indirect_segments ? :
				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	unsigned long flags;

	if (info->rq == NULL)
		return;

	spin_lock_irqsave(&info->io_lock, flags);

	/* No more blkif_request(). */
	blk_stop_queue(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irqrestore(&info->io_lock, flags);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

static void kick_pending_request_queues(struct blkfront_info *info)
{
	if (!RING_FULL(&info->ring)) {
		/* Re-enable calldowns. */
		blk_start_queue(info->rq);
		/* Kick things off immediately. */
		do_blkif_request(info->rq);
	}
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_info *info = container_of(work, struct blkfront_info, work);

	spin_lock_irq(&info->io_lock);
	if (info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);
}

static void blkif_free(struct blkfront_info *info, int suspend)
{
	struct grant *persistent_gnt;
	struct grant *n;
	int i, j, segs;

	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_stop_queue(info->rq);

	/* Remove all persistent grants */
	if (!list_empty(&info->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &info->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
							  0, 0UL);
				info->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}
	}
	BUG_ON(info->persistent_gnts_c != 0);

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	for (i = 0; i < BLK_RING_SIZE; i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!info->shadow[i].request)
			goto free_shadow;

		segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       info->shadow[i].req.u.indirect.nr_segments :
		       info->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = info->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

		if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = info->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(pfn_to_page(persistent_gnt->pfn));
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&info->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&info->work);

	/* Free resources associated with old device channel. */
	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref, 0,
					  (unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}
	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->evtchn = info->irq = 0;
}

static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
			     struct blkif_response *bret)
{
	int i;
	struct scatterlist *sg;
	char *bvec_data;
	void *shared_data;
	int nseg;

	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		/*
		 * Copy the data received from the backend into the bvec.
		 * Since bv_offset can be different than 0, and bv_len different
		 * than PAGE_SIZE, we have to keep track of the current offset,
		 * to be sure we are copying the data from the right shared page.
		 */
		for_each_sg(s->sg, sg, nseg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
			shared_data = kmap_atomic(
				pfn_to_page(s->grants_used[i]->pfn));
			bvec_data = kmap_atomic(sg_page(sg));
			memcpy(bvec_data   + sg->offset,
			       shared_data + sg->offset,
			       sg->length);
			kunmap_atomic(bvec_data);
			kunmap_atomic(shared_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < nseg; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &info->grants);
			info->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &info->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(nseg); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &info->grants);
				info->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
				list_add(&indirect_page->lru, &info->indirect_pages);
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
			}
		}
	}
}

static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_info *info = (struct blkfront_info *)dev_id;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = info->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = info->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&info->ring, i);
		id   = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req  = info->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&info->shadow[id], info, bret);

		if (add_id_to_freelist(info, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
					   info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			__blk_end_request_all(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     info->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			__blk_end_request_all(req, error);
			break;
		default:
			BUG();
		}
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		info->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(info);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}

static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_info *info)
{
	struct blkif_sring *sring;
	grant_ref_t gref;
	int err;

	info->ring_ref = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_page(GFP_NOIO | __GFP_HIGH);
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref);
	if (err < 0) {
		free_page((unsigned long)sring);
		info->ring.sring = NULL;
		goto fail;
	}
	info->ring_ref = gref;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
					"blkif", info);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	info->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	err = xenbus_printf(xbt, dev->nodename,
			    "ring-ref", "%u", info->ring_ref);
	if (err) {
		message = "writing ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

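/*
 * Illustrative xenstore layout written by the transaction above (paths and
 * values shown here are an assumption about a typical guest, not something
 * this driver reads back):
 *   device/vbd/<handle>/ring-ref           = "<grant ref of the shared ring>"
 *   device/vbd/<handle>/event-channel      = "<event channel port>"
 *   device/vbd/<handle>/protocol           = XEN_IO_PROTO_ABI_NATIVE
 *   device/vbd/<handle>/feature-persistent = "1"
 */
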
/*
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice, i;
	struct blkfront_info *info;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
						"%s: HVM does not support vbd %d as xen block device\n",
						__func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	INIT_LIST_HEAD(&info->grants);
	INIT_LIST_HEAD(&info->indirect_pages);
	info->persistent_gnts_c = 0;
	info->connected = BLKIF_STATE_DISCONNECTED;
	INIT_WORK(&info->work, blkif_restart_queue);

	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	err = talk_to_blkback(dev, info);
	if (err) {
		kfree(info);
		dev_set_drvdata(&dev->dev, NULL);
		return err;
	}

	return 0;
}

static void split_bio_end(struct bio *bio, int error)
{
	struct split_bio *split_bio = bio->bi_private;

	if (error)
		split_bio->err = error;

	if (atomic_dec_and_test(&split_bio->pending)) {
		split_bio->bio->bi_phys_segments = 0;
		bio_endio(split_bio->bio, split_bio->err);
	}
	bio_put(bio);
}

static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct request *req, *n;
	struct blk_shadow *copy;
	int rc;
	struct bio *bio, *cloned_bio;
	struct bio_list bio_list, merge_bio;
	unsigned int segs, offset;
	int pending, size;
	struct split_bio *split_bio;
	struct list_head requests;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmemdup(info->shadow, sizeof(info->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;

	/* Stage 2: Set up free list. */
	memset(&info->shadow, 0, sizeof(info->shadow));
	for (i = 0; i < BLK_RING_SIZE; i++)
		info->shadow[i].req.u.rw.id = i+1;
	info->shadow_free = info->ring.req_prod_pvt;
	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;

	rc = blkfront_setup_indirect(info);
	if (rc) {
		kfree(copy);
		return rc;
	}

	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs);
	bio_list_init(&bio_list);
	INIT_LIST_HEAD(&requests);
	for (i = 0; i < BLK_RING_SIZE; i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/*
		 * Get the bios in the request so we can re-queue them.
		 */
		if (copy[i].request->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			/*
			 * Flush operations don't contain bios, so
			 * we need to requeue the whole request
			 */
			list_add(&copy[i].request->queuelist, &requests);
			continue;
		}
		merge_bio.head = copy[i].request->bio;
		merge_bio.tail = copy[i].request->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		copy[i].request->bio = NULL;
		blk_end_request_all(copy[i].request, 0);
	}

	kfree(copy);

	/*
	 * Empty the queue, this is important because we might have
	 * requests in the queue with more segments than what we
	 * can handle now.
	 */
	spin_lock_irq(&info->io_lock);
	while ((req = blk_fetch_request(info->rq)) != NULL) {
		if (req->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			list_add(&req->queuelist, &requests);
			continue;
		}
		merge_bio.head = req->bio;
		merge_bio.tail = req->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		req->bio = NULL;
		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA))
			pr_alert("diskcache flush request found!\n");
		__blk_end_request_all(req, 0);
	}
	spin_unlock_irq(&info->io_lock);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(info);

	list_for_each_entry_safe(req, n, &requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_requeue_request(info->rq, req);
	}
	spin_unlock_irq(&info->io_lock);

	while ((bio = bio_list_pop(&bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		if (bio_segments(bio) > segs) {
			/*
			 * This bio has more segments than what we can
			 * handle, we have to split it.
			 */
			pending = (bio_segments(bio) + segs - 1) / segs;
			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
			BUG_ON(split_bio == NULL);
			atomic_set(&split_bio->pending, pending);
			split_bio->bio = bio;
			for (i = 0; i < pending; i++) {
				offset = (i * segs * PAGE_SIZE) >> 9;
				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
					   (unsigned int)bio_sectors(bio) - offset);
				cloned_bio = bio_clone(bio, GFP_NOIO);
				BUG_ON(cloned_bio == NULL);
				bio_trim(cloned_bio, offset, size);
				cloned_bio->bi_private = split_bio;
				cloned_bio->bi_end_io = split_bio_end;
				submit_bio(cloned_bio->bi_rw, cloned_bio);
			}
			/*
			 * Now we have to wait for all those smaller bios to
			 * end, so we can also end the "parent" bio.
			 */
			continue;
		}
		/* We don't need to split this bio */
		submit_bio(bio->bi_rw, bio);
	}

	return 0;
}

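/*
 * Split math example (illustrative): with segs == 32, a re-queued bio with
 * 100 segments yields pending = (100 + 31) / 32 = 4 clones, each covering at
 * most 32 segments of the original bio.
 */
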
/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);

	/*
	 * We have to wait for the backend to switch to
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}

*info
)
1625 struct xenbus_device
*xbdev
= info
->xbdev
;
1626 struct block_device
*bdev
= NULL
;
1628 mutex_lock(&info
->mutex
);
1630 if (xbdev
->state
== XenbusStateClosing
) {
1631 mutex_unlock(&info
->mutex
);
1636 bdev
= bdget_disk(info
->gd
, 0);
1638 mutex_unlock(&info
->mutex
);
1641 xenbus_frontend_closed(xbdev
);
1645 mutex_lock(&bdev
->bd_mutex
);
1647 if (bdev
->bd_openers
) {
1648 xenbus_dev_error(xbdev
, -EBUSY
,
1649 "Device in use; refusing to close");
1650 xenbus_switch_state(xbdev
, XenbusStateClosing
);
1652 xlvbd_release_gendisk(info
);
1653 xenbus_frontend_closed(xbdev
);
1656 mutex_unlock(&bdev
->bd_mutex
);
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	info->feature_discard = 1;
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
		"discard-granularity", "%u", &discard_granularity,
		"discard-alignment", "%u", &discard_alignment,
		NULL);
	if (!err) {
		info->discard_granularity = discard_granularity;
		info->discard_alignment = discard_alignment;
	}
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
		    "discard-secure", "%d", &discard_secure,
		    NULL);
	if (!err)
		info->feature_secdiscard = !!discard_secure;
}

static int blkfront_setup_indirect(struct blkfront_info *info)
{
	unsigned int indirect_segments, segs;
	int err, i;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-max-indirect-segments", "%u", &indirect_segments,
			    NULL);
	if (err) {
		info->max_indirect_segments = 0;
		segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
	} else {
		info->max_indirect_segments = min(indirect_segments,
						  xen_blkif_max_segments);
		segs = info->max_indirect_segments;
	}

	err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE);
	if (err)
		goto out_of_memory;

	if (!info->feature_persistent && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but not persistent
		 * grants, we need to allocate a set of pages that can be
		 * used for mapping indirect grefs
		 */
		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE;

		BUG_ON(!list_empty(&info->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_NOIO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &info->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE; i++) {
		info->shadow[i].grants_used = kzalloc(
			sizeof(info->shadow[i].grants_used[0]) * segs,
			GFP_NOIO);
		info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
		if (info->max_indirect_segments)
			info->shadow[i].indirect_grants = kzalloc(
				sizeof(info->shadow[i].indirect_grants[0]) *
				INDIRECT_GREFS(segs),
				GFP_NOIO);
		if ((info->shadow[i].grants_used == NULL) ||
		    (info->shadow[i].sg == NULL) ||
		    (info->max_indirect_segments &&
		     (info->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(info->shadow[i].sg, segs);
	}

	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE; i++) {
		kfree(info->shadow[i].grants_used);
		info->shadow[i].grants_used = NULL;
		kfree(info->shadow[i].sg);
		info->shadow[i].sg = NULL;
		kfree(info->shadow[i].indirect_grants);
		info->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&info->indirect_pages)) {
		struct page *indirect_page, *n;
		list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}
	return -ENOMEM;
}

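/*
 * Sizing example (assumes 4 KiB pages, a 32-entry ring and the default
 * maximum of 32 indirect segments): fill_grant_buffer() above pre-allocates
 * (32 + INDIRECT_GREFS(32)) * 32 = (32 + 1) * 32 = 1056 grant entries.
 */
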
/*
 * Invoked when the backend is finally 'ready' (and has produced
 * the details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int physical_sector_size;
	unsigned int binfo;
	int err;
	int barrier, flush, discard, persistent;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		return;
	case BLKIF_STATE_SUSPENDED:
		/*
		 * If we are recovering from suspension, we need to wait
		 * for the backend to announce its features before
		 * reconnecting, at least we need to know if the backend
		 * supports indirect descriptors, and how many.
		 */
		blkif_recover(info);
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	/*
	 * physical-sector-size is a newer field, so old backends may not
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "physical-sector-size", "%u", &physical_sector_size);
	if (err != 1)
		physical_sector_size = sector_size;

	info->feature_flush = 0;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-barrier", "%d", &barrier,
			    NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (!err && barrier)
		info->feature_flush = REQ_FLUSH | REQ_FUA;
	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-flush-cache", "%d", &flush,
			    NULL);

	if (!err && flush)
		info->feature_flush = REQ_FLUSH;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-discard", "%d", &discard,
			    NULL);

	if (!err && discard)
		blkfront_setup_discard(info);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-persistent", "%u", &persistent,
			    NULL);
	if (err)
		info->feature_persistent = 0;
	else
		info->feature_persistent = persistent;

	err = blkfront_setup_indirect(info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&info->io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(info);
	spin_unlock_irq(&info->io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/*
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		blkfront_closing(info);
		break;
	}
}

static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

static void blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (!bdev) {
		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
		goto out_mutex;
	}
	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
out_mutex:
	mutex_unlock(&blkfront_mutex);
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};

static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront_driver = {
	.ids  = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

static int __init xlblk_init(void)
{
	int ret;

	if (!xen_has_pv_disk_devices())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");