#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>
#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;
struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;
struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* Lock for config space updates */
        struct mutex config_lock;

        /* enable config space updates */
        bool config_enable;

        /* What host tells us, plus 2 for header & tailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct request *req;
        struct virtio_blk_outhdr out_hdr;
        struct virtio_scsi_inhdr in_hdr;
        u8 status;
        struct scatterlist sg[];
};
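/*
 * Translate the status byte written back by the device into a kernel errno:
 * 0 on success, -ENOTTY for request types the device does not support,
 * -EIO for everything else.
 */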
static inline int virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return 0;
        case VIRTIO_BLK_S_UNSUPP:
                return -ENOTTY;
        default:
                return -EIO;
        }
}
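/*
 * Build the scatterlist chain for one request and add it to the virtqueue.
 * The descriptor layout is always:
 *
 *   out header -> [SCSI command block] -> [data] -> [sense + SCSI in header] -> status byte
 *
 * where the bracketed entries are only present for SCSI pass-through
 * requests (VIRTIO_BLK_T_SCSI_CMD) or when the request carries data.
 */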
static int __virtblk_add_req(struct virtqueue *vq,
                             struct virtblk_req *vbr,
                             struct scatterlist *data_sg,
                             bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;
        int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        /*
         * If this is a packet command we need a couple of additional headers.
         * Behind the normal outhdr we put a segment with the scsi command
         * block, and before the normal inhdr we put the sense data and the
         * inhdr with additional status information.
         */
        if (type == VIRTIO_BLK_T_SCSI_CMD) {
                sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
                sgs[num_out++] = &cmd;
        }

        if (have_data) {
                if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        if (type == VIRTIO_BLK_T_SCSI_CMD) {
                sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
                sgs[num_out + num_in++] = &sense;
                sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
                sgs[num_out + num_in++] = &inhdr;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
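/*
 * Completion side: called through the blk-mq ->complete hook after
 * virtblk_done() has marked the request finished.  For SCSI pass-through
 * requests the residual, sense length and error fields are copied back from
 * the in header before the request is ended.
 */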
static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        int error = virtblk_result(vbr);

        if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
                req->resid_len = vbr->in_hdr.residual;
                req->sense_len = vbr->in_hdr.sense_len;
                req->errors = vbr->in_hdr.errors;
        } else if (req->cmd_type == REQ_TYPE_SPECIAL) {
                req->errors = (error != 0);
        }

        blk_mq_end_io(req, error);
}
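/*
 * Virtqueue callback, run from the vring interrupt.  Callbacks are disabled
 * while buffers are drained and only re-enabled once the queue is empty, so
 * a completion that races with the final check is picked up by another pass
 * of the loop.
 */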
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        blk_mq_complete_request(vbr->req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
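/*
 * blk-mq ->queue_rq hook: translate the request into a virtio-blk out header,
 * map its segments and add it to the virtqueue backing this hardware context.
 * A full ring stops the hardware queue and returns BLK_MQ_RQ_QUEUE_BUSY so
 * the core retries the request later.
 */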
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        const bool last = (req->cmd_flags & REQ_END) != 0;
        int err;
        bool notify = false;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        vbr->req = req;
        if (req->cmd_flags & REQ_FLUSH) {
                vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
                vbr->out_hdr.sector = 0;
                vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
        } else {
                switch (req->cmd_type) {
                case REQ_TYPE_FS:
                        vbr->out_hdr.type = 0;
                        vbr->out_hdr.sector = blk_rq_pos(vbr->req);
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                case REQ_TYPE_BLOCK_PC:
                        vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                case REQ_TYPE_SPECIAL:
                        vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
                        vbr->out_hdr.sector = 0;
                        vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
                        break;
                default:
                        /* We don't put anything else in the queue. */
                        BUG();
                }
        }

        num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
        if (num) {
                if (rq_data_dir(vbr->req) == WRITE)
                        vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
                else
                        vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_MQ_RQ_QUEUE_BUSY;
                return BLK_MQ_RQ_QUEUE_ERROR;
        }

        if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_MQ_RQ_QUEUE_OK;
}
/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request *req;
        struct bio *bio;
        int err;

        bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
                           GFP_KERNEL);
        if (IS_ERR(bio))
                return PTR_ERR(bio);

        req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
        if (IS_ERR(req)) {
                bio_put(bio);
                return PTR_ERR(req);
        }

        req->cmd_type = REQ_TYPE_SPECIAL;
        err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        blk_put_request(req);

        return err;
}
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}
static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};
static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}
static ssize_t virtblk_serial_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
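/*
 * Handle a capacity change announced through the config space.  The work
 * item runs in process context so it can take the config mutex, re-read the
 * capacity, resize the gendisk and emit a RESIZE uevent.
 */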
static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        char *envp[] = { "RESIZE=1", NULL };
        u64 capacity, size;

        mutex_lock(&vblk->config_lock);
        if (!vblk->config_enable)
                goto done;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        size = capacity * queue_logical_block_size(q);
        string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "new size: %llu %d-byte logical blocks (%s/%s)\n",
                   (unsigned long long)capacity,
                   queue_logical_block_size(q),
                   cap_str_10, cap_str_2);

        set_capacity(vblk->disk, capacity);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
done:
        mutex_unlock(&vblk->config_lock);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}
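/*
 * Discover the virtqueues.  If the device offers VIRTIO_BLK_F_MQ the number
 * of request queues is read from config space, otherwise a single queue is
 * used.  Each queue gets a "req.N" name and its own lock so submissions to
 * different queues do not contend.
 */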
static int init_vq(struct virtio_blk *vblk)
{
        int err = 0;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
        names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
        callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
        vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
        if (!vblk->vqs || !names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration. */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}
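/*
 * Cache mode handling: with VIRTIO_BLK_F_CONFIG_WCE the writeback flag is
 * read from (and may be written to) config space; otherwise the mode is
 * implied by whether VIRTIO_BLK_F_WCE was negotiated.
 */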
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        if (writeback)
                blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
        else
                blk_queue_flush(vblk->disk->queue, 0);

        revalidate_disk(vblk->disk);
}
static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
                if (sysfs_streq(buf, virtblk_cache_types[i]))
                        break;

        if (i < 0)
                return -EINVAL;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}
static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}
static const struct device_attribute dev_attr_cache_type_ro =
        __ATTR(cache_type, S_IRUGO,
               virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
        __ATTR(cache_type, S_IRUGO|S_IWUSR,
               virtblk_cache_type_show, virtblk_cache_type_store);
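/*
 * Per-request constructor for the tag set: the scatterlist lives in the
 * request pdu (see tag_set.cmd_size in virtblk_probe) and only needs its
 * end markers initialized here.
 */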
static int virtblk_init_request(void *data, struct request *rq,
                unsigned int hctx_idx, unsigned int request_idx,
                unsigned int numa_node)
{
        struct virtio_blk *vblk = data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}
static struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .map_queue      = blk_mq_map_queue,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
};
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
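/*
 * Probe: allocate the per-device state, set up the virtqueues and the blk-mq
 * tag set, create the gendisk, then apply every limit and topology hint the
 * host advertises (segment count and size, block size, physical block
 * exponent, alignment, min/opt I/O size) before registering the disk.
 */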
static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u64 cap;
        u32 v, blk_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need an extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;
        mutex_init(&vblk->config_lock);

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
        vblk->config_enable = true;

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }
        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
        if (!q) {
                err = -ENOMEM;
                goto out_free_tags;
        }

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->driverfs_dev = &vdev->dev;
        vblk->index = index;
        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)cap != cap) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)cap);
                cap = (sector_t)-1;
        }
        set_capacity(vblk->disk, cap);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No need to bounce any requests */
        blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                blk_queue_max_segment_size(q, v);
        else
                blk_queue_max_segment_size(q, -1U);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);
        add_disk(vblk->disk);
        err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
        if (err)
                goto out_del_disk;

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_rw);
        else
                err = device_create_file(disk_to_dev(vblk->disk),
                                         &dev_attr_cache_type_ro);
        if (err)
                goto out_del_disk;
        return 0;

out_del_disk:
        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}
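/*
 * Teardown mirrors probe: stop config-change processing first, remove the
 * disk and queue, reset the device to silence the virtqueues, and only
 * release the minor index if nobody else still holds a reference to the
 * disk.
 */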
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
        vblk->config_enable = false;
        mutex_unlock(&vblk->config_lock);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        flush_work(&vblk->config_work);

        refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Prevent config work handler from accessing the device. */
        mutex_lock(&vblk->config_lock);
        vblk->config_enable = false;
        mutex_unlock(&vblk->config_lock);

        flush_work(&vblk->config_work);

        blk_mq_stop_hw_queues(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        vblk->config_enable = true;
        ret = init_vq(vdev->priv);
        if (!ret)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

        return ret;
}
#endif
static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};
static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
        VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
};
static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};
static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}
static void __exit fini(void)
{
        unregister_blkdev(major, "virtblk");
        unregister_virtio_driver(&virtio_blk);
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");