/* drivers/block/virtio_blk.c */
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

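/*
 * Per-virtqueue state. ____cacheline_aligned_in_smp pads each instance to
 * its own cacheline so that queues serviced on different CPUs do not
 * false-share the lock.
 */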
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk
{
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* enable config space updates */
	bool config_enable;

	/* What the host tells us, plus 2 for header & tail. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req
{
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline int virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return 0;
	case VIRTIO_BLK_S_UNSUPP:
		return -ENOTTY;
	default:
		return -EIO;
	}
}

static int __virtblk_add_req(struct virtqueue *vq,
			     struct virtblk_req *vbr,
			     struct scatterlist *data_sg,
			     bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;
	int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (type == VIRTIO_BLK_T_SCSI_CMD) {
		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
		sgs[num_out++] = &cmd;
	}

	if (have_data) {
		if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	if (type == VIRTIO_BLK_T_SCSI_CMD) {
		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
		sgs[num_out + num_in++] = &sense;
		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
		sgs[num_out + num_in++] = &inhdr;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
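
/*
 * Resulting sg chain, in ring order (driver-writable buffers first, then
 * device-writable ones):
 *
 *   out: out_hdr [, cmd (SCSI)] [, data (write)]
 *   in:  [data (read),] [sense, in_hdr (SCSI),] status
 */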

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	int error = virtblk_result(vbr);

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
		req->resid_len = vbr->in_hdr.residual;
		req->sense_len = vbr->in_hdr.sense_len;
		req->errors = vbr->in_hdr.errors;
	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
		req->errors = (error != 0);
	}

	blk_mq_end_io(req, error);
}

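/*
 * Virtqueue callback: disable further callbacks, drain all completed
 * requests, then re-enable. virtqueue_enable_cb() returns false if more
 * buffers completed in the window, so the loop re-polls instead of risking
 * a missed notification.
 */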
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			blk_mq_complete_request(vbr->req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

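/*
 * ->queue_rq: translate the request into a virtio header, map its data into
 * the per-request scatterlist, and add it to the ring. When the ring is
 * full, the hardware queue is stopped and BLK_MQ_RQ_QUEUE_BUSY is returned;
 * virtblk_done() restarts the queue once completions free up ring space.
 */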
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	const bool last = (req->cmd_flags & REQ_END) != 0;
	int err;
	bool notify = false;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	vbr->req = req;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
	if (num) {
		if (rq_data_dir(vbr->req) == WRITE)
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		else
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;
	int err;

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_SPECIAL;
	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	blk_put_request(req);

	return err;
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}
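
/* With PART_BITS = 4, each disk owns 16 minors: e.g. index 2 -> minor 32. */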
318
319 static ssize_t virtblk_serial_show(struct device *dev,
320 struct device_attribute *attr, char *buf)
321 {
322 struct gendisk *disk = dev_to_disk(dev);
323 int err;
324
325 /* sysfs gives us a PAGE_SIZE buffer */
326 BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
327
328 buf[VIRTIO_BLK_ID_BYTES] = '\0';
329 err = virtblk_get_id(disk, buf);
330 if (!err)
331 return strlen(buf);
332
333 if (err == -EIO) /* Unsupported? Make it empty. */
334 return 0;
335
336 return err;
337 }
338 DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
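
/*
 * Exposed as /sys/block/<disk>/serial; each read issues a
 * VIRTIO_BLK_T_GET_ID request to the host (see virtblk_get_id()).
 */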

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity, size;

	mutex_lock(&vblk->config_lock);
	if (!vblk->config_enable)
		goto done;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	size = capacity * queue_logical_block_size(q);
	string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "new size: %llu %d-byte logical blocks (%s/%s)\n",
		   (unsigned long long)capacity,
		   queue_logical_block_size(q),
		   cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
done:
	mutex_unlock(&vblk->config_lock);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
	int err = 0;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
	if (!vblk->vqs) {
		err = -ENOMEM;
		goto out;
	}

	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
	if (!names) {
		err = -ENOMEM;
		goto err_names;
	}

	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
	if (!callbacks) {
		err = -ENOMEM;
		goto err_callbacks;
	}

	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
	if (!vqs) {
		err = -ENOMEM;
		goto err_vqs;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto err_find_vqs;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

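	/*
	 * The success path falls through the labels below: the temporary
	 * arrays are always freed, while vblk->vqs is freed only on error.
	 */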
err_find_vqs:
	kfree(vqs);
err_vqs:
	kfree(callbacks);
err_callbacks:
	kfree(names);
err_names:
	if (err)
		kfree(vblk->vqs);
out:
	return err;
}
451
452 /*
453 * Legacy naming scheme used for virtio devices. We are stuck with it for
454 * virtio blk but don't ever use it for any new driver.
455 */
456 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
457 {
458 const int base = 'z' - 'a' + 1;
459 char *begin = buf + strlen(prefix);
460 char *end = buf + buflen;
461 char *p;
462 int unit;
463
464 p = end - 1;
465 *p = '\0';
466 unit = base;
467 do {
468 if (p == begin)
469 return -EINVAL;
470 *--p = 'a' + (index % unit);
471 index = (index / unit) - 1;
472 } while (index >= 0);
473
474 memmove(begin, p, end - p);
475 memcpy(buf, prefix, strlen(prefix));
476
477 return 0;
478 }
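
/* Bijective base-26: index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 27 -> "vdab". */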

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	if (writeback)
		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
	else
		blk_queue_flush(vblk->disk->queue, 0);

	revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);
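
/*
 * Exposed as /sys/block/<disk>/cache_type. With VIRTIO_BLK_F_CONFIG_WCE the
 * attribute is writable, e.g.:
 *
 *   echo "write through" > /sys/block/vda/cache_type
 *
 * which writes wce=0 to the config space and drops REQ_FLUSH support.
 */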

static int virtblk_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
};

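/* 0 (the default) means: size the queue from the virtqueue; see virtblk_probe(). */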
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	mutex_init(&vblk->config_lock);

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
	vblk->config_enable = true;

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}
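	/* E.g. a ring with 128 free entries yields a depth of 128, or 64
	 * without VIRTIO_RING_F_INDIRECT_DESC. */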

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
	if (!q) {
		err = -ENOMEM;
		goto out_free_tags;
	}

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	add_disk(vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	flush_work(&vblk->config_work);

	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	flush_work(&vblk->config_work);

	blk_mq_stop_hw_queues(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	vblk->config_enable = true;
	ret = init_vq(vdev->priv);
	if (!ret)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	return ret;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

static struct virtio_driver virtio_blk = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= virtblk_probe,
	.remove			= virtblk_remove,
	.config_changed		= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze			= virtblk_freeze,
	.restore		= virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");