Commit | Line | Data |
---|---|---|
e467cde2 RR |
1 | //#define DEBUG |
2 | #include <linux/spinlock.h> | |
3 | #include <linux/blkdev.h> | |
4 | #include <linux/hdreg.h> | |
5 | #include <linux/virtio.h> | |
6 | #include <linux/virtio_blk.h> | |
3d1266c7 JA |
7 | #include <linux/scatterlist.h> |
8 | ||
/* Worst-case scatterlist length: out_hdr + in_hdr + a spare slot on top of
 * the block layer's maximum physical segment count. */
#define VIRTIO_MAX_SG	(3+MAX_PHYS_SEGMENTS)

/* Next drive letter to hand out ("vda", "vdb", ...); bumped in probe.
 * NOTE(review): no protection against running past 'z' — confirm the
 * expected maximum number of devices. */
static unsigned char virtblk_index = 'a';
/* Per-device state for one virtio block device. */
struct virtio_blk
{
	/* Protects the virtqueue and the request list; also serves as the
	 * block request queue lock (passed to blk_init_queue in probe). */
	spinlock_t lock;

	/* The virtio device we are driving. */
	struct virtio_device *vdev;

	/* The single virtqueue used for all requests. */
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	/* Allocator for virtblk_req; one reserved element guarantees
	 * forward progress when GFP_ATOMIC allocation fails. */
	mempool_t *pool;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[VIRTIO_MAX_SG];
};
30 | ||
/* One in-flight request: links the block-layer request to the headers the
 * host reads (out_hdr) and writes (in_hdr). */
struct virtblk_req
{
	/* Entry in virtio_blk.reqs while the request is outstanding. */
	struct list_head list;
	/* The originating block-layer request. */
	struct request *req;
	/* Header the host reads: type, sector, priority. */
	struct virtio_blk_outhdr out_hdr;
	/* Header the host writes: completion status byte. */
	struct virtio_blk_inhdr in_hdr;
};
38 | ||
/*
 * Virtqueue callback: reap all completed requests from the host.
 *
 * Registered via find_vq() in probe, so this runs from the virtqueue
 * notification path.  Takes vblk->lock to serialize against
 * do_virtblk_request(), which the block layer calls under the same lock.
 */
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int uptodate;
		/* Map the status byte written by the host into the block
		 * layer's "uptodate" convention (1 ok, 0 error, <0 errno). */
		switch (vbr->in_hdr.status) {
		case VIRTIO_BLK_S_OK:
			uptodate = 1;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			uptodate = -ENOTTY;
			break;
		default:
			uptodate = 0;
			break;
		}

		end_dequeued_request(vbr->req, uptodate);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}
69 | ||
/*
 * Build and queue one block-layer request onto the virtqueue.
 *
 * Scatterlist layout: sg[0] = out_hdr (host-readable), sg[1..num] = the
 * request's data pages, sg[num+1] = in_hdr (host-writable status).  The
 * "out" count covers the host-readable prefix and "in" the host-writable
 * suffix; the data segments belong to one side or the other depending on
 * the transfer direction.
 *
 * Called with vblk->lock held (from do_virtblk_request).  Returns false
 * when no mempool entry or virtqueue descriptor is available; the caller
 * stops the queue and retries after a completion.
 */
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out, in;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	if (blk_fs_request(vbr->req)) {
		/* Filesystem read/write: direction bit is OR'ed in below. */
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = vbr->req->sector;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else if (blk_pc_request(vbr->req)) {
		/* Packet command (SCSI passthrough). */
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = vbr->req->ioprio;
	} else {
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	/* This init could be done at vblk creation time */
	sg_init_table(vblk->sg, VIRTIO_MAX_SG);
	sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr));
	num = blk_rq_map_sg(q, vbr->req, vblk->sg+1);
	sg_set_buf(&vblk->sg[num+1], &vbr->in_hdr, sizeof(vbr->in_hdr));

	if (rq_data_dir(vbr->req) == WRITE) {
		/* Write: host reads header + data, writes only the status. */
		vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
		out = 1 + num;
		in = 1;
	} else {
		/* Read: host reads only the header, writes data + status. */
		vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
		out = 1;
		in = 1 + num;
	}

	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}
122 | ||
/*
 * Block-layer request function: push as many pending requests as possible
 * into the virtqueue, then notify the host once for the whole batch.
 *
 * The block layer calls this with the queue lock held — which is
 * vblk->lock, as passed to blk_init_queue() in probe.
 */
static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = NULL;
	struct request *req;
	unsigned int issued = 0;

	while ((req = elv_next_request(q)) != NULL) {
		vblk = req->rq_disk->private_data;
		/* sg[] has room for the data plus both headers; probe caps
		 * segments via blk_queue_max_hw_segments when the host
		 * advertises a limit. */
		BUG_ON(req->nr_phys_segments > ARRAY_SIZE(vblk->sg));

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blkdev_dequeue_request(req);
		issued++;
	}

	/* Single kick per batch of queued requests. */
	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}
146 | ||
147 | static int virtblk_ioctl(struct inode *inode, struct file *filp, | |
148 | unsigned cmd, unsigned long data) | |
149 | { | |
150 | return scsi_cmd_ioctl(filp, inode->i_bdev->bd_disk->queue, | |
151 | inode->i_bdev->bd_disk, cmd, | |
152 | (void __user *)data); | |
153 | } | |
154 | ||
/* Block device operations: only ioctl is implemented; open/release and
 * the rest use the block layer defaults. */
static struct block_device_operations virtblk_fops = {
	.ioctl = virtblk_ioctl,
	.owner = THIS_MODULE,
};
159 | ||
160 | static int virtblk_probe(struct virtio_device *vdev) | |
161 | { | |
162 | struct virtio_blk *vblk; | |
163 | int err, major; | |
e467cde2 RR |
164 | u64 cap; |
165 | u32 v; | |
166 | ||
167 | vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); | |
168 | if (!vblk) { | |
169 | err = -ENOMEM; | |
170 | goto out; | |
171 | } | |
172 | ||
173 | INIT_LIST_HEAD(&vblk->reqs); | |
174 | spin_lock_init(&vblk->lock); | |
175 | vblk->vdev = vdev; | |
176 | ||
177 | /* We expect one virtqueue, for output. */ | |
a586d4f6 | 178 | vblk->vq = vdev->config->find_vq(vdev, 0, blk_done); |
e467cde2 RR |
179 | if (IS_ERR(vblk->vq)) { |
180 | err = PTR_ERR(vblk->vq); | |
181 | goto out_free_vblk; | |
182 | } | |
183 | ||
184 | vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req)); | |
185 | if (!vblk->pool) { | |
186 | err = -ENOMEM; | |
187 | goto out_free_vq; | |
188 | } | |
189 | ||
190 | major = register_blkdev(0, "virtblk"); | |
191 | if (major < 0) { | |
192 | err = major; | |
193 | goto out_mempool; | |
194 | } | |
195 | ||
196 | /* FIXME: How many partitions? How long is a piece of string? */ | |
197 | vblk->disk = alloc_disk(1 << 4); | |
198 | if (!vblk->disk) { | |
199 | err = -ENOMEM; | |
200 | goto out_unregister_blkdev; | |
201 | } | |
202 | ||
203 | vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock); | |
204 | if (!vblk->disk->queue) { | |
205 | err = -ENOMEM; | |
206 | goto out_put_disk; | |
207 | } | |
208 | ||
209 | sprintf(vblk->disk->disk_name, "vd%c", virtblk_index++); | |
210 | vblk->disk->major = major; | |
211 | vblk->disk->first_minor = 0; | |
212 | vblk->disk->private_data = vblk; | |
213 | vblk->disk->fops = &virtblk_fops; | |
214 | ||
215 | /* If barriers are supported, tell block layer that queue is ordered */ | |
a586d4f6 | 216 | if (vdev->config->feature(vdev, VIRTIO_BLK_F_BARRIER)) |
e467cde2 RR |
217 | blk_queue_ordered(vblk->disk->queue, QUEUE_ORDERED_TAG, NULL); |
218 | ||
a586d4f6 RR |
219 | /* Host must always specify the capacity. */ |
220 | __virtio_config_val(vdev, offsetof(struct virtio_blk_config, capacity), | |
221 | &cap); | |
e467cde2 RR |
222 | |
223 | /* If capacity is too big, truncate with warning. */ | |
224 | if ((sector_t)cap != cap) { | |
225 | dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", | |
226 | (unsigned long long)cap); | |
227 | cap = (sector_t)-1; | |
228 | } | |
229 | set_capacity(vblk->disk, cap); | |
230 | ||
a586d4f6 RR |
231 | /* Host can optionally specify maximum segment size and number of |
232 | * segments. */ | |
233 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, | |
234 | offsetof(struct virtio_blk_config, size_max), | |
235 | &v); | |
e467cde2 RR |
236 | if (!err) |
237 | blk_queue_max_segment_size(vblk->disk->queue, v); | |
e467cde2 | 238 | |
a586d4f6 RR |
239 | err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, |
240 | offsetof(struct virtio_blk_config, seg_max), | |
241 | &v); | |
e467cde2 RR |
242 | if (!err) |
243 | blk_queue_max_hw_segments(vblk->disk->queue, v); | |
e467cde2 RR |
244 | |
245 | add_disk(vblk->disk); | |
246 | return 0; | |
247 | ||
248 | out_put_disk: | |
249 | put_disk(vblk->disk); | |
250 | out_unregister_blkdev: | |
251 | unregister_blkdev(major, "virtblk"); | |
252 | out_mempool: | |
253 | mempool_destroy(vblk->pool); | |
254 | out_free_vq: | |
255 | vdev->config->del_vq(vblk->vq); | |
256 | out_free_vblk: | |
257 | kfree(vblk); | |
258 | out: | |
259 | return err; | |
260 | } | |
261 | ||
/*
 * Tear down a virtio block device, releasing resources in the reverse
 * order of virtblk_probe().
 */
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int major = vblk->disk->major;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	unregister_blkdev(major, "virtblk");
	mempool_destroy(vblk->pool);
	/* Delete the vq only after the queue is gone, so no request can
	 * reach it anymore. */
	vdev->config->del_vq(vblk->vq);
	kfree(vblk);
}
280 | ||
/* Bind to any virtio device advertising the block device ID;
 * zero entry terminates the table. */
static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
285 | ||
/* Virtio bus driver definition: probe/remove plus the ID table above. */
static struct virtio_driver virtio_blk = {
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtblk_probe,
	.remove =	__devexit_p(virtblk_remove),
};
293 | ||
294 | static int __init init(void) | |
295 | { | |
296 | return register_virtio_driver(&virtio_blk); | |
297 | } | |
298 | ||
/* Module exit point: unregister the driver from the virtio bus. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

/* Allow userspace tooling to autoload this module for virtio block IDs. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");