/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * Register layout (all registers are 32 bits wide):
 *
 * offset d. name             description
 * ------ -- ---------------- -----------------
 *
 * 0x000  R  MagicValue       Magic value "virt"
 * 0x004  R  Version          Device version (current max. 1)
 * 0x008  R  DeviceID         Virtio device ID
 * 0x00c  R  VendorID         Virtio vendor ID
 *
 * 0x010  R  HostFeatures     Features supported by the host
 * 0x014  W  HostFeaturesSel  Set of host features to access via HostFeatures
 *
 * 0x020  W  GuestFeatures    Features activated by the guest
 * 0x024  W  GuestFeaturesSel Set of activated features to set via GuestFeatures
 * 0x028  W  GuestPageSize    Size of guest's memory page in bytes
 *
 * 0x030  W  QueueSel         Queue selector
 * 0x034  R  QueueNumMax      Maximum size of the currently selected queue
 * 0x038  W  QueueNum         Queue size for the currently selected queue
 * 0x03c  W  QueueAlign       Used Ring alignment for the current queue
 * 0x040  RW QueuePFN         PFN for the currently selected queue
 *
 * 0x050  W  QueueNotify      Queue notifier
 * 0x060  R  InterruptStatus  Interrupt status register
 * 0x064  W  InterruptACK     Interrupt acknowledge register
 * 0x070  RW Status           Device status register
 *
 * 0x100+ RW                  Device-specific configuration space
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
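
/*
 * For illustration only, a rough sketch of the discovery handshake a guest
 * performs against the register layout above (see virtio_mmio_probe() below
 * for the real sequence, including error handling):
 *
 *	magic   = readl(base + VIRTIO_MMIO_MAGIC_VALUE);   // "virt"
 *	version = readl(base + VIRTIO_MMIO_VERSION);       // must be 1
 *	device  = readl(base + VIRTIO_MMIO_DEVICE_ID);
 *	vendor  = readl(base + VIRTIO_MMIO_VENDOR_ID);
 *	writel(PAGE_SIZE, base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 */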

#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_mmio.h>
#include <linux/virtio_ring.h>



/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE



#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	unsigned int num;

	/* the index of the queue */
	int queue_index;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;
};



/* Configuration interface */

static u32 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* TODO: Features > 32 bits */
	writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL);

	return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES);
}

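/*
 * Write the feature bits accepted by the driver back to the device, one
 * 32-bit word at a time: GuestFeaturesSel selects the word index and
 * GuestFeatures takes the corresponding bits of vdev->features.
 */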
static void vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int i;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	for (i = 0; i < ARRAY_SIZE(vdev->features); i++) {
		writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL);
		writel(vdev->features[i],
				vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES);
	}
}

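/*
 * Device-specific configuration space lives at offset 0x100 and up.
 * It is accessed byte by byte, so transfers of any length and alignment
 * work regardless of how the device lays out its config fields.
 */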
static void vm_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}

static void vm_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	const u8 *ptr = buf;
	int i;

	for (i = 0; i < len; i++)
		writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i);
}

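/* The Status register is 32 bits wide on the bus, but only the low byte
 * carries the virtio device status. */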
static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}



/* Transport interface */

/* the notify function used when creating a virt queue */
static void vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver,
			struct virtio_driver, driver);
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)
			&& vdrv && vdrv->config_changed) {
		vdrv->config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}


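/*
 * Tear down a single virtqueue: unlink it from the IRQ dispatch list,
 * destroy the vring, tell the device the queue is gone by writing a zero
 * PFN, then free the queue pages and the bookkeeping structure.
 */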
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags, size;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}


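/*
 * Create and activate a single virtqueue: select it, size it (halving the
 * ring from the device's advertised maximum until the allocation succeeds),
 * hand its PFN to the device and wrap it in a vring virtqueue.
 */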
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags, size;
	int err;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}
	info->queue_index = index;

	/* Allocate pages for the queue - start with a queue as big as
	 * possible (limited by maximum size allowed by device), drop down
	 * to a minimal size, just big enough to fit descriptor table
	 * and two rings (which makes it "alignment_size * 2")
	 */
	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	while (1) {
		size = PAGE_ALIGN(vring_size(info->num,
				VIRTIO_MMIO_VRING_ALIGN));
		/* Already smallest possible allocation? */
		if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
			err = -ENOMEM;
			goto error_alloc_pages;
		}

		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (info->queue)
			break;

		info->num /= 2;
	}

	/* Activate the queue */
	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	writel(VIRTIO_MMIO_VRING_ALIGN,
			vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
	writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);

	/* Create the vring */
	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
			vdev, info->queue, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	free_pages_exact(info->queue, size);
error_alloc_pages:
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

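/*
 * All virtqueues share the platform device's single interrupt line;
 * vm_interrupt() dispatches to every queue on the device's list.
 */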
static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char *names[])
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
};



/* Platform device */

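/*
 * Probe sequence: claim and map the MMIO region, verify the magic value
 * and version, read the device and vendor IDs, tell the device the guest
 * page size and register with the virtio core.
 */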
static int __devinit virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (memcmp(&magic, "virt", 4) != 0) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version != 1) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	platform_set_drvdata(pdev, vm_dev);

	return register_virtio_device(&vm_dev->vdev);
}

static int __devexit virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}



/* Platform driver */

static struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
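
/*
 * A hypothetical example of a device tree node matched by the table above
 * (the unit address, region size and interrupt number are made up purely
 * for illustration):
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	};
 */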

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= __devexit_p(virtio_mmio_remove),
	.driver		= {
		.name	= "virtio-mmio",
		.owner	= THIS_MODULE,
		.of_match_table	= virtio_mmio_match,
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");