/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};
struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */
static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}
/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}
/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}
static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}
static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}
static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;
	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = vq->vring.desc[i].next;
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_num: the number of scatterlists readable by other side
 * @in_num: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
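
/*
 * Illustrative sketch (not part of this driver): a caller with one
 * device-readable header and one device-writable status field might build
 * the sgs[] array like this.  The request layout ("req") and the error
 * handling are hypothetical.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, &req->hdr, sizeof(req->hdr));
 *	sgs[0] = &hdr;
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[1] = &status;
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC) < 0)
 *		return -ENOSPC;		(ring full, retry after a used buffer)
 */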
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
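
/*
 * Illustrative sketch (not part of this driver): posting a single
 * device-writable receive buffer, the way a network-style driver might.
 * "buf" and BUF_LEN are hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, BUF_LEN);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 */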
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
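
/*
 * Illustrative sketch (not part of this driver): the usual submit path is
 * "add then kick", both done under the caller's own serialization; the
 * lock and "req" below are hypothetical.
 *
 *	spin_lock_irqsave(&my_vq_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 *	spin_unlock_irqrestore(&my_vq_lock, flags);
 */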
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
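
/*
 * Illustrative sketch (not part of this driver): draining used buffers
 * from a virtqueue callback.  "recycle_buf" is a hypothetical helper.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		recycle_buf(buf, len);
 */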
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
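
/*
 * Illustrative sketch (not part of this driver): the prepare/poll pair lets
 * a NAPI-style driver re-enable callbacks and then cheaply re-check for
 * work that raced in, without re-taking its own queue lock.
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		(more buffers arrived; go around the processing loop again)
 *	}
 */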
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
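
/*
 * Illustrative sketch (not part of this driver): the classic race-free
 * consume loop built on disable_cb/enable_cb.  "consume" is hypothetical.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)))
 *			consume(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */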
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}
static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}
*vring_create_virtqueue(
1011 unsigned int vring_align
,
1012 struct virtio_device
*vdev
,
1014 bool may_reduce_num
,
1015 bool (*notify
)(struct virtqueue
*),
1016 void (*callback
)(struct virtqueue
*),
1019 struct virtqueue
*vq
;
1021 dma_addr_t dma_addr
;
1022 size_t queue_size_in_bytes
;
1025 /* We assume num is a power of 2. */
1026 if (num
& (num
- 1)) {
1027 dev_warn(&vdev
->dev
, "Bad virtqueue length %u\n", num
);
1031 /* TODO: allocate each queue chunk individually */
1032 for (; num
&& vring_size(num
, vring_align
) > PAGE_SIZE
; num
/= 2) {
1033 queue
= vring_alloc_queue(vdev
, vring_size(num
, vring_align
),
1035 GFP_KERNEL
|__GFP_NOWARN
|__GFP_ZERO
);
1044 /* Try to get a single page. You are my only hope! */
1045 queue
= vring_alloc_queue(vdev
, vring_size(num
, vring_align
),
1046 &dma_addr
, GFP_KERNEL
|__GFP_ZERO
);
1051 queue_size_in_bytes
= vring_size(num
, vring_align
);
1052 vring_init(&vring
, num
, queue
, vring_align
);
1054 vq
= __vring_new_virtqueue(index
, vring
, vdev
, weak_barriers
,
1055 notify
, callback
, name
);
1057 vring_free_queue(vdev
, queue_size_in_bytes
, queue
,
1062 to_vvq(vq
)->queue_dma_addr
= dma_addr
;
1063 to_vvq(vq
)->queue_size_in_bytes
= queue_size_in_bytes
;
1064 to_vvq(vq
)->we_own_ring
= true;
1068 EXPORT_SYMBOL_GPL(vring_create_virtqueue
);
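
/*
 * Illustrative sketch (not part of this file): a transport driver would
 * typically create its queues like this, letting the core halve "num" if a
 * contiguous allocation that large cannot be had (may_reduce_num == true).
 * The notify/callback functions, queue size and name are hypothetical.
 *
 *	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, my_notify, my_callback,
 *				    "requestq");
 *	if (!vq)
 *		return -ENOMEM;
 */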
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);
/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);
dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);
dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);
MODULE_LICENSE("GPL");