/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration.  So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif
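
/*
 * Illustrative sketch only (not code from this driver): the barriers above
 * pair across the two rings roughly as follows.  When publishing a buffer,
 * the guest fills vring.desc[] and avail->ring[], issues virtio_wmb(), and
 * only then bumps avail->idx, so the host can never see an index that points
 * at unwritten descriptors.  When consuming, the guest reads used->idx,
 * issues virtio_rmb(), and only then reads the used->ring[] entry, so it can
 * never read an entry the host has not yet exposed:
 *
 *	publish:				consume:
 *	write desc[] / avail->ring[]		read used->idx
 *	virtio_wmb(vq);				virtio_rmb(vq);
 *	avail->idx++;				read used->ring[entry]
 */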
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif
struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

	/* Index of the queue */
	int queue_index;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;
	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;
	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}
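
/*
 * Illustrative layout sketch (assumed example values, not code from this
 * driver): with out = 2 and in = 1, vring_add_indirect() consumes a single
 * descriptor of queue capacity which points at the kmalloc'ed table:
 *
 *	vring.desc[head]: flags = INDIRECT, addr = virt_to_phys(desc),
 *			  len = 3 * sizeof(struct vring_desc)
 *	desc[0]: device-readable, NEXT -> desc[1]
 *	desc[1]: device-readable, NEXT -> desc[2]
 *	desc[2]: device-writable, chain ends here
 *
 * This is why a long sg[] can still cost only one entry of ring capacity.
 */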
int virtqueue_get_queue_index(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	return vq->queue_index;
}
EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
/**
 * virtqueue_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out_num: the number of sg readable by other side
 * @in_num: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (ie. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
int virtqueue_add_buf(struct virtqueue *_vq,
		      struct scatterlist sg[],
		      unsigned int out,
		      unsigned int in,
		      void *data,
		      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);
#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);

		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif
	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);
	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}
	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}

	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;
	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;
	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;
	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}
EXPORT_SYMBOL_GPL(virtqueue_add_buf);
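
/*
 * Example (illustrative only; req_hdr, resp_buf and token are hypothetical
 * driver-side names): a caller lays out device-readable entries first, then
 * device-writable ones, and kicks once everything is queued:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], req_hdr, sizeof(*req_hdr));
 *	sg_set_buf(&sg[1], resp_buf, resp_len);
 *	if (virtqueue_add_buf(vq, sg, 1, 1, token, GFP_ATOMIC) < 0)
 *		return -ENOSPC;
 *	virtqueue_kick(vq);
 */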
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;
#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif
	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
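
/*
 * Example (illustrative; priv->lock is a hypothetical driver lock): the split
 * form lets a driver drop its lock before the potentially expensive exit to
 * the host:
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_buf(vq, sg, out, in, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */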
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
void virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
void virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}
static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}
/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the driver wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_buf().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;

	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
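
/*
 * Example (illustrative; complete_request() is a hypothetical driver helper):
 * a callback normally drains the used ring in a loop, using the returned
 * token to find its own per-request state:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */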
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = vq->last_used_idx;
	virtio_mb(vq);
	if (unlikely(more_used(vq))) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
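
/*
 * Example (illustrative; process() is a hypothetical driver helper): the
 * return value closes the race between "no more work" and re-enabling
 * interrupts, so a typical service loop looks like:
 *
 *	for (;;) {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(token, len);
 *		if (virtqueue_enable_cb(vq))
 *			break;
 *	}
 */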
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
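
/*
 * Example (illustrative): paths that expect many completions at once, such
 * as transmit reclaim, can use the delayed form so the host only interrupts
 * after roughly 3/4 of the outstanding buffers have been used:
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		goto reclaim_more;
 */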
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
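
/*
 * Example (illustrative; free_request() is a hypothetical driver helper): on
 * device removal, a driver reclaims buffers it queued but the host never
 * consumed:
 *
 *	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_request(token);
 */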
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	vq->queue_index = index;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
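
/*
 * Example (illustrative only; the allocation and the my_notify/my_callback
 * hooks are placeholders a real transport would supply): a transport wires a
 * ring up roughly like this:
 *
 *	void *pages = alloc_pages_exact(vring_size(num, align), GFP_KERNEL);
 *	struct virtqueue *vq;
 *
 *	if (!pages)
 *		return -ENOMEM;
 *	memset(pages, 0, vring_size(num, align));
 *	vq = vring_new_virtqueue(index, num, align, vdev, true, pages,
 *				 my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 */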
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);
/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
MODULE_LICENSE("GPL");