drivers/virtio/virtio_ring.c
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess. For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = vq->vring.desc[i].next;
	}

	vq->vq.num_free += total_sg;

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
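
/*
 * Illustrative usage sketch, not part of this file: queueing a request
 * that has one device-readable part followed by one device-writable
 * part, in the style of virtio-blk.  The names req, resp and vq are
 * hypothetical.
 *
 *	struct scatterlist hdr, status;
 *	struct scatterlist *sgs[] = { &hdr, &status };
 *	int err;
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, resp, sizeof(*resp));
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */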

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
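
/*
 * Illustrative usage sketch, not part of this file: posting a single
 * buffer for the device to fill, as a receive path might do.  The
 * names buf, len and vq are hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (!virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL))
 *		virtqueue_kick(vq);
 */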

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
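
/*
 * Illustrative sketch, not part of this file: the split kick lets the
 * potentially expensive notification run outside a driver lock, while
 * the serialized prepare step stays inside it.  The names dev and
 * vq_lock are hypothetical.
 *
 *	unsigned long flags;
 *	bool kick;
 *
 *	spin_lock_irqsave(&dev->vq_lock, flags);
 *	// ...virtqueue_add_sgs() calls here...
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&dev->vq_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */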

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
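
/*
 * Illustrative sketch, not part of this file: draining used buffers
 * from a virtqueue callback.  my_complete_request is a hypothetical
 * driver function keyed by the token passed to virtqueue_add_*().
 *
 *	static void my_vq_callback(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			my_complete_request(token, len);
 *	}
 */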

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
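
/*
 * Illustrative sketch, not part of this file: re-enabling callbacks
 * without losing a race against buffers that arrive in the window.
 * my_process_used is a hypothetical helper that drains the queue.
 *
 *	unsigned opaque;
 *
 *	virtqueue_disable_cb(vq);
 *	my_process_used(vq);
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);	// raced; go around again
 *		my_process_used(vq);
 *	}
 */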

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
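
/*
 * Illustrative sketch, not part of this file: reclaiming tokens on
 * device removal, after the device has been reset so the queue is no
 * longer active.  my_free_request is a hypothetical driver function.
 *
 *	void *token;
 *
 *	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
 *		my_free_request(token);
 */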

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page.  You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
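
/*
 * Illustrative sketch, not part of this file: how a transport might
 * create a ring of 256 entries.  my_notify and my_callback are
 * hypothetical; virtio-pci passes similar arguments.
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, my_notify, my_callback,
 *				    "requests");
 *	if (!vq)
 *		return -ENOMEM;
 */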

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");