/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/barrier.h>

/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};
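
/*
 * A minimal sketch (not part of this header): these flags are not normally
 * set by hand.  In this version of the API, event_indices and little_endian
 * follow from the feature bits passed to vringh_init_kern()/vringh_init_user()
 * below, and weak_barriers is an explicit argument, e.g.
 *
 *	err = vringh_init_kern(&vrh,
 *			       (1ULL << VIRTIO_RING_F_EVENT_IDX) |
 *			       (1ULL << VIRTIO_F_VERSION_1),
 *			       num, true, desc, avail, used);
 */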

/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};
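
/*
 * A usage sketch (not part of this header), assuming the transport exposes
 * these ops to the driver for @vdev; my_rx_cb() and priv are hypothetical:
 *
 *	static void my_rx_cb(struct virtio_device *vdev, struct vringh *vrh)
 *	{
 *		... process newly available buffers on vrh ...
 *	}
 *
 *	const struct vringh_config_ops *ops;	// obtained from the transport
 *	vrh_callback_t *cbs[] = { my_rx_cb };
 *	struct vringh *vrhs[1];
 *
 *	err = ops->find_vrhs(vdev, 1, vrhs, cbs);
 *	if (!err)
 *		priv->rx_vrh = vrhs[0];
 *	...
 *	ops->del_vrhs(vdev);
 */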

/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	u64 start, end_incl;
	u64 offset;
};

/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000
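
/*
 * A small sketch (not part of this header) of how the iterator state reads:
 * elements before @i are fully consumed, and the @consumed bytes of iov[i]
 * have already been subtracted from its iov_len in place (see the _reset
 * helpers below), so the data still to transfer is simply:
 *
 *	size_t remaining = 0;
 *	unsigned int n;
 *
 *	for (n = kiov->i; n < kiov->used; n++)
 *		remaining += kiov->iov[n].iov_len;
 */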

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
				   struct iovec *iovec, unsigned num)
{
	iov->used = iov->i = 0;
	iov->consumed = 0;
	iov->max_num = num;
	iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
	iov->iov[iov->i].iov_len += iov->consumed;
	iov->iov[iov->i].iov_base -= iov->consumed;
	iov->consumed = 0;
	iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
	if (iov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(iov->iov);
	iov->max_num = iov->used = iov->i = iov->consumed = 0;
	iov->iov = NULL;
}
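
/*
 * A minimal sketch (not part of this header): callers usually start with a
 * small fixed array and let the core reallocate (flagging
 * VRINGH_IOV_ALLOCATED in max_num) if a descriptor chain is longer, so
 * always pair init with cleanup:
 *
 *	struct iovec stack_iov[8];
 *	struct vringh_iov riov;
 *
 *	vringh_iov_init(&riov, stack_iov, ARRAY_SIZE(stack_iov));
 *	... vringh_getdesc_user(), vringh_iov_pull_user() ...
 *	vringh_iov_cleanup(&riov);
 */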

/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable iov, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable iov, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
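
/*
 * An end-to-end sketch (not part of this header) of a host-side service loop
 * over a userspace vring; my_getrange(), buf and reply are hypothetical, and
 * vringh_getdesc_user() returns 1 per descriptor chain found:
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		r->start = 0;
 *		r->end_incl = U64_MAX;
 *		r->offset = 0;		// identity mapping, for illustration
 *		return true;
 *	}
 *
 *	struct iovec rstack[8], wstack[8];
 *	struct vringh_iov riov, wiov;
 *	u16 head;
 *
 *	vringh_iov_init(&riov, rstack, ARRAY_SIZE(rstack));
 *	vringh_iov_init(&wiov, wstack, ARRAY_SIZE(wstack));
 *
 *	while (vringh_getdesc_user(vrh, &riov, &wiov, my_getrange, &head) == 1) {
 *		... vringh_iov_pull_user(&riov, buf, sizeof(buf)) ...
 *		... vringh_iov_push_user(&wiov, reply, reply_len) ...
 *		vringh_complete_user(vrh, head, reply_len);
 *	}
 *	if (vringh_need_notify_user(vrh) > 0)
 *		... fire the eventfd (or vringh_notify() if a callback is set) ...
 */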

/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
				    struct kvec *kvec, unsigned num)
{
	kiov->used = kiov->i = 0;
	kiov->consumed = 0;
	kiov->max_num = num;
	kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
	kiov->iov[kiov->i].iov_len += kiov->consumed;
	kiov->iov[kiov->i].iov_base -= kiov->consumed;
	kiov->consumed = 0;
	kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
	if (kiov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(kiov->iov);
	kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
	kiov->iov = NULL;
}

int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

int vringh_need_notify_kern(struct vringh *vrh);
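
/*
 * A sketch (not part of this header) of the usual "drain, then sleep" idiom
 * on the kernel-side API, with riov/wiov/head set up via vringh_kiov_init()
 * as above; vringh_notify_enable_kern() returns false if more buffers became
 * available while notifications were off, so the caller re-disables and keeps
 * processing:
 *
 *	for (;;) {
 *		while (vringh_getdesc_kern(vrh, &riov, &wiov,
 *					   &head, GFP_KERNEL) == 1) {
 *			... pull/push, then vringh_complete_kern(vrh, head, len) ...
 *		}
 *		if (vringh_notify_enable_kern(vrh))
 *			break;				// really empty; wait for a kick
 *		vringh_notify_disable_kern(vrh);	// raced: more arrived
 *	}
 */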

/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
	if (vrh->notify)
		vrh->notify(vrh);
}
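
/*
 * Typical pairing (a sketch, not part of this header): after completing one
 * or more buffers, ask whether the other side actually needs a kick before
 * notifying, so event-index suppression is honoured:
 *
 *	if (vringh_need_notify_kern(vrh) > 0)
 *		vringh_notify(vrh);
 */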

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
	return vrh->little_endian ||
		virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
	return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
	return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
	return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
	return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
	return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
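
/*
 * A short sketch (not part of this header): raw ring fields are
 * __virtio{16,32,64} and should go through these accessors, e.g. when
 * inspecting a descriptor fetched from a kernel-side ring (user-side rings
 * hold user pointers and must be copied in first):
 *
 *	struct vring_desc d = vrh->vring.desc[i];
 *	u64 addr = vringh64_to_cpu(vrh, d.addr);
 *	u32 len = vringh32_to_cpu(vrh, d.len);
 *	u16 flags = vringh16_to_cpu(vrh, d.flags);
 */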
#endif /* _LINUX_VRINGH_H */