/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&n->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new? Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

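/* Kick handler run by the vhost worker when the vq's kick eventfd fires:
 * recover the owning test device and process the ring. */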
static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

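/* Open of the test character device: allocate the device state, point the
 * single test vq at its kick handler and hand the vq pointer array to the
 * vhost core. */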
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_TEST_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	r = vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
	if (r < 0) {
		kfree(vqs);
		kfree(n);
		return r;
	}

	f->private_data = n;

	return 0;
}

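/* Detach the vq's private data under its mutex so handle_vq() stops
 * processing, and return the old value to the caller. */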
static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vq->private_data;
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

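/* Wait for any handle_vq work already queued on the vhost worker to finish. */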
static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

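/* Release: stop the vq, drain outstanding work, tear down the vhost device
 * and free everything allocated in open. */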
static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_cleanup(&n->dev, false);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	/* Free the vq pointer array allocated in vhost_test_open() as well;
	 * vhost_dev_cleanup() does not free it for us. */
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}

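/* Backend for the VHOST_TEST_RUN ioctl: test != 0 points the vq's private
 * data at the device so handle_vq() starts consuming descriptors, test == 0
 * clears it and stops processing. */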
static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been setup correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* Start or stop the vq by swapping its private data. */
		oldpriv = rcu_dereference_protected(vq->private_data,
						    lockdep_is_held(&vq->mutex));
		rcu_assign_pointer(vq->private_data, priv);

		r = vhost_init_used(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv) {
			vhost_test_flush_vq(n, index);
		}
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

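/* VHOST_RESET_OWNER: stop the test, drain pending work and return the device
 * to an unowned state so another process can take it over. */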
static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

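/* VHOST_SET_FEATURES backend: refuse VHOST_F_LOG_ALL unless the log region is
 * accessible, then publish the acked feature bits. */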
static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	vhost_test_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

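/* Main ioctl dispatcher: handle the test-specific and feature ioctls here and
 * fall back to the generic vhost device/vring ioctls for everything else. */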
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_test_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_test_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_test_compat_ioctl,
#endif
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

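/* Registered as a misc character device, typically exposed as /dev/vhost-test;
 * the userspace counterpart lives in tools/virtio/virtio_test.c. */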
static struct miscdevice vhost_test_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-test",
	&vhost_test_fops,
};

static int vhost_test_init(void)
{
	return misc_register(&vhost_test_misc);
}
module_init(vhost_test_init);

static void vhost_test_exit(void)
{
	misc_deregister(&vhost_test_misc);
}
module_exit(vhost_test_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");