drivers/gpu/drm/virtio/virtgpu_ioctl.c

/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <drm/virtgpu_drm.h>
#include "ttm/ttm_execbuf_util.h"

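/* Convert a userspace drm_virtgpu_3d_box into the little-endian layout
 * the host expects on the wire. */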
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

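/* VIRTGPU_MAP: look up the fake mmap offset of a dumb buffer so that
 * userspace can map it through the DRM device node. */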
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}

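/* Reserve every buffer on @head and validate it against its placement;
 * back off the whole reservation if any buffer fails. */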
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
					   struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

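/* Drop the GEM references that were taken when the objects were added
 * to a validation list. */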
static void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_unreference_unlocked(&qobj->gem_base);
	}
}

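/* Submit a command stream to the host: look up and validate the
 * referenced BOs, copy the commands in from userspace, queue them with
 * a fence, and fence the BOs so they stay busy until completion. */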
static int virtio_gpu_execbuffer(struct drm_device *dev,
				 struct drm_virtgpu_execbuffer *exbuf,
				 struct drm_file *drm_file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {
		bo_handles = drm_malloc_ab(exbuf->num_bo_handles,
					   sizeof(uint32_t));
		buflist = drm_calloc_large(exbuf->num_bo_handles,
					   sizeof(struct ttm_validate_buffer));
		if (!bo_handles || !buflist) {
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return -ENOMEM;
		}

		user_bo_handles = (void __user *)(uintptr_t)exbuf->bo_handles;
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			drm_free_large(bo_handles);
			drm_free_large(buflist);
			return ret;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				/* drop references already taken for the list */
				virtio_gpu_unref_list(&validate_list);
				drm_free_large(bo_handles);
				drm_free_large(buflist);
				return -ENOENT;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		drm_free_large(bo_handles);
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = kmalloc(exbuf->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unresv;
	}
	if (copy_from_user(buf, (void __user *)(uintptr_t)exbuf->command,
			   exbuf->size)) {
		kfree(buf);
		ret = -EFAULT;
		goto out_unresv;
	}
	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, &fence);

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);

	/* fence the command bo */
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	fence_put(&fence->f);
	return 0;

out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
	drm_free_large(buflist);
	return ret;
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv)
{
	struct drm_virtgpu_execbuffer *execbuffer = data;

	return virtio_gpu_execbuffer(dev, execbuffer, file_priv);
}

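/* VIRTGPU_GETPARAM: report driver/host capabilities to userspace;
 * currently only whether virgl 3D is available. */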
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user((void __user *)(unsigned long)param->value,
			 &value, sizeof(int)))
		return -EFAULT;

	return 0;
}

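/* VIRTGPU_RESOURCE_CREATE: allocate a host resource plus a matching
 * guest GEM object.  Without virgl 3D only simple 2D resources are
 * accepted; with it, the full 3D parameter set is forwarded to the
 * host and the backing pages are attached under a fence. */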
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	int ret;
	uint32_t res_id;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	uint32_t size;
	struct list_head validate_list;
	struct ttm_validate_buffer mainbuf;
	struct virtio_gpu_fence *fence = NULL;
	struct ww_acquire_ctx ticket;
	struct virtio_gpu_resource_create_3d rc_3d;

	if (!vgdev->has_virgl_3d) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	INIT_LIST_HEAD(&validate_list);
	memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));

	virtio_gpu_resource_id_get(vgdev, &res_id);

	size = rc->size;

	/* allocate a single page size object */
	if (size == 0)
		size = PAGE_SIZE;

	qobj = virtio_gpu_alloc_object(dev, size, false, false);
	if (IS_ERR(qobj)) {
		ret = PTR_ERR(qobj);
		goto fail_id;
	}
	obj = &qobj->gem_base;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_create_resource(vgdev, res_id, rc->format,
					       rc->width, rc->height);

		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_reference(&qobj->gem_base);
		mainbuf.bo = &qobj->tbo;
		list_add(&mainbuf.head, &validate_list);

		ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
		if (ret) {
			DRM_DEBUG("failed to validate\n");
			goto fail_unref;
		}

		rc_3d.resource_id = cpu_to_le32(res_id);
		rc_3d.target = cpu_to_le32(rc->target);
		rc_3d.format = cpu_to_le32(rc->format);
		rc_3d.bind = cpu_to_le32(rc->bind);
		rc_3d.width = cpu_to_le32(rc->width);
		rc_3d.height = cpu_to_le32(rc->height);
		rc_3d.depth = cpu_to_le32(rc->depth);
		rc_3d.array_size = cpu_to_le32(rc->array_size);
		rc_3d.last_level = cpu_to_le32(rc->last_level);
		rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
		rc_3d.flags = cpu_to_le32(rc->flags);

		virtio_gpu_cmd_resource_create_3d(vgdev, &rc_3d, NULL);
		ret = virtio_gpu_object_attach(vgdev, qobj, res_id, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
		}
		ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
	}

	qobj->hw_res_handle = res_id;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		if (vgdev->has_virgl_3d) {
			virtio_gpu_unref_list(&validate_list);
			fence_put(&fence->f);
		}
		return ret;
	}
	drm_gem_object_unreference_unlocked(obj);

	rc->res_handle = res_id; /* similar to a VM address */
	rc->bo_handle = handle;

	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		fence_put(&fence->f);
	}
	return 0;
fail_unref:
	if (vgdev->has_virgl_3d) {
		virtio_gpu_unref_list(&validate_list);
		/* fence is still NULL if validation or attach failed early */
		if (fence)
			fence_put(&fence->f);
	}
fail_id:
	virtio_gpu_resource_id_put(vgdev, res_id);
	return ret;
}

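/* VIRTGPU_RESOURCE_INFO: translate a GEM handle into its size and host
 * resource id. */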
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

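/* VIRTGPU_TRANSFER_FROM_HOST: ask the host to copy a region of a 3D
 * resource back into its guest backing store; the BO's reservation
 * gets the transfer fence so CPU access waits for completion. */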
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, &fence);
	reservation_object_add_excl_fence(qobj->tbo.resv,
					  &fence->f);

	fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

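/* VIRTGPU_TRANSFER_TO_HOST: push guest changes to the host copy of a
 * resource; 2D transfers go unfenced, 3D transfers are fenced. */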
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
			      true, false);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj->hw_res_handle, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj->hw_res_handle,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, &fence);
		reservation_object_add_excl_fence(qobj->tbo.resv,
						  &fence->f);
		fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

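/* VIRTGPU_WAIT: block (or just poll, with VIRTGPU_WAIT_NOWAIT) until
 * all pending operations on a BO have completed. */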
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}

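/* VIRTGPU_GET_CAPS: copy a capability set to userspace, serving it
 * from the cache when possible and fetching it from the host on a
 * cache miss. */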
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	int size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	size = vgdev->capsets[found_valid].max_size;
	if (args->size > size) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			ptr = cache_ent->caps_cache;
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;	/* host never filled in the capset entry */

	ptr = cache_ent->caps_cache;

copy_exit:
	if (copy_to_user((void __user *)(unsigned long)args->addr, ptr, size))
		return -EFAULT;

	return 0;
}

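/*
 * Ioctl table wired into the DRM core.  Every entry is available on
 * render nodes; DRM_AUTH applies on the primary node.
 *
 * A minimal userspace sketch for probing 3D support (ioctl names come
 * from virtgpu_drm.h; fd and has_3d are illustrative locals):
 *
 *	int has_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (uint64_t)(uintptr_t)&has_3d,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
 */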
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};