reservation: cross-device reservation support, v4
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: List of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head)
		cb[rel->offset] = rel->res->id;
}

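/**
 * vmw_cmd_invalid - Entry point for commands that are invalid or
 * privileged in this context.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Returns -EINVAL for unprivileged callers. Note that the non-zero value
 * returned when CAP_SYS_ADMIN is held is also treated as an error by
 * vmw_cmd_check().
 */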
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

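/**
 * vmw_cmd_ok - Entry point for commands that need no validation.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */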
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo, NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: If non-NULL, points to the resource validation node on return.
 */
static int vmw_cmd_res_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     enum vmw_res_type res_type,
			     const struct vmw_user_resource_conv *converter,
			     uint32_t *id,
			     struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->tfile,
					      *id,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id;

	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

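/**
 * vmw_cmd_set_render_target_check - Validate an
 * SVGA_3D_CMD_SETRENDERTARGET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */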
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, NULL);
	return ret;
}

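/**
 * vmw_cmd_surface_copy_check - Validate an SVGA_3D_CMD_SURFACE_COPY
 * command, checking both the source and destination surface handles.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */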
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

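/**
 * vmw_cmd_stretch_blt_check - Validate an SVGA_3D_CMD_SURFACE_STRETCHBLT
 * command, checking both the source and destination surface handles.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */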
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

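/**
 * vmw_cmd_blt_surf_screen_check - Validate an
 * SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN command. Kernel only.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */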
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

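/**
 * vmw_cmd_present_check - Validate an SVGA_3D_CMD_PRESENT command.
 * Kernel only.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */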
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
		return -EPERM;
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

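/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */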
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

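/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command,
 * checking the surface handle of each vertex declaration and index range.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */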
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

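/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command,
 * checking the surface handle of each SVGA3D_TS_BIND_TEXTURE state.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */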
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

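/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
 * by translating its guest pointer.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 */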
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

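/**
 * vmw_cmd_check_not_3d - Check and dispatch a command that is not a 3D
 * command, i.e. an SVGA fifo command below SVGA_CMD_MAX.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: Updated with the command's size on successful return.
 *
 * These commands are kernel only; user-space submissions are rejected
 * with -EPERM.
 */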
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid),
};

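/**
 * vmw_cmd_check - Check a single command in the command stream, dispatch
 * it to the right per-command check function, and update *@size with the
 * command's size.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: Updated with the command's size on successful return.
 */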
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

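/**
 * vmw_cmd_check_all - Walk the command stream and check each command
 * until @size bytes have been consumed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 */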
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

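/**
 * vmw_apply_relocations - Apply all buffer relocations recorded during
 * command parsing, patching each SVGAGuestPtr with the GMR id or
 * framebuffer offset of the now-validated buffer, then free the
 * relocation list.
 *
 * @sw_context: The software context used for this command submission.
 */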
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		kfree(val);
	}
}

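/**
 * vmw_clear_validations - Drop all buffer object references held on the
 * validate list and remove both buffers and resources from the software
 * context's hash table.
 *
 * @sw_context: The software context used for this command submission.
 */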
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

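/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large
 * enough, growing it geometrically (by roughly 1.5x, page-aligned) until
 * it can hold @size bytes.
 *
 * @sw_context: The software context used for this command submission.
 * @size: The required bounce buffer size in bytes.
 */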
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}

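/**
 * vmw_execbuf_process - Validate and submit a command stream.
 *
 * @file_priv: Pointer to the calling file.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, used when
 * @kernel_commands is NULL.
 * @kernel_commands: Kernel pointer to the command stream, or NULL.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: Lag threshold to throttle against, or 0 for no throttling.
 * @user_fence_rep: Optional user-space address to copy fence info to.
 * @out_fence: If non-NULL, receives a pointer to the fence object instead
 * of the fence being unreferenced here.
 *
 * Checks the command stream, reserves and validates all buffers and
 * resources it references, copies the commands to the fifo and fences
 * the submission.
 */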
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_err:
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

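/**
 * vmw_execbuf_ioctl - Entry point for the DRM_VMW_EXECBUF ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the calling file.
 *
 * Takes the read lock on the master TTM lock and hands the user-space
 * command stream over to vmw_execbuf_process().
 */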
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}