/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
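
/*
 * Seqnos are compared with wrap-safe unsigned arithmetic: a seqno "a" has
 * passed a fence seqno "b" when (u32)(a - b) < VMW_FENCE_WRAP. A sketch of
 * the idiom used throughout this file (illustration only, not extra driver
 * code):
 *
 *	static inline bool vmw_seqno_passed_example(u32 current_seqno,
 *						    u32 fence_seqno)
 *	{
 *		return current_seqno - fence_seqno < VMW_FENCE_WRAP;
 *	}
 *
 * E.g. with current_seqno == 2 and fence_seqno == 0xfffffffeu the unsigned
 * difference wraps to 4, so the fence is correctly considered passed even
 * though 2 < 0xfffffffe numerically.
 */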

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @e: A struct drm_pending_event that controls the event delivery.
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @kref: Both @e and @action have destructors, so we need to refcount.
 * @size: Size accounted for this object.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct drm_pending_event e;
	struct vmw_fence_action action;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;
	struct kref kref;
	uint32_t size;
	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
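
/*
 * An illustrative call flow, pieced together from the functions below
 * (a sketch, not normative documentation): attaching an action to an
 * unsignaled fence via vmw_fence_obj_add_action() programs the fence goal
 * seqno through vmw_fence_goal_check_locked() and turns on the FENCE_GOAL
 * irq. When the irq fires, vmw_fences_update() signals the passed fences,
 * runs their seq_passed callbacks, moves the actions to the cleanup list
 * and schedules vmw_fence_work_func(), which runs the cleanup callbacks
 * outside atomic context.
 */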

static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
	struct vmw_fence_obj *fence =
		container_of(kref, struct vmw_fence_obj, kref);

	struct vmw_fence_manager *fman = fence->fman;
	unsigned int num_fences;

	list_del_init(&fence->head);
	num_fences = --fman->num_fence_objects;
	spin_unlock_irq(&fman->lock);
	if (fence->destroy)
		fence->destroy(fence);
	else
		kfree(fence);

	spin_lock_irq(&fman->lock);
}


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence,
			      u32 seqno,
			      uint32_t mask,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	unsigned int num_fences;
	int ret = 0;

	fence->seqno = seqno;
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->fman = fman;
	fence->signaled = 0;
	fence->signal_mask = mask;
	kref_init(&fence->kref);
	fence->destroy = destroy;
	init_waitqueue_head(&fence->queue);

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	num_fences = ++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
	if (unlikely(fence == NULL))
		return NULL;

	kref_get(&fence->kref);
	return fence;
}

/**
 * vmw_fence_obj_unreference
 *
 * Note that this function may not be entered with disabled irqs since
 * it may re-enable them in the destroy function.
 *
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
	struct vmw_fence_obj *fence = *fence_p;
	struct vmw_fence_manager *fman;

	if (unlikely(fence == NULL))
		return;

	fman = fence->fman;
	*fence_p = NULL;
	spin_lock_irq(&fman->lock);
	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
	kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
	spin_unlock_irq(&fman->lock);
}

void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
		return false;

	fifo_mem = fence->fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fence->fman->seqno_valid &&
		   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fence->fman->seqno_valid = true;

	return true;
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long flags;
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	spin_lock_irqsave(&fman->lock, flags);
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
			wake_up_all(&fence->queue);
		} else
			break;
	}

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
	spin_unlock_irqrestore(&fman->lock, flags);

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}
}
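
/*
 * An illustrative timeline of the race handled by the rerun above
 * (example seqno values, not driver code):
 *
 *	CPU					device
 *	reads SVGA_FIFO_FENCE, seqno == 10
 *						passes seqno 11; no
 *						FENCE_GOAL irq, since the
 *						goal is not yet written
 *	writes 11 to SVGA_FIFO_FENCE_GOAL
 *
 * The goal was programmed after the device had already passed it, so no
 * irq will ever fire for it. Rereading SVGA_FIFO_FENCE and rerunning the
 * update catches exactly this case.
 */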

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
			    uint32_t flags)
{
	struct vmw_fence_manager *fman = fence->fman;
	unsigned long irq_flags;
	uint32_t signaled;

	spin_lock_irqsave(&fman->lock, irq_flags);
	signaled = fence->signaled;
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	flags &= fence->signal_mask;
	if ((signaled & flags) == flags)
		return 1;

	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
		vmw_fences_update(fman);

	spin_lock_irqsave(&fman->lock, irq_flags);
	signaled = fence->signaled;
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	return ((signaled & flags) == flags);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
		       uint32_t flags, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	struct vmw_private *dev_priv = fence->fman->dev_priv;
	long ret;

	if (likely(vmw_fence_obj_signaled(fence, flags)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(fence->queue,
			 vmw_fence_obj_signaled(fence, flags),
			 timeout);
	else
		ret = wait_event_timeout
			(fence->queue,
			 vmw_fence_obj_signaled(fence, flags),
			 timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fence->fman->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fence->fman;

	kfree(fence);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->fence_size);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     uint32_t mask,
		     struct vmw_fence_obj **p_fence)
{
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct vmw_fence_obj *fence;
	int ret;

	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, fence, seqno, mask,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->fence_size);
	return ret;
}
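
/*
 * A minimal usage sketch for the kernel-side fence API above (illustrative
 * only; error handling trimmed, and the seqno is assumed to come from the
 * fifo code):
 *
 *	struct vmw_fence_obj *fence;
 *	int ret;
 *
 *	ret = vmw_fence_create(fman, seqno, DRM_VMW_FENCE_FLAG_EXEC, &fence);
 *	if (ret == 0) {
 *		ret = vmw_fence_obj_wait(fence, DRM_VMW_FENCE_FLAG_EXEC,
 *					 false, true, VMW_FENCE_WAIT_TIMEOUT);
 *		vmw_fence_obj_unreference(&fence);
 *	}
 */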

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fence->fman;

	kfree(ufence);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  uint32_t mask,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 mask, vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference.
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		kref_get(&fence->kref);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, fence->signal_mask,
					 false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
			wake_up_all(&fence->queue);
		}

		spin_lock_irq(&fman->lock);

		BUG_ON(!list_empty(&fence->head));
		kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
	}
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		       (wait_timeout >> 26);
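	/*
	 * Worked out, the shifts above multiply by
	 * 2^-20 + 2^-24 - 2^-26 = 67/2^26 ~= 1/1001625, i.e. they divide
	 * by roughly 1001625, which is within about 0.2% of the intended
	 * 10^6 and close enough for a wait timeout.
	 */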

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fence->fman;

	arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
	spin_lock_irq(&fman->lock);

	arg->signaled_flags = fence->signaled;
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_destroy
 *
 * @kref: The struct kref embedded in a struct vmw_event_fence_action.
 *
 * The vmw_event_fence_action destructor that may be called either after
 * the fence action cleanup, or when the event is delivered.
 * It frees both the vmw_event_fence_action struct and the actual
 * event structure copied to user-space.
 */
static void vmw_event_fence_action_destroy(struct kref *kref)
{
	struct vmw_event_fence_action *eaction =
		container_of(kref, struct vmw_event_fence_action, kref);
	struct ttm_mem_global *mem_glob =
		vmw_mem_glob(vmw_priv(eaction->dev));
	uint32_t size = eaction->size;

	kfree(eaction->e.event);
	kfree(eaction);
	ttm_mem_global_free(mem_glob, size);
}


/**
 * vmw_event_fence_action_delivered
 *
 * @e: The struct drm_pending_event embedded in a struct
 * vmw_event_fence_action.
 *
 * The struct drm_pending_event destructor that is called by drm
 * once the event is delivered. Since we don't know whether this function
 * will be called before or after the fence action destructor, we
 * drop a refcount and destroy if it becomes zero.
 */
static void vmw_event_fence_action_delivered(struct drm_pending_event *e)
{
	struct vmw_event_fence_action *eaction =
		container_of(e, struct vmw_event_fence_action, e);

	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
}


/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context. It ups a refcount reflecting that we now have two
 * destructors.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_file *file_priv = eaction->e.file_priv;
	unsigned long irq_flags;

	kref_get(&eaction->kref);
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_add_tail(&eaction->e.link, &file_priv->event_list);
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kref_put(&eaction->kref, vmw_event_fence_action_destroy);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fence->fman;
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_create - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  struct drm_event *event,
				  uint32_t *tv_sec,
				  uint32_t *tv_usec,
				  bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct ttm_mem_global *mem_glob =
		vmw_mem_glob(fence->fman->dev_priv);
	struct vmw_fence_manager *fman = fence->fman;
	uint32_t size = fman->event_fence_action_size +
		ttm_round_pot(event->length);
	int ret;

	/*
	 * Account for internal structure size as well as the
	 * event size itself.
	 */

	ret = ttm_mem_global_alloc(mem_glob, size, false, interruptible);
	if (unlikely(ret != 0))
		return ret;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL)) {
		ttm_mem_global_free(mem_glob, size);
		return -ENOMEM;
	}

	eaction->e.event = event;
	eaction->e.file_priv = file_priv;
	eaction->e.destroy = vmw_event_fence_action_delivered;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->size = size;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	kref_init(&eaction->kref);
	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	unsigned long irq_flags;
	struct drm_vmw_event_fence *event;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup(vmw_fp->tfile, arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(*event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(*event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_event_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->base.length = sizeof(*event);
	event->user_data = arg->user_data;

	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_create(file_priv, fence,
						    &event->base,
						    &event->tv_sec,
						    &event->tv_usec,
						    true);
	else
		ret = vmw_event_fence_action_create(file_priv, fence,
						    &event->base,
						    NULL,
						    NULL,
						    true);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_attach;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_attach:
	kfree(event);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(*event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_event_space:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}