drm/nouveau/fence: make ttm interfaces wrap ours, not the other way around
drivers/gpu/drm/nouveau/nouveau_fence.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include <linux/ktime.h>
#include <linux/hrtimer.h>

#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_dma.h"

#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)

struct nouveau_fence {
	struct nouveau_channel *channel;
	struct kref refcount;
	struct list_head entry;

	uint32_t sequence;
	bool signalled;
	unsigned long timeout;

	void (*work)(void *priv, bool signalled);
	void *priv;
};

struct nouveau_semaphore {
	struct kref ref;
	struct drm_device *dev;
	struct drm_mm_node *mem;
};

static inline struct nouveau_fence *
nouveau_fence(void *sync_obj)
{
	return (struct nouveau_fence *)sync_obj;
}

static void
nouveau_fence_del(struct kref *ref)
{
	struct nouveau_fence *fence =
		container_of(ref, struct nouveau_fence, refcount);

	nouveau_channel_ref(NULL, &fence->channel);
	kfree(fence);
}

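/*
 * Signal completed fences on a channel.  The last acknowledged sequence
 * number is read from the channel's ref-count register on NV10+ hardware
 * (USE_REFCNT), or from a software counter maintained from the IRQ path
 * on older chips, and every pending fence at or below it is signalled.
 */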
void
nouveau_fence_update(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_fence *tmp, *fence;
	uint32_t sequence;

	spin_lock(&chan->fence.lock);

	/* Fetch the last sequence if the channel is still up and running */
	if (likely(!list_empty(&chan->fence.pending))) {
		if (USE_REFCNT(dev))
			sequence = nvchan_rd32(chan, 0x48);
		else
			sequence = atomic_read(&chan->fence.last_sequence_irq);

		if (chan->fence.sequence_ack == sequence)
			goto out;
		chan->fence.sequence_ack = sequence;
	}

	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		if (fence->sequence > chan->fence.sequence_ack)
			break;

		fence->signalled = true;
		list_del(&fence->entry);
		if (fence->work)
			fence->work(fence->priv, true);

		kref_put(&fence->refcount, nouveau_fence_del);
	}

out:
	spin_unlock(&chan->fence.lock);
}

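/*
 * Create a fence on @chan, optionally emitting it immediately.  An
 * illustrative (not in-tree) usage sketch, mirroring the pattern used
 * by semaphore_acquire()/semaphore_release() below:
 *
 *	struct nouveau_fence *fence = NULL;
 *	int ret = nouveau_fence_new(chan, &fence, true);
 *	if (ret == 0) {
 *		ret = nouveau_fence_wait(fence, true, false);
 *		nouveau_fence_unref(&fence);
 *	}
 */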
int
nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
		  bool emit)
{
	struct nouveau_fence *fence;
	int ret = 0;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;
	kref_init(&fence->refcount);
	nouveau_channel_ref(chan, &fence->channel);

	if (emit)
		ret = nouveau_fence_emit(fence);

	if (ret)
		nouveau_fence_unref(&fence);
	*pfence = fence;
	return ret;
}

struct nouveau_channel *
nouveau_fence_channel(struct nouveau_fence *fence)
{
	return fence ? nouveau_channel_get_unlocked(fence->channel) : NULL;
}

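/*
 * Assign the fence the channel's next sequence number, queue it on the
 * pending list, and write the sequence to the ring: with the REF_CNT
 * method on NV10+ hardware, or a software method (0x0150) on older
 * chips, which presumably updates chan->fence.last_sequence_irq from
 * the IRQ path.  The fence times out 3 seconds after emission.
 */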
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;

	if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
		nouveau_fence_update(chan);

		BUG_ON(chan->fence.sequence ==
		       chan->fence.sequence_ack - 1);
	}

	fence->sequence = ++chan->fence.sequence;

	kref_get(&fence->refcount);
	spin_lock(&chan->fence.lock);
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock(&chan->fence.lock);

	if (USE_REFCNT(dev)) {
		if (dev_priv->card_type < NV_C0)
			BEGIN_NV04(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
		else
			BEGIN_NVC0(chan, 0, NV10_SUBCHAN_REF_CNT, 1);
	} else {
		BEGIN_NV04(chan, NvSubSw, 0x0150, 1);
	}
	OUT_RING (chan, fence->sequence);
	FIRE_RING(chan);
	fence->timeout = jiffies + 3 * DRM_HZ;

	return 0;
}

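/*
 * Attach a callback to run when the fence signals.  If it has already
 * signalled the callback runs immediately; otherwise it is called from
 * nouveau_fence_update() (signalled == true) or from channel teardown
 * (signalled == false).  Only one callback per fence is supported.
 */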
void
nouveau_fence_work(struct nouveau_fence *fence,
		   void (*work)(void *priv, bool signalled),
		   void *priv)
{
	BUG_ON(fence->work);

	spin_lock(&fence->channel->fence.lock);

	if (fence->signalled) {
		work(priv, true);
	} else {
		fence->work = work;
		fence->priv = priv;
	}

	spin_unlock(&fence->channel->fence.lock);
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		kref_put(&(*pfence)->refcount, nouveau_fence_del);
	*pfence = NULL;
}

struct nouveau_fence *
nouveau_fence_ref(struct nouveau_fence *fence)
{
	kref_get(&fence->refcount);
	return fence;
}

bool
nouveau_fence_signalled(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = fence->channel;

	if (fence->signalled)
		return true;

	nouveau_fence_update(chan);
	return fence->signalled;
}

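/*
 * Wait for the fence to signal.  With @lazy, sleep with exponential
 * backoff from 1us up to 1ms per iteration instead of busy-polling.
 * Returns -EBUSY once the fence's 3-second timeout expires, or
 * -ERESTARTSYS if @intr and a signal is pending.
 */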
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	ktime_t t;
	int ret = 0;

	while (!nouveau_fence_signalled(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);
		if (lazy) {
			t = ktime_set(0, sleep_time);
			schedule_hrtimeout(&t, HRTIMER_MODE_REL);
			sleep_time *= 2;
			if (sleep_time > NSEC_PER_MSEC)
				sleep_time = NSEC_PER_MSEC;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);

	return ret;
}

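/*
 * Carve a semaphore slot out of the shared VRAM heap created by
 * nouveau_fence_init(): 4 bytes on pre-NV84 chipsets, 16 bytes on
 * NV84+.  Returns NULL on pre-NV17 hardware (no semaphore support) or
 * on allocation failure; callers fall back to a CPU wait in that case.
 */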
static struct nouveau_semaphore *
semaphore_alloc(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_semaphore *sema;
	int size = (dev_priv->chipset < 0x84) ? 4 : 16;
	int ret, i;

	if (!USE_SEMA(dev))
		return NULL;

	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
	if (!sema)
		goto fail;

	ret = drm_mm_pre_get(&dev_priv->fence.heap);
	if (ret)
		goto fail;

	spin_lock(&dev_priv->fence.lock);
	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, size, 0, 0);
	if (sema->mem)
		sema->mem = drm_mm_get_block_atomic(sema->mem, size, 0);
	spin_unlock(&dev_priv->fence.lock);

	if (!sema->mem)
		goto fail;

	kref_init(&sema->ref);
	sema->dev = dev;
	for (i = sema->mem->start; i < sema->mem->start + size; i += 4)
		nouveau_bo_wr32(dev_priv->fence.bo, i / 4, 0);

	return sema;
fail:
	kfree(sema);
	return NULL;
}

static void
semaphore_free(struct kref *ref)
{
	struct nouveau_semaphore *sema =
		container_of(ref, struct nouveau_semaphore, ref);
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	spin_lock(&dev_priv->fence.lock);
	drm_mm_put_block(sema->mem);
	spin_unlock(&dev_priv->fence.lock);

	kfree(sema);
}

static void
semaphore_work(void *priv, bool signalled)
{
	struct nouveau_semaphore *sema = priv;
	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;

	if (unlikely(!signalled))
		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);

	kref_put(&sema->ref, semaphore_free);
}

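/*
 * Emit a semaphore ACQUIRE on @chan, stalling the channel until the
 * semaphore is released.  The method sequence differs per generation:
 * pre-NV84 goes through the NvSema DMA object, NV84+ takes a 64-bit
 * semaphore address, and NVC0+ needs no DMA object at all.  A fence
 * with semaphore_work() attached keeps the slot alive until the
 * commands have actually executed.
 */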
static int
semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 3);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, offset);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 1); /* ACQUIRE_EQ */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1001); /* ACQUIRE_EQ */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}

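/*
 * Counterpart to semaphore_acquire(): emit a semaphore RELEASE on
 * @chan, waking any channel blocked on the same slot.
 */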
static int
semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *fence = NULL;
	u64 offset = chan->fence.vma.offset + sema->mem->start;
	int ret;

	if (dev_priv->chipset < 0x84) {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2);
		OUT_RING (chan, NvSema);
		OUT_RING (chan, offset);
		BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1);
		OUT_RING (chan, 1);
	} else
	if (dev_priv->chipset < 0xc0) {
		ret = RING_SPACE(chan, 7);
		if (ret)
			return ret;

		BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 1);
		OUT_RING (chan, chan->vram_handle);
		BEGIN_NV04(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 2); /* RELEASE */
	} else {
		ret = RING_SPACE(chan, 5);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 0, NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH, 4);
		OUT_RING (chan, upper_32_bits(offset));
		OUT_RING (chan, lower_32_bits(offset));
		OUT_RING (chan, 1);
		OUT_RING (chan, 0x1002); /* RELEASE */
	}

	/* Delay semaphore destruction until its work is done */
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	kref_get(&sema->ref);
	nouveau_fence_work(fence, semaphore_work, sema);
	nouveau_fence_unref(&fence);
	return 0;
}

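/*
 * Make @wchan wait for a fence owned by another channel using a
 * hardware semaphore: @wchan acquires the semaphore and the fence's
 * channel releases it.  Falls back to a blocking CPU wait when
 * semaphores are unusable, or when the owning channel's mutex cannot
 * be trylocked without risking lock-order inversion.
 */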
int
nouveau_fence_sync(struct nouveau_fence *fence,
		   struct nouveau_channel *wchan)
{
	struct nouveau_channel *chan = nouveau_fence_channel(fence);
	struct drm_device *dev = wchan->dev;
	struct nouveau_semaphore *sema;
	int ret = 0;

	if (likely(!chan || chan == wchan ||
		   nouveau_fence_signalled(fence)))
		goto out;

	sema = semaphore_alloc(dev);
	if (!sema) {
		/* Early card or broken userspace, fall back to
		 * software sync. */
		ret = nouveau_fence_wait(fence, true, false);
		goto out;
	}

	/* try to take chan's mutex, if we can't take it right away
	 * we have to fallback to software sync to prevent locking
	 * order issues
	 */
	if (!mutex_trylock(&chan->mutex)) {
		ret = nouveau_fence_wait(fence, true, false);
		goto out_unref;
	}

	/* Make wchan wait until it gets signalled */
	ret = semaphore_acquire(wchan, sema);
	if (ret)
		goto out_unlock;

	/* Signal the semaphore from chan */
	ret = semaphore_release(chan, sema);

out_unlock:
	mutex_unlock(&chan->mutex);
out_unref:
	kref_put(&sema->ref, semaphore_free);
out:
	if (chan)
		nouveau_channel_put_unlocked(&chan);
	return ret;
}

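/*
 * No-op flush hook.  Per the commit subject ("make ttm interfaces wrap
 * ours, not the other way around"), the __nouveau_fence_* entry points
 * form the thin TTM-facing layer over the native fence API above; this
 * one presumably fills ttm_bo_driver's sync_obj_flush slot, with the
 * hookup living outside this file.
 */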
int
__nouveau_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

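/*
 * Per-channel fence setup: bind the software object on pre-NVC0
 * channels, then make the shared semaphore area reachable, either via
 * an NvSema DMA object (pre-NV84) or by mapping the fence BO into the
 * channel's VM (NV84+).
 */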
int
nouveau_fence_channel_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	if (dev_priv->card_type < NV_C0) {
		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, NV01_SUBCHAN_OBJECT, 1);
		OUT_RING (chan, NvSw);
		FIRE_RING (chan);
	}

	/* Setup area of memory shared between all channels for x-chan sync */
	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;

		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
					     mem->start << PAGE_SHIFT,
					     mem->size, NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &obj);
		if (ret)
			return ret;

		ret = nouveau_ramht_insert(chan, NvSema, obj);
		nouveau_gpuobj_ref(NULL, &obj);
		if (ret)
			return ret;
	} else
	if (USE_SEMA(dev)) {
		/* map fence bo into channel's vm */
		ret = nouveau_bo_vma_add(dev_priv->fence.bo, chan->vm,
					 &chan->fence.vma);
		if (ret)
			return ret;
	}

	atomic_set(&chan->fence.last_sequence_irq, 0);
	return 0;
}

void
nouveau_fence_channel_fini(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_fence *tmp, *fence;

	spin_lock(&chan->fence.lock);
	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
		fence->signalled = true;
		list_del(&fence->entry);

		if (unlikely(fence->work))
			fence->work(fence->priv, false);

		kref_put(&fence->refcount, nouveau_fence_del);
	}
	spin_unlock(&chan->fence.lock);

	nouveau_bo_vma_del(dev_priv->fence.bo, &chan->fence.vma);
}

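/*
 * Driver-wide setup: on semaphore-capable hardware, allocate, pin and
 * map a small VRAM buffer (4 KiB pre-NV84, 16 KiB on NV84+) and put a
 * drm_mm allocator on top of it for semaphore_alloc() to carve slots
 * from.
 */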
int
nouveau_fence_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int size = (dev_priv->chipset < 0x84) ? 4096 : 16384;
	int ret;

	/* Create a shared VRAM heap for cross-channel sync. */
	if (USE_SEMA(dev)) {
		ret = nouveau_bo_new(dev, size, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->fence.bo);
		if (ret)
			return ret;

		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
		if (ret)
			goto fail;

		ret = nouveau_bo_map(dev_priv->fence.bo);
		if (ret)
			goto fail;

		ret = drm_mm_init(&dev_priv->fence.heap, 0,
				  dev_priv->fence.bo->bo.mem.size);
		if (ret)
			goto fail;

		spin_lock_init(&dev_priv->fence.lock);
	}

	return 0;
fail:
	nouveau_bo_unmap(dev_priv->fence.bo);
	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	return ret;
}

void
nouveau_fence_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (USE_SEMA(dev)) {
		drm_mm_takedown(&dev_priv->fence.heap);
		nouveau_bo_unmap(dev_priv->fence.bo);
		nouveau_bo_unpin(dev_priv->fence.bo);
		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
	}
}