/* drivers/gpu/drm/nouveau/nouveau_bo.c */
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	nouveau_bo_vma_del(nvbo, &nvbo->vma);
	kfree(nvbo);
}

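/* Round size/alignment to the hardware's tiling requirements.  On
 * pre-NV50 chipsets a tiled bo appears to need to cover whole tile
 * rows, so the size is rounded up to a granularity scaled by
 * tile_mode, and the base alignment grows with newer chipsets.  On
 * NV50 and up the bo only needs to honour its own page size.
 */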
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

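/* Common constructor for all nouveau bos.  An illustrative call
 * (error handling elided, values are examples only) that creates a
 * 64KiB linear bo in VRAM with no initial channel binding:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0, &nvbo);
 *
 * Note the 256KiB threshold below: VRAM-capable bos at least that
 * large are switched to BAR1's large page size when a BAR1 vm exists.
 */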
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	if (dev_priv->chan_vm) {
		ret = nouveau_bo_vma_add(nvbo, dev_priv->chan_vm, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}

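/* Helpers to translate the TTM_PL_FLAG_* domain mask handed to
 * nouveau_bo_placement_set() into the ordered placement arrays TTM
 * expects, preferring VRAM over TT over SYSTEM.
 */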
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 2) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

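/* Apply a placement to a bo.  `type` is the set of acceptable memory
 * domains; `busy` adds extra domains that are only acceptable when
 * the preferred ones are contended (TTM's busy_placement fallback
 * list).
 */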
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

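/* Illustrative pin/unpin pairing (hypothetical caller, error handling
 * shortened):
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0) {
 *		... bo is locked into VRAM and cannot be evicted ...
 *		nouveau_bo_unpin(nvbo);
 *	}
 *
 * Pinning is refcounted; only the first pin validates the bo into
 * place, and only the last unpin makes it evictable again.
 */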
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

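/* CPU accessors.  The bo must have been mapped with nouveau_bo_map()
 * first; `index` counts elements of the access size, not bytes.  An
 * illustrative sequence:
 *
 *	if (nouveau_bo_map(nvbo) == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xcafebabe);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */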
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

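/* Describe each TTM memory domain to the core: which manager runs it,
 * whether it is CPU-mappable, and the caching modes it supports.  On
 * NV50 and up both VRAM and GART are handled by nouveau's own
 * managers rather than TTM's generic range manager.
 */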
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

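/* Fermi (NVC0) M2MF copy.  The bo is treated as an array of
 * PAGE_SIZE-byte lines and copied in bursts of at most 2047 lines,
 * presumably the limit of the hardware's line-count field.  The
 * src/dst virtual addresses come from the temporary vmas attached to
 * the old mm node by nouveau_bo_move_m2mf().
 */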
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

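/* NV04-NV4x M2MF copy.  Unlike the NV50+ paths, which go through
 * per-transfer virtual mappings, this programs the channel's
 * VRAM/GART ctxdma handles and linear offsets directly, again chunked
 * at 2047 PAGE_SIZE lines per submission.
 */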
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}

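/* Main M2MF move entry point.  If the bo was created with a channel,
 * the copy is queued there; otherwise the kernel's own channel is
 * borrowed under NOUVEAU_KCHANNEL_MUTEX.
 */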
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

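/* M2MF can only access VRAM and TT (see the comment above the copy
 * routines), so moves to or from SYSTEM bounce through a temporary TT
 * placement: _flipd copies VRAM->TT and then flips the TT pages out
 * to SYSTEM; _flips binds the SYSTEM pages into TT first and is the
 * reverse.
 */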
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0,
					  new_mem->num_pages << PAGE_SHIFT,
					  node, node->pages);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

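/* Pre-NV50 tiling is tracked per VRAM region: entering VRAM allocates
 * a tile region matching the bo's tile_mode/tile_flags, and the old
 * region is released once the move's fence has passed.
 */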
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

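/* Tell TTM how to CPU-map each memory type.  On cards with a BAR1 vm
 * (NV50+), VRAM is not exposed at a fixed aperture offset; a virtual
 * mapping is created in BAR1 here and torn down again in
 * io_mem_free().
 */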
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

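/* Per-vm virtual mappings of a bo.  Each client address space gets
 * its own nouveau_vma on the bo's vma_list, so the same bo can appear
 * at a different (client-specific) virtual address in every vm.  A
 * typical lookup-or-create pattern (illustrative; `vma_storage` is a
 * hypothetical caller-allocated struct nouveau_vma):
 *
 *	vma = nouveau_bo_vma_find(nvbo, vm);
 *	if (!vma)
 *		ret = nouveau_bo_vma_add(nvbo, vm, vma_storage);
 */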
struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node, node->pages);

	list_add_tail(&vma->head, &nvbo->vma_list);
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}