drm/nouveau: allocate GPFIFOs and fences coherently
drivers/gpu/drm/nouveau/nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

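/*
 * Note (summary of the helpers above): a tile region is only handed out
 * again once its previous fence has signalled, so nv10_bo_set_tiling()
 * may return NULL while every region is still busy; callers simply fall
 * back to untiled access in that case.
 */
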
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

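/*
 * Worked example (illustrative numbers, not from this file): on a
 * chipset >= 0x40 with tile_mode = 4, a request of size = 100000 is
 * rounded to roundup(100000, 64 * 4) = 100096, *align becomes 65536,
 * and the final roundup(*size, PAGE_SIZE) yields 102400 bytes, i.e.
 * 25 pages of 4 KiB.
 */
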
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

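/*
 * Usage sketch (hypothetical caller, error handling trimmed): allocate
 * a 64 KiB buffer in VRAM and pin it. "dev" is assumed to be a valid
 * struct drm_device.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0, NULL, NULL, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 */
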
static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

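/*
 * Example (illustrative): nouveau_bo_placement_set(nvbo,
 * TTM_PL_FLAG_VRAM, TTM_PL_FLAG_TT) builds a primary list holding only
 * VRAM and a busy list holding VRAM then TT, letting TTM fall back to
 * GART while VRAM is contended.
 */
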
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt)
		goto ref_inc;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}

ref_inc:
	nvbo->pin_refcnt++;

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

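/*
 * Pin/unpin nesting (summary of the two functions above): every
 * successful nouveau_bo_pin() must be balanced by one
 * nouveau_bo_unpin(); only the final unpin refreshes the placement
 * without TTM_PL_FLAG_NO_EVICT and returns the size to the
 * vram/gart_available accounting.
 */
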
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	/*
	 * TTM buffers allocated using the DMA API already have a mapping, let's
	 * use it instead.
	 */
	if (!nvbo->force_coherent)
		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
				  &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	/*
	 * TTM buffers allocated using the DMA API already had a coherent
	 * mapping which we used, no need to unmap.
	 */
	if (!nvbo->force_coherent)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

static inline void *
_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
{
	struct ttm_dma_tt *dma_tt;
	u8 *m = mem;

	index *= sz;

	if (m) {
		/* kmap'd address, return the corresponding offset */
		m += index;
	} else {
		/* DMA-API mapping, lookup the right address */
		dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
		m = dma_tt->cpu_address[index / PAGE_SIZE];
		m += index % PAGE_SIZE;
	}

	return m;
}
#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))

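/*
 * Worked example (illustrative): for a DMA-API-backed bo with no kmap,
 * nouveau_bo_rd32(nvbo, 1030) reaches the helper above with mem == NULL
 * and sz == 4, giving byte offset 4120; with 4 KiB pages that resolves
 * to cpu_address[1] plus an in-page remainder of 24 bytes.
 */
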
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


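/*
 * Per-generation copy methods. Each nvXX_bo_move_*() below streams one
 * hardware copy into a channel; on the chipsets that use them,
 * node->vma[0] and node->vma[1] are the temporary source/destination
 * GPU mappings set up by nouveau_bo_move_prep() further down.
 */
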
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

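/*
 * Example of the chunking above (illustrative): with the 8191-line cap,
 * a 64 MiB move (16384 pages) is split into 8191 + 8191 + 2 page-sized
 * lines across three ring submissions.
 */
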
static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, chan->drm->ntfy.handle);
		OUT_RING (chan, chan->vram.handle);
		OUT_RING (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

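/*
 * Worked numbers for the loop above (illustrative): each pass moves at
 * most 4 MiB as a 64-byte-wide transfer, i.e. stride = 16 * 4 = 64
 * bytes and height = amount / 64 = 65536 lines for a full chunk.
 */
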
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

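/*
 * Note on the flow above: the mutex is taken with SINGLE_DEPTH_NESTING,
 * presumably so lockdep tolerates a caller that already holds another
 * client's mutex; the fence emitted after the copy is handed to
 * ttm_bo_move_accel_cleanup() before the local reference is dropped.
 */
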
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

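/*
 * Example resolution (illustrative): on a Kepler board with a copy
 * engine channel the 0xa0b5 entries match first and the log reads
 * "MM: using COPY for buffer copies"; with no channel at all every
 * entry is skipped and the driver falls back to "CPU".
 */
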
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached)
		return ttm_dma_populate(ttm_dma, dev->dev);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			while (--i) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

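/*
 * Sketch of the resulting paths (summary of the function above): on a
 * non-coherent platform an uncached ttm is backed entirely by
 * ttm_dma_populate() (coherent DMA memory); otherwise pages come from
 * the TTM pool (or the AGP/swiotlb variants) and are streamed into the
 * device domain with dma_map_page().
 */
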
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached) {
		ttm_dma_unpopulate(ttm_dma, dev->dev);
		return;
	}

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

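/*
 * Usage sketch (hypothetical): after emitting a fence for work that
 * writes the bo, attach it exclusively so later readers wait on it:
 *
 *	nouveau_bo_fence(nvbo, fence, true);
 */
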
struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}