/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

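/*
 * Note (summary of the helpers below): the fb subdevice exposes a fixed
 * array of tile regions (fb->tile.region[]), each programming one
 * memory-controller tiling range.  They are managed as a pool: "get" marks
 * a region busy, "put" releases it guarded by a fence, so a region is only
 * reprogrammed once the GPU has finished using its previous contents.
 */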
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];
	struct nvkm_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		fb->tile.fini(fb, i, tile);

	if (pitch)
		fb->tile.init(fb, i, addr, size, pitch, flags, tile);

	fb->tile.prog(fb, i, tile);

	if ((engine = nvkm_engine(fb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nvkm_engine(fb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

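/*
 * Allocate and initialise a nouveau_bo.  The requested size/alignment are
 * first fixed up for the chipset's tiling and large-page constraints by
 * nouveau_bo_fixup_align() above, then the object is handed to TTM, which
 * calls nouveau_bo_del_ttm() on init failure or on final unref.
 */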
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->mmu->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

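/*
 * Pin a buffer into the memory type given by @memtype.  Pinning is
 * refcounted; only the first pin may migrate the buffer.  When a contiguous
 * VRAM placement is requested for a buffer allocated NONCONTIG, the buffer
 * is first evicted to GART and revalidated so the VRAM allocator can
 * produce a single contiguous region.
 */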
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
			if (bo->mem.mem_type == TTM_PL_VRAM) {
				struct nvkm_mem *mem = bo->mem.mm_node;
				if (!list_is_singular(&mem->regions))
					evict = true;
			}
			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
			force = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	/*
	 * TTM buffers allocated using the DMA API already have a mapping, let's
	 * use it instead.
	 */
	if (!nvbo->force_coherent)
		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
				  &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	/*
	 * TTM buffers allocated using the DMA API already had a coherent
	 * mapping which we used, no need to unmap.
	 */
	if (!nvbo->force_coherent)
		ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(nv_device_base(device),
			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(nv_device_base(device),
			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

static inline void *
_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
{
	struct ttm_dma_tt *dma_tt;
	u8 *m = mem;

	index *= sz;

	if (m) {
		/* kmap'd address, return the corresponding offset */
		m += index;
	} else {
		/* DMA-API mapping, lookup the right address */
		dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
		m = dma_tt->cpu_address[index / PAGE_SIZE];
		m += index % PAGE_SIZE;
	}

	return m;
}
#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

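/*
 * TTM backend selection: AGP systems map pages through the AGP aperture
 * helpers, everything else goes through nouveau_sgdma_create_ttm().
 */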
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvxx_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

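/*
 * Hardware-assisted buffer copies.  Each GPU generation exposes a different
 * copy-capable engine/class (M2MF on NV04-NVC0, dedicated copy and crypt
 * engines on later chips); the nvXX_bo_move_* functions below emit the
 * method stream for one generation each, and nouveau_bo_move_init() picks
 * the best one available at load time.
 */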
static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}

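/*
 * NV50 M2MF copies must describe the tiling layout of both surfaces, so
 * the transfer is split into chunks of at most 4 MiB, each programmed as
 * a 2D blit with a 64-byte pitch and per-side linear/tiled setup.
 */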
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

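/*
 * Set up a GPU copy: map both the source and destination nodes into
 * temporary VMAs in the client's address space.  Both VMAs are attached
 * to the *old* node so they are torn down together with the ttm_mem_reg
 * once the move completes.
 */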
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nvkm_mem *old_node = bo->mem.mm_node;
	struct nvkm_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
			  NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
			  NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nvkm_vm_put(&old_node->vma[0]);
		return ret;
	}

	nvkm_vm_map(&old_node->vma[0], old_node);
	nvkm_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

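/*
 * Probe for the most capable copy method.  The table below is ordered
 * from most to least preferred; the empty sentinel entry stops the probe
 * loop early, leaving the trailing 0x88b4 CRYPT entry effectively
 * disabled.
 */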
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(&chan->user,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

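/*
 * VRAM <-> SYSTEM moves cannot be done by the copy engines directly, so
 * they bounce through a temporary GART placement: flipd copies via GART
 * on the way out of VRAM, flips on the way in.
 */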
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
			nvkm_vm_map(vma, new_mem->mm_node);
		} else {
			nvkm_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_mem *node = mem->mm_node;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_bar *bar = nvxx_bar(&drm->device);
			int page_shift = 12;
			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = node->page_shift;

			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
					    &node->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&node->bar_vma, node);
			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	nvkm_vm_unmap(&node->bar_vma);
	nvkm_vm_put(&node->bar_vma);
}

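/*
 * Called when the CPU is about to fault on the buffer: on >=Tesla chips a
 * tiled buffer sitting in SYSTEM is first migrated to TT, while on older
 * chips a VRAM buffer beyond the BAR1 window is clamped into the mappable
 * range and revalidated.
 */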
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

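/*
 * Populate the ttm_tt backing pages.  The fast paths hand the job to the
 * coherent DMA allocator (force_coherent objects), AGP, or swiotlb; the
 * fallback allocates from the TTM pool and streams each page through
 * dma_map_page().
 */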
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached)
		return ttm_dma_populate(ttm_dma, dev->dev);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			/* unwind every page mapped so far, including page 0;
			 * the failed page itself was never mapped
			 */
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached) {
		ttm_dma_unpopulate(ttm_dma, dev->dev);
		return;
	}

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
	struct nvkm_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
		   struct nvkm_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
			  NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		list_del(&vma->head);
	}
}