drm/nouveau: port to nvif client/device/objects
drivers/gpu/drm/nouveau/nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = nouveau_fence_ref(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

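/*
 * The three helpers above form a small allocator for the hardware tile
 * regions: nv10_bo_set_tiling() scans every region, claims the first
 * free one via nv10_bo_get_tile_region(), tears down regions whose
 * stale pitch is still programmed, and releases the rest again via
 * nv10_bo_put_tile_region(). A usage sketch (illustrative; `fence`
 * stands in for whatever fence last touched the buffer):
 *
 *	struct nouveau_drm_tile *tile;
 *	tile = nv10_bo_set_tiling(dev, addr, size, pitch, flags);
 *	...
 *	nv10_bo_put_tile_region(dev, tile, fence);
 */
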
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

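/*
 * Worked example for the fix-up above (numbers illustrative): on a
 * pre-Tesla chipset >= 0x40 with tile_mode == 4, *align becomes 65536
 * and a 200000-byte request is first rounded up to a multiple of
 * 64 * 4 = 256 bytes (200192), then to PAGE_SIZE (200704 with 4 KiB
 * pages). On Tesla and newer the rounding is instead driven entirely
 * by the bo's page_shift.
 */
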
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

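/*
 * Minimal usage sketch for nouveau_bo_new() (sizes illustrative):
 * allocate an untiled 64 KiB buffer in VRAM and map it for CPU access.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, NULL, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */
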
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

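/*
 * Example of the split above, assuming 256 MiB of VRAM (65536 4 KiB
 * pages): a ZETA (depth) buffer is restricted to pages 32768 and up,
 * while a colour buffer of the same size is kept below page 32768, so
 * the two land on different memory controller units.
 */
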
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

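/*
 * nouveau_bo_pin()/nouveau_bo_unpin() are reference counted: only the
 * first pin validates the bo into `memtype` and debits the
 * vram/gart_available accounting, and only the final unpin
 * re-validates and credits it back, so calls must be strictly paired.
 */
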
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

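/*
 * The accessors above let callers poke a mapped bo without caring
 * where it lives: ttm_bo_kmap() may hand back an I/O mapping (e.g.
 * VRAM through a BAR), in which case ioread/iowrite*_native are
 * required instead of plain loads and stores. Sketch, assuming the bo
 * was mapped with nouveau_bo_map():
 *
 *	nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *	val = nouveau_bo_rd32(nvbo, 0);
 */
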
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

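/*
 * Every per-generation hook above implements the same contract: with
 * the source mapped at node->vma[0] and the destination at
 * node->vma[1], emit enough commands to copy new_mem->num_pages
 * PAGE_SIZE-pitched lines, chunking where the class caps the line
 * count per submission (8191 for the 0x85b5/0x90b8-style copies, 2047
 * for the M2MF paths).
 */
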
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, NvNotify0);
		OUT_RING (chan, NvDmaFB);
		OUT_RING (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

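/*
 * Worked example for the loop above: a 16 MiB bo is 4096 pages, so the
 * copy is submitted as 2047 + 2047 + 2 lines. On NV04 the context DMA
 * objects returned by nouveau_bo_mem_ctxdma() (NvDmaTT vs NvDmaFB)
 * tell M2MF whether each offset lives in the GART aperture or in VRAM.
 */
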
static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(bo->sync_obj, chan);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo, fence,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

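/*
 * nouveau_bo_move_init() above probes _methods[] in order and keeps
 * the first class that initialises: dedicated copy engines (non-zero
 * .engine) run on drm->cechan, everything else on drm->channel. Note
 * that the empty sentinel terminates the walk, so the 0x88b4 CRYPT
 * entry after it is deliberately unreachable; with no match at all,
 * buffer moves fall back to the CPU.
 */
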
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	spin_unlock(&bo->bdev->fence_lock);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

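/*
 * Move policy summary for nouveau_bo_move() above: system-to-system
 * moves are a pointer swap ("fake bo copy"); anything else first tries
 * the hardware path via drm->ttm.move, bouncing through a TT placement
 * when one side is unbound system memory the GPU cannot address
 * (flipd/flips); only if that fails does it wait for idle and fall
 * back to ttm_bo_move_memcpy().
 */
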
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

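/*
 * io_mem_reserve above covers three cases: plain system memory (no bus
 * address), AGP/GART apertures, and VRAM, where on Tesla and newer a
 * window is carved out of BAR1 via bar->umap() so tiled memory can be
 * faulted through it; io_mem_free below only has to undo that last
 * case.
 */
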
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
	int ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;


	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			/* unwind every mapping made so far, including
			 * the one for page 0
			 */
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
	struct nouveau_fence *old_fence = NULL;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = new_fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

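/*
 * A bo may be mapped into several GPU address spaces at once; each
 * mapping is tracked by a nouveau_vma on nvbo->vma_list. Typical
 * pairing (sketch; the caller allocates the vma, and drm->client.vm
 * stands in for whichever address space is wanted):
 *
 *	ret = nouveau_bo_vma_add(nvbo, drm->client.vm, vma);
 *	...
 *	nouveau_bo_vma_del(nvbo, vma);
 */
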
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}