/* drivers/gpu/drm/nouveau/nouveau_bo.c */

/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

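/*
 * Pre-Tesla chips expose a fixed pool of tile regions in PFB.  The
 * helpers below (re)program a region's address/size/pitch, hand free
 * regions out to buffer objects, and fence each region so it is only
 * recycled once the GPU has finished with its previous configuration.
 */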
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = nouveau_fence_ref(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nvkm_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

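/*
 * Round the requested size and alignment up to hardware requirements:
 * tiled buffers on pre-Tesla chips need a chipset-specific alignment
 * and a size padded to a multiple derived from the tile mode, while
 * Tesla and newer align to the page size chosen for the VM mapping.
 * The final size is always padded out to whole pages.
 */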
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

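/*
 * Allocate and initialise a new buffer object.  The size is clamped to
 * the largest large-page-aligned value an int can hold, a large page
 * shift is chosen for buffers over 256KiB that aren't destined for
 * GART, and the object is then handed to TTM for backing storage and
 * placement.
 */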
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

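/*
 * Pinning locks a buffer into its current memory type.  The first pin
 * validates the buffer into the requested placement and adjusts the
 * VRAM/GART accounting; later pins just bump the refcount, and pinning
 * into a different memory type than an existing pin is rejected.
 */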
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		goto out;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

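/*
 * Mapped-buffer accessors.  These operate on a buffer previously
 * mapped with nouveau_bo_map() and transparently switch to I/O
 * accessors when the kmap points at I/O memory rather than system
 * pages.
 */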
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

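/*
 * Describe each TTM memory type to the core: VRAM is a fixed, mappable
 * aperture (write-combined where the BAR allows it) managed by
 * nouveau's own allocator on Tesla and up, while TT is backed by AGP,
 * the nv04 GART, or the generic manager depending on chipset and bus.
 */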
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvkm_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}

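/*
 * M2MF copy for Tesla-class chips.  Transfers are cut into chunks of
 * at most 4MiB, and for each chunk the source/destination are
 * described as either linear or tiled surfaces depending on whether
 * the backing nouveau_mem carries a memtype.
 */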
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

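/*
 * M2MF copy for NV04-class chips.  Source and destination are
 * addressed through ctxdma objects rather than VM offsets, and the
 * copy is issued page-by-page in batches of up to 2047 lines.
 */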
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nouveau_mem *old_node = bo->mem.mm_node;
	struct nouveau_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(drm->client.vm, size, old_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nouveau_vm_get(drm->client.vm, size, new_node->page_shift,
			     NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nouveau_vm_put(&old_node->vma[0]);
		return ret;
	}

	nouveau_vm_map(&old_node->vma[0], old_node);
	nouveau_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)nvif_client(&chan->device->base);
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(bo->sync_obj, chan);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo, fence,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

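/*
 * Pick the best available copy method.  The table below is walked in
 * order (preferred engines first); the empty sentinel entry stops the
 * walk early, leaving the trailing 0x88b4 CRYPT entry unreachable.
 */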
void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(chan->object, NULL,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
		    (new_mem->mem_type == TTM_PL_VRAM ||
		     nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

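/*
 * Main TTM move callback.  Moves out of unpopulated system memory are
 * satisfied by simply adopting the new placement; otherwise a
 * GPU-accelerated copy is attempted (bouncing through GART when one
 * side is system memory), with a CPU memcpy as the final fallback.
 * Pre-Tesla chips also carry the tile region over to the new
 * placement.
 */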
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	spin_unlock(&bo->bdev->fence_lock);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

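/*
 * Resolve where a memory region lives on the bus so TTM can map it for
 * CPU access: system memory needs nothing, AGP apertures use the AGP
 * base, and VRAM (or tiled GART memory on Tesla and up) goes through
 * BAR1, with a BAR VMA set up on Tesla and newer.
 */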
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvkm_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nouveau_bar *bar = nvkm_bar(&drm->device);

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nvkm_bar(&drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvkm_device(device), 1) >> PAGE_SHIFT;
	int ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

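/*
 * Populate/unpopulate backing pages for a TT object.  SG "slave"
 * objects reuse the importer's pages, AGP and swiotlb configurations
 * defer to their respective helpers, and the plain path allocates
 * from the TTM pool and DMA-maps each page, unwinding the mappings
 * on failure.
 */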
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			/* unwind mappings for pages [0, i); "while (i--)"
			 * rather than "while (--i)" so page 0 is unmapped
			 * and the counter can't wrap when i == 0 */
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nouveau_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvkm_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
	struct nouveau_fence *old_fence = NULL;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = new_fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

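/*
 * Attach a buffer object to an additional VM: reserve address space at
 * the bo's page shift and map it right away, except when the buffer
 * currently lives in system memory or is a large-page buffer sitting
 * in GART (those get (re)mapped later from move_ntfy).
 */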
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}