/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

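/* Program tile region 'reg' into PFB, and mirror the new state into the
 * engines (GR, MPEG) that carry per-region tiling configuration.  A zero
 * pitch tears down the region's previous setup instead of installing a
 * new one.
 */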
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_newpriv(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

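/* Claim tile region 'i' if it is idle, i.e. not currently in use and with
 * no fence (or only a signalled one) left over from a previous user.
 */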
static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_newpriv(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

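/* Release a tile region, optionally attaching a fence so that it is not
 * handed out again until the GPU work referencing it has completed.
 */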
static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_newpriv(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

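/* Pick the first idle tile region for a new tiled buffer at 'addr',
 * tearing down stale configurations on any other idle regions scanned
 * along the way, then program the chosen region with the requested
 * layout.  Returns NULL if no region is free (or pitch == 0).
 */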
static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_newpriv(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

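/* Grow the requested size/alignment to satisfy the tiling constraints of
 * the pre-NV50 chipsets, or the VM page size on NV50 and up.
 */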
static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

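/* Allocate and initialise a new buffer object, backed either by its own
 * pages (ttm_bo_type_device) or by a caller-supplied scatterlist
 * (ttm_bo_type_sg, as used for prime imports).  On NV50+, buffers that may
 * live in VRAM and exceed 256KiB are given large-page mappings.
 */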
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_newpriv(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

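/* Pin a buffer into the requested memory type, refcounting nested pins and
 * charging the space against the relevant VRAM/GART accounting pool.
 */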
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

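/* Map the whole buffer into kernel address space.  Accesses then go through
 * nouveau_bo_rd32()/nouveau_bo_wr32() and friends, which handle both the
 * iomem and the cached-pages case transparently.
 */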
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

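/* Queue a fence behind a GPU copy so TTM only tears down the source
 * placement once the copy has actually executed.
 */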
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, PAGE_SIZE);
		OUT_RING (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* COPY */);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING (chan, NvNotify0);
		OUT_RING (chan, NvDmaFB);
		OUT_RING (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_channel *chan)
{
	struct nouveau_cli *cli = chan->cli;
	struct nouveau_drm *drm = chan->drm;
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		ret = nouveau_object_new(nv_object(cli), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(cli),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

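/* Move VRAM -> SYSTEM in two hops: stage the data in a GART-visible TT
 * placement so the GPU can do the heavy copy, then let TTM finish the
 * move into system memory.
 */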
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

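/* The mirror of the above for SYSTEM -> VRAM: move the pages into a
 * GART-bound TT placement first, then have the GPU copy them into their
 * final VRAM placement.
 */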
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

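/* CPU fault handler: a bo the CPU touches must be reachable through the
 * BAR, so migrate anything that lies beyond the mappable window back into
 * the CPU-visible part of VRAM.
 */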
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every mapping made so far, including page 0 */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

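/* Swap the fence guarding a buffer; fence_lock keeps concurrent readers of
 * bo.sync_obj from seeing a half-updated pointer.
 */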
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

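/* Give the bo an address in 'vm' and, if it is currently resident, map its
 * backing pages there straight away.
 */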
int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}