drm/nvc0: initial support for tiled buffer objects
drivers/gpu/drm/nouveau/nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
        nouveau_vm_put(&nvbo->vma);
        kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, int *align, int *size,
                       int *page_shift)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type < NV_50) {
                if (nvbo->tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * nvbo->tile_mode);

                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * nvbo->tile_mode);
                        }
                }
        } else {
                if (likely(dev_priv->chan_vm)) {
                        if (*size > 256 * 1024)
                                *page_shift = dev_priv->chan_vm->lpg_shift;
                        else
                                *page_shift = dev_priv->chan_vm->spg_shift;
                } else {
                        *page_shift = 12;
                }

                *size = roundup(*size, (1 << *page_shift));
                *align = max((1 << *page_shift), *align);
        }

        *size = roundup(*size, PAGE_SIZE);
}

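/*
 * Worked example (editor's illustration, not from the original source):
 * on an nv40-class chip with tile_mode = 4, a 100 KiB request keeps its
 * size (100 KiB is already a multiple of 64 * 4 = 256 bytes and of
 * PAGE_SIZE) but has its alignment forced up to 64 KiB.  On an nv50-class
 * chip, assuming lpg_shift were 16, a 300 KiB request would instead be
 * rounded up to 320 KiB and aligned to the 64 KiB large-page size.
 */
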
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0, page_shift = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;
        nvbo->bo.bdev = &dev_priv->ttm.bdev;

        nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
        align >>= PAGE_SHIFT;

        if (!nvbo->no_vm && dev_priv->chan_vm) {
                ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                                     NV_MEM_ACCESS_RW, &nvbo->vma);
                if (ret) {
                        kfree(nvbo);
                        return ret;
                }
        }

        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
        nvbo->channel = NULL;

        if (nvbo->vma.node) {
                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                        nvbo->bo.offset = nvbo->vma.offset;
        }

        *pnvbo = nvbo;
        return 0;
}

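/*
 * Usage sketch (editor's illustration; error handling trimmed, "dev" is
 * assumed to be a valid drm_device): allocate an untiled, CPU-mappable,
 * page-sized buffer in VRAM.
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret = nouveau_bo_new(dev, NULL, PAGE_SIZE, 0, TTM_PL_FLAG_VRAM,
 *                               0, 0x0000, false, true, &nvbo);
 */
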
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

        if (dev_priv->card_type == NV_10 &&
            nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
                /*
                 * Make sure that the color and depth buffers are handled
                 * by independent memory controller units. Up to a 9x
                 * speed up when alpha-blending and depth-test are enabled
                 * at the same time.
                 */
                int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

                if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
                        nvbo->placement.fpfn = vram_pages / 2;
                        nvbo->placement.lpfn = ~0;
                } else {
                        nvbo->placement.fpfn = 0;
                        nvbo->placement.lpfn = vram_pages / 2;
                }
        }
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);

        set_placement_range(nvbo, type);
}

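/*
 * Editor's note: the "busy" argument only widens the fallback list, so a
 * caller such as nouveau_bo_evict_flags() below can use
 * nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, TTM_PL_FLAG_SYSTEM) to
 * prefer GART while letting a contended validation spill to system memory.
 */
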
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = nouveau_bo_validate(nvbo, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }

        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

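/*
 * Typical pin/map round trip built from the helpers in this file
 * (editor's sketch; assumes "nvbo" came from nouveau_bo_new()):
 *
 *      ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *      if (ret == 0) {
 *              ret = nouveau_bo_map(nvbo);
 *              if (ret == 0) {
 *                      nouveau_bo_wr32(nvbo, 0, 0xcafebabe);
 *                      nouveau_bo_unmap(nvbo);
 *              }
 *              nouveau_bo_unpin(nvbo);
 *      }
 */
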
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
                    bool no_wait_reserve, bool no_wait_gpu)
{
        int ret;

        ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
                              no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        if (nvbo->vma.node) {
                if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
                        nvbo->bo.offset = nvbo->vma.offset;
        }

        return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

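/*
 * Editor's note: "index" in the accessors above counts elements of the
 * access width, not bytes; nouveau_bo_rd32(nvbo, 1) reads byte offset 4,
 * while nouveau_bo_rd16(nvbo, 1) reads byte offset 2.
 */
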
static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                if (dev_priv->card_type >= NV_50) {
                        man->func = &nouveau_vram_manager;
                        man->io_reserve_fastpath = false;
                        man->use_io_reserve_lru = true;
                } else {
                        man->func = &ttm_bo_manager_func;
                }
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        case TTM_PL_TT:
                man->func = &ttm_bo_manager_func;
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        man->gpu_offset = dev_priv->gart_info.aper_base;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref(&fence);
        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (nvbo->no_vm) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_mem->start << PAGE_SHIFT;
        dst_offset = new_mem->start << PAGE_SHIFT;
        if (!nvbo->no_vm) {
                if (old_mem->mem_type == TTM_PL_VRAM)
                        src_offset = nvbo->vma.offset;
                else
                        src_offset += dev_priv->gart_info.aper_base;

                if (new_mem->mem_type == TTM_PL_VRAM)
                        dst_offset = nvbo->vma.offset;
                else
                        dst_offset += dev_priv->gart_info.aper_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (length) {
                u32 amount, stride, height;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM &&
                    nouveau_bo_tile_layout(nvbo)) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}

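/*
 * Editor's note on the chunking above: each loop iteration moves at most
 * 4 MiB, described to M2MF as "height" lines of a 64-byte (16 * 4) pitch,
 * i.e. up to 65536 lines per pass; linear surfaces skip the tiling setup
 * by writing 1 to methods 0x0200/0x021c instead.
 */
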
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                                 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

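/*
 * Editor's note: the pre-NV50 path treats the copy as PAGE_SIZE-wide
 * lines, one per page, and feeds M2MF at most 2047 lines per submission,
 * hence the loop; a 16 MiB buffer (4096 pages) would take three passes.
 */
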
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->no_vm) {
                chan = dev_priv->channel;
                mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
        }

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret == 0) {
                ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                                    no_wait_reserve,
                                                    no_wait_gpu, new_mem);
        }

        if (chan == dev_priv->channel)
                mutex_unlock(&chan->mutex);
        return ret;
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        if (ret)
                goto out;

out:
        ttm_bo_mem_put(bo, &tmp_mem);
        return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        uint64_t offset;

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                /* Nothing to do. */
                *new_tile = NULL;
                return 0;
        }

        offset = new_mem->start << PAGE_SHIFT;

        if (dev_priv->chan_vm) {
                nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode,
                                                nvbo->tile_flags);
        }

        return 0;
}

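/*
 * Editor's note: two tiling schemes meet here.  Cards with a channel VM
 * (NV50 and up) simply map the new VRAM pages into the bo's virtual
 * address range, while NV10-NV4x cards instead program a tile region
 * covering the bo's physical offset.
 */
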
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
                *old_tile = new_tile;
        }
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
        if (ret)
                return ret;

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel || dev_priv->card_type == NV_C0) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (ret)
                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        else
                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

        return ret;
}

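/*
 * Editor's summary of the move policy above: VRAM<->GART moves go through
 * M2MF directly; moves to or from TTM_PL_SYSTEM are staged through a GART
 * bounce buffer (flipd/flips); and if the channel isn't available, the
 * card is NV_C0, or any accelerated path fails, the move falls back to
 * ttm_bo_move_memcpy().
 */
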
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;
        int ret;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
        {
                struct nouveau_vram *vram = mem->mm_node;
                u8 page_shift;

                if (!dev_priv->bar1_vm) {
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = pci_resource_start(dev->pdev, 1);
                        mem->bus.is_iomem = true;
                        break;
                }

                if (dev_priv->card_type == NV_C0)
                        page_shift = vram->page_shift;
                else
                        page_shift = 12;

                ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
                                     page_shift, NV_MEM_ACCESS_RW,
                                     &vram->bar_vma);
                if (ret)
                        return ret;

                nouveau_vm_map(&vram->bar_vma, vram);

                mem->bus.offset = vram->bar_vma.offset;
                if (dev_priv->card_type == NV_50) /*XXX*/
                        mem->bus.offset -= 0x0020000000ULL;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
        }
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct nouveau_vram *vram = mem->mm_node;

        if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
                return;

        if (!vram->bar_vma.node)
                return;

        nouveau_vm_unmap(&vram->bar_vma);
        nouveau_vm_put(&vram->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 ||
                    !nouveau_bo_tile_layout(nvbo))
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
        return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
        struct nouveau_fence *old_fence;

        if (likely(fence))
                nouveau_fence_ref(fence);

        spin_lock(&nvbo->bo.bdev->fence_lock);
        old_fence = nvbo->bo.sync_obj;
        nvbo->bo.sync_obj = fence;
        spin_unlock(&nvbo->bo.bdev->fence_lock);

        nouveau_fence_unref(&old_fence);
}

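/*
 * Editor's note: the reference on the incoming fence is taken before it
 * is published under fence_lock, and the reference on the fence it
 * replaces is dropped only after the lock is released, so anyone reading
 * sync_obj under fence_lock never sees a fence that can be freed under
 * them.
 */
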
struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = __nouveau_fence_signalled,
        .sync_obj_wait = __nouveau_fence_wait,
        .sync_obj_flush = __nouveau_fence_flush,
        .sync_obj_unref = __nouveau_fence_unref,
        .sync_obj_ref = __nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};