drm/radeon: add VRAM debugfs access v3
drivers/gpu/drm/radeon/radeon_ttm.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET	(0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

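/*
 * TTM passes only the ttm_bo_device around; recover the radeon_device
 * by walking back up the two embedding structs (radeon_mman, then
 * radeon_device) with container_of().
 */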
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

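/*
 * Describe each memory domain (SYSTEM, TT/GTT, VRAM) to TTM: which
 * range manager drives it, where it starts in the GPU address space,
 * and which CPU caching attributes mappings of it may use.
 */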
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

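/*
 * Called when TTM needs to evict a BO: VRAM buffers go to GTT while
 * the GFX ring is up (so a GPU blit can do the move), otherwise to
 * the CPU domain; everything else is evicted to the CPU domain.
 */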
static void radeon_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

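/*
 * Move a BO with the GPU copy ring: translate both placements from
 * page offsets into MC addresses, kick off radeon_copy(), and let TTM
 * clean up the old placement once the returned fence signals.
 */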
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	/* sync other rings */
	fence = bo->sync_obj;
	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

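/*
 * VRAM -> system cannot be blitted in one step: first find a
 * temporary GTT placement, bind the BO there and blit it out of VRAM,
 * then let TTM finish the move from GTT to system memory.
 */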
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

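/*
 * The mirror case, system -> VRAM: bind the BO into a temporary GTT
 * placement first, then blit it from GTT into its final VRAM spot.
 */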
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

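/*
 * Top-level move callback.  No-op and TT<->system moves only need a
 * (re)bind, GPU blits are used whenever a copy ring is available, and
 * anything that fails (or has no copy ring) falls back to memcpy.
 */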
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	}
	return r;
}

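/*
 * Describe how a placement is reached from the CPU: system pages need
 * no bus window, AGP and VRAM are I/O ranges at bus.base + bus.offset,
 * and only the CPU-visible part of VRAM may be mapped.
 */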
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

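/*
 * TTM sync-object hooks: thin wrappers that let TTM wait on, flush,
 * reference and test radeon fences without knowing their type.
 */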
static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;
};

static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	int r;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

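/*
 * Allocate the per-BO backing structure.  AGP systems reuse the
 * generic AGP TTM backend; everyone else gets a radeon_ttm_tt whose
 * DMA-address array feeds radeon_gart_bind().
 */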
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

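/*
 * Populate/unpopulate allocate and free the backing pages.  Three
 * paths: DMA-buf imports already carry an sg table, swiotlb systems
 * go through the coherent DMA pool, and the default path takes pool
 * pages and maps each one with pci_map_page().
 */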
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

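/*
 * Bring up TTM for this device: global state, the BO device, VRAM and
 * GTT managers sized from the memory controller, a pinned buffer for
 * the stolen VGA memory, and the debugfs files.
 */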
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

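/*
 * mmap support: TTM supplies the vm_ops; radeon copies them once and
 * overrides .fault so that page faults are serialized against memory
 * clock changes via pm.mclk_lock.
 */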
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	up_read(&rdev->pm.mclk_lock);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

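/*
 * The VRAM access file this patch adds: open sizes the inode to the
 * full MC VRAM size, and read returns VRAM contents one 32-bit word
 * at a time through the MM_INDEX/MM_DATA register window, so even
 * VRAM that is not CPU-visible can be inspected.
 */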
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

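/*
 * Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and
 * the card is DRM minor 0):
 *
 *   dd if=/sys/kernel/debug/dri/0/radeon_vram bs=4 count=1 skip=N
 *
 * reads the 32-bit word at VRAM byte offset 4*N.  Reads and seeks
 * must stay 4-byte aligned or read() returns -EINVAL.
 */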
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;
#endif
}