/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

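/*
 * Start of the mmap offset space handed out for TTM buffer objects:
 * the first page offset above the 4GB mark, so BO mappings stay clear
 * of the legacy DRM map offsets below it.
 */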
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

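/* Resolve a TTM device back to the radeon_device that embeds it. */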
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

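/*
 * Take references on the TTM memory accounting and BO global state,
 * which are shared between all TTM drivers in the system.
 */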
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

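/*
 * Describe each memory domain to TTM: SYSTEM is plain cached pages,
 * TT (GTT) is remapped through the GART (or the AGP aperture), and
 * VRAM is fixed on-card memory behind the PCI aperture.
 */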
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
			 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
			unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			int i;

			/* Try evicting to the CPU inaccessible part of VRAM
			 * first, but only set GTT as busy placement, so this
			 * BO will be evicted to GTT rather than causing other
			 * BOs to be evicted from VRAM
			 */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
							 RADEON_GEM_DOMAIN_GTT);
			rbo->placement.num_busy_placement = 0;
			for (i = 0; i < rbo->placement.num_placement; i++) {
				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
					if (rbo->placements[i].fpfn < fpfn)
						rbo->placements[i].fpfn = fpfn;
				} else {
					rbo->placement.busy_placement =
						&rbo->placements[i];
					rbo->placement.num_busy_placement = 1;
				}
			}
		} else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	if (radeon_ttm_tt_has_userptr(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

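/*
 * Copy a BO between placements with the GPU: translate both placements
 * to GPU addresses and queue an asynchronous copy on the copy ring,
 * leaving a fence on the BO for TTM to wait on.
 */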
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = (u64)old_mem->start << PAGE_SHIFT;
	new_start = (u64)new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

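/*
 * VRAM -> system move: blit into a temporary GTT placement first, so
 * the GPU does the heavy copy, then let TTM move the now GTT-backed
 * pages to system memory.
 */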
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

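/*
 * System -> VRAM move: the reverse path. Move the pages into a GTT
 * placement first, then blit from GTT into the final VRAM placement.
 */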
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

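/*
 * Top-level move callback: pick the cheapest strategy for each
 * transition (no-op, rebind-only, GPU blit through GTT) and fall back
 * to a CPU memcpy when no copy ring is available or the blit fails.
 */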
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (r)
		return r;

	/* Can't move a pinned BO */
	rbo = container_of(bo, struct radeon_bo, tbo);
	if (WARN_ON_ONCE(rbo->pin_count > 0))
		return -EINVAL;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}

static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
	struct mm_struct		*usermm;
	uint32_t			userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0, nents;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned, 0);
	return r;
}

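/* undo radeon_ttm_tt_pin_userptr(): unmap, dirty and release the pages */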
static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		/* pinning the user pages can fail; don't bind on failure */
		r = radeon_ttm_tt_pin_userptr(ttm);
		if (r)
			return r;
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

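/*
 * Allocate backing pages. Userptr BOs only need an (empty) sg_table
 * here, since their pages are pinned at bind time; dma-buf importers
 * already come with their pages in ttm->sg; everything else is filled
 * from the TTM page pools (the DMA pool when swiotlb is active).
 */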
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	struct radeon_device *rdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
	.lru_tail = &ttm_bo_default_lru_tail,
	.swap_lru_tail = &ttm_bo_default_swap_lru_tail,
};

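/*
 * Bring up TTM for this device: register the VRAM and GTT heaps and
 * pin a small VRAM buffer (the 'stollen_vga_memory' BO, which by its
 * name appears to cover the stolen VGA memory) so nothing else is
 * placed on top of it.
 */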
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No one else is using the address space, so start it at 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the VRAM size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

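/*
 * Page fault handler for mapped BOs: wrap TTM's fault handler so each
 * fault is served with pm.mclk_lock held for read, presumably keeping
 * memory reclocking from happening underneath a VRAM access.
 */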
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	up_read(&rdev->pm.mclk_lock);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return -EINVAL;
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

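/*
 * debugfs file exposing all of VRAM for 32-bit reads through the
 * MM_INDEX/MM_DATA register window (with the high address bits in
 * MM_INDEX_HI on Evergreen and later).
 */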
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

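/*
 * Companion debugfs file for GTT: reads go through the kernel mapping
 * of each GART page, returning zeroes for unallocated pages.
 */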
static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.gtt = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}