/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */
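
/*
 * Caller pattern sketch (illustrative, not part of the original file);
 * the reserve/unreserve helpers come from radeon_object.h:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_kmap(bo, &ptr);
 *	...
 *	radeon_bo_unreserve(bo);
 */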

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}
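
/*
 * Illustration (added note, not from the original source): requesting
 * RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT produces two entries in
 * rbo->placements with VRAM first, so TTM prefers VRAM and can fall back
 * to GTT; a domain of 0 degenerates to a single cached SYSTEM placement.
 */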

int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
		unsigned long size, bool kernel, u32 domain,
		struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;
retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	bo->rdev = rdev;
	bo->gobj = gobj;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, 0, !kernel, NULL, size,
			&radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;
	if (gobj) {
		mutex_lock(&bo->rdev->gem.mutex);
		list_add_tail(&bo->list, &rdev->gem.objects);
		mutex_unlock(&bo->rdev->gem.mutex);
	}
	return 0;
}
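
/*
 * Usage sketch (illustrative only): create a kernel-owned BO in GTT and
 * drop the reference again once it is no longer needed.
 *
 *	struct radeon_bo *bo;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, NULL, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, &bo);
 *	if (r)
 *		return r;
 *	...
 *	radeon_bo_unref(&bo);
 */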

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}
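
/*
 * CPU access sketch (illustrative only); the BO must already be reserved
 * by the caller, and pinned if the mapping has to stay valid:
 *
 *	void *ptr;
 *
 *	r = radeon_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		memset(ptr, 0, PAGE_SIZE);
 *		radeon_bo_kunmap(bo);
 *	}
 */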

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}
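
/*
 * Pinning sketch (illustrative only): pin a reserved BO into VRAM, read
 * back its GPU address, and release the pin when the hardware is done.
 * Pins are counted, so nested pin/unpin pairs are fine.
 *
 *	u64 gpu_addr;
 *
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r)
 *		return r;
 *	...
 *	radeon_bo_unpin(bo);
 */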

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;
	struct drm_gem_object *gobj;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		gobj = bo->gobj;
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			gobj, bo, (unsigned long)gobj->size,
			*((unsigned long *)&gobj->refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		radeon_bo_unref(&bo);
		gobj->driver_private = NULL;
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->list, head);
	} else {
		list_add_tail(&lobj->list, head);
	}
}

int radeon_bo_list_reserve(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	int r;

	list_for_each_entry(lobj, head, list) {
		r = radeon_bo_reserve(lobj->bo, false);
		if (unlikely(r != 0))
			return r;
		lobj->reserved = true;
	}
	return 0;
}

void radeon_bo_list_unreserve(struct list_head *head)
{
	struct radeon_bo_list *lobj;

	list_for_each_entry(lobj, head, list) {
		/* only unreserve objects we successfully reserved */
		if (lobj->reserved && radeon_bo_is_reserved(lobj->bo))
			radeon_bo_unreserve(lobj->bo);
	}
}

int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	list_for_each_entry(lobj, head, list) {
		lobj->reserved = false;
	}
	r = radeon_bo_list_reserve(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, list) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

void radeon_bo_list_fence(struct list_head *head, void *fence)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	struct radeon_fence *old_fence = NULL;

	list_for_each_entry(lobj, head, list) {
		bo = lobj->bo;
		spin_lock(&bo->tbo.lock);
		old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
		bo->tbo.sync_obj = radeon_fence_ref(fence);
		bo->tbo.sync_obj_arg = NULL;
		spin_unlock(&bo->tbo.lock);
		if (old_fence) {
			radeon_fence_unref(&old_fence);
		}
	}
}
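
/*
 * Command-submission flow sketch (illustrative only): the CS path roughly
 * chains the list helpers above like this; radeon_bo_list_validate() takes
 * care of reserving every BO on the list.
 *
 *	radeon_bo_list_add_object(lobj, &head);	/- one call per relocated BO
 *	r = radeon_bo_list_validate(&head);
 *	if (r)
 *		goto out;
 *	...submit the command stream...
 *	radeon_bo_list_fence(&head, fence);
 * out:
 *	radeon_bo_list_unreserve(&head);
 */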

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
				struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.mm_node->start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.mm_node->start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}