/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */
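
/*
 * Example (illustrative sketch, not part of the original source): the
 * reserve/unreserve convention referred to above means a caller brackets any
 * BO access with radeon_bo_reserve()/radeon_bo_unreserve(); the unreserve
 * helper is assumed to be the small inline wrapper from radeon_object.h.
 * The variables bo, ptr, data and len are assumed to exist in the caller:
 *
 *      r = radeon_bo_reserve(bo, false);
 *      if (likely(r == 0)) {
 *              r = radeon_bo_kmap(bo, &ptr);
 *              if (r == 0) {
 *                      memcpy(ptr, data, len);
 *                      radeon_bo_kunmap(bo);
 *              }
 *              radeon_bo_unreserve(bo);
 *      }
 */
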
void radeon_bo_clear_va(struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va, *tmp;

        list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
                /* remove from all vm address spaces */
                radeon_vm_bo_rmv(bo->rdev, bo_va);
        }
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        if (domain & RADEON_GEM_DOMAIN_CPU)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
}

int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        down_read(&rdev->pm.mclk_lock);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, 0, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
                        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                                domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        dev_err(rdev->dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                }
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}
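
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * kernel-owned allocation pairs radeon_bo_create() with reserve/pin/kmap.
 * The variables rdev, size, bo, gpu_addr and cpu_ptr are assumed to exist in
 * the caller:
 *
 *      r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *                           RADEON_GEM_DOMAIN_GTT, NULL, &bo);
 *      if (r)
 *              return r;
 *      r = radeon_bo_reserve(bo, false);
 *      if (unlikely(r != 0))
 *              return r;
 *      r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 *      if (r == 0)
 *              r = radeon_bo_kmap(bo, &cpu_ptr);
 *      radeon_bo_unreserve(bo);
 */
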
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        down_read(&rdev->pm.mclk_lock);
        ttm_bo_unref(&tbo);
        up_read(&rdev->pm.mclk_lock);
        if (tbo == NULL)
                *bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                /* force to pin into visible video ram */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
        if (max_offset) {
                u64 lpfn = max_offset >> PAGE_SHIFT;

                if (!bo->placement.lpfn)
                        bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

                if (lpfn < bo->placement.lpfn)
                        bo->placement.lpfn = lpfn;
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
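
/*
 * Example (illustrative sketch, not taken from the driver): pinning a scanout
 * buffer into the CPU-visible part of VRAM uses the restricted variant with
 * the visible aperture size as the upper bound; rbo, rdev and gpu_addr are
 * assumed to be provided by the caller:
 *
 *      r = radeon_bo_reserve(rbo, false);
 *      if (likely(r == 0)) {
 *              r = radeon_bo_pin_restricted(rbo, RADEON_GEM_DOMAIN_VRAM,
 *                                           rdev->mc.visible_vram_size,
 *                                           &gpu_addr);
 *              radeon_bo_unreserve(rbo);
 *      }
 */
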
int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects !\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                        MTRR_TYPE_WRCOMB, 1);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head)
{
        if (lobj->wdomain) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
}

int radeon_bo_list_validate(struct list_head *head)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        u32 domain;
        int r;

        r = ttm_eu_reserve_buffers(head);
        if (unlikely(r != 0)) {
                return r;
        }
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
                        domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false, false);
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
                                        domain |= RADEON_GEM_DOMAIN_GTT;
                                        goto retry;
                                }
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        BUG_ON(!atomic_read(&bo->tbo.reserved));

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        BUG_ON(!atomic_read(&bo->tbo.reserved));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        BUG_ON(!atomic_read(&bo->tbo.reserved));

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_bo *rbo;
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                size = bo->mem.num_pages << PAGE_SHIFT;
                offset = bo->mem.start << PAGE_SHIFT;
                if ((offset + size) > rdev->mc.visible_vram_size) {
                        /* hurrah the memory is not visible ! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
                        /* this should not happen */
                        if ((offset + size) > rdev->mc.visible_vram_size)
                                return -EINVAL;
                }
        }
        return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0))
                return r;
        spin_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        spin_unlock(&bo->tbo.bdev->fence_lock);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}

/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}
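
/*
 * For completeness (a sketch, not part of this file): the unreserve side used
 * in the examples above is assumed to be a thin static inline in
 * radeon_object.h, roughly:
 *
 *      static inline void radeon_bo_unreserve(struct radeon_bo *bo)
 *      {
 *              ttm_bo_unreserve(&bo->tbo);
 *      }
 */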