/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

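/*
 * radeon_bo_clear_va - remove all VM mappings of a BO
 *
 * Unlinks every radeon_bo_va attached to @bo from its VM's address
 * space list and frees it; called when the BO is destroyed.
 */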
void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		mutex_lock(&bo_va->vm->mutex);
		list_del(&bo_va->vm_list);
		mutex_unlock(&bo_va->vm->mutex);
		list_del(&bo_va->bo_list);
		kfree(bo_va);
	}
}

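/*
 * radeon_ttm_bo_destroy - TTM destroy callback
 *
 * Invoked when the last TTM reference goes away: unlinks the BO from
 * the device's GEM object list, drops its surface register and VM
 * mappings, releases the GEM base object and frees the BO.
 */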
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

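/* A TTM BO belongs to radeon iff it uses our destroy callback. */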
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

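/*
 * radeon_ttm_placement_from_domain - build a TTM placement list from a
 * radeon domain mask (VRAM/GTT/CPU); falls back to system memory when
 * no domain bit is set.
 */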
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

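/*
 * radeon_bo_create - allocate a new GEM-backed TTM buffer object
 *
 * The size is page aligned and capped at the smaller of visible VRAM
 * and GTT. A failed VRAM allocation (other than -ERESTARTSYS) is
 * retried with GTT added to the allowed domains.
 */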
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldMB bigger than %ldMB limit\n",
		       __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

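/*
 * radeon_bo_kmap - map a BO into kernel address space
 *
 * Maps the whole object and caches the virtual address in bo->kptr,
 * so repeated calls are cheap; the address is also returned through
 * @ptr when it is non-NULL.
 */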
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

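/* Drop the kernel mapping established by radeon_bo_kmap(). */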
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

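/*
 * radeon_bo_unref - release a BO reference and clear the pointer
 *
 * The underlying TTM object is unreferenced under the VRAM mutex;
 * radeon_ttm_bo_destroy() runs once the last reference is gone.
 */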
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}

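/*
 * radeon_bo_pin_restricted - pin a BO into @domain, optionally keeping
 * it below @max_offset (0 means unrestricted). A BO that is already
 * pinned only gets its pin count bumped; otherwise the placement is
 * marked NO_EVICT and validated once.
 */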
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

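/*
 * radeon_bo_unpin - drop one pin reference; when the count reaches
 * zero the NO_EVICT flag is cleared and the BO becomes evictable again.
 */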
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

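/* Evict all BOs from VRAM, e.g. when suspending the device. */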
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

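/*
 * radeon_bo_force_delete - reap BOs leaked by userspace at teardown:
 * each leftover object is reported, unlinked from the GEM list and
 * its GEM reference dropped, which in turn unrefs the TTM object.
 */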
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

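/*
 * Set up a write-combining MTRR over the VRAM aperture, log the memory
 * configuration and initialize TTM.
 */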
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
				      MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

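/*
 * Queue a BO on a validation list; buffers that will be written are
 * added at the head so they get validated (and placed) first.
 */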
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
			       struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

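/*
 * radeon_bo_list_validate - reserve and validate all BOs on @head
 *
 * Unpinned buffers are validated into their write domain if they have
 * one, else their read domain; when VRAM validation fails the domain
 * is widened with GTT and the validation retried.
 */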
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
					    true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

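/*
 * radeon_bo_get_surface_reg - bind a tiled BO to a surface register
 *
 * Reuses the BO's current register if it has one, otherwise picks a
 * free one, stealing a register from an unpinned BO when all of them
 * are taken, and programs it with the BO's tiling parameters.
 */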
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

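/* Release the surface register held by @bo, if any. */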
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

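/*
 * radeon_bo_set_tiling_flags - validate and store tiling parameters
 *
 * On evergreen (CHIP_CEDAR) and newer the bank width/height, macro
 * tile aspect and tile split fields are range checked before the
 * flags and pitch are committed to the BO.
 */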
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

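/*
 * radeon_bo_check_tiling - keep the surface register in sync with the
 * BO's placement: dropped on @force_drop or when the BO has moved out
 * of VRAM, (re)acquired when a surface-tiled BO sits in VRAM.
 */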
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

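/*
 * TTM move callback: drop the surface register and invalidate the VM
 * mappings of the moved BO.
 */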
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

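/*
 * radeon_bo_fault_reserve_notify - CPU fault hook
 *
 * When the faulting BO lies in VRAM beyond the CPU-visible aperture,
 * it is revalidated with lpfn clamped to the visible region so the
 * fault can be served.
 */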
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah, the memory is not visible! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

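/*
 * radeon_bo_wait - wait for a BO to become idle
 *
 * Optionally returns the BO's current memory type through @mem_type;
 * with @no_wait set the function returns -EBUSY instead of sleeping.
 */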
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:	don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

/* object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}