/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
                                    struct ttm_mem_reg *mem)
{
        if (mem->start << PAGE_SHIFT >= adev->mc.visible_vram_size)
                return 0;

        return ((mem->start << PAGE_SHIFT) + mem->size) >
                adev->mc.visible_vram_size ?
                adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
                mem->size;
}

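/*
 * Track global GTT/VRAM usage as buffers move between domains. The visible
 * portion of a VRAM buffer is clamped by amdgpu_get_vis_part_size() above:
 * e.g. with a 256 MiB CPU-visible aperture, a 16 MiB buffer starting at
 * offset 248 MiB only contributes 8 MiB to vram_vis_usage.
 */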
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
                                       struct ttm_mem_reg *old_mem,
                                       struct ttm_mem_reg *new_mem)
{
        u64 vis_size;

        if (!adev)
                return;

        if (new_mem) {
                switch (new_mem->mem_type) {
                case TTM_PL_TT:
                        atomic64_add(new_mem->size, &adev->gtt_usage);
                        break;
                case TTM_PL_VRAM:
                        atomic64_add(new_mem->size, &adev->vram_usage);
                        vis_size = amdgpu_get_vis_part_size(adev, new_mem);
                        atomic64_add(vis_size, &adev->vram_vis_usage);
                        break;
                }
        }

        if (old_mem) {
                switch (old_mem->mem_type) {
                case TTM_PL_TT:
                        atomic64_sub(old_mem->size, &adev->gtt_usage);
                        break;
                case TTM_PL_VRAM:
                        atomic64_sub(old_mem->size, &adev->vram_usage);
                        vis_size = amdgpu_get_vis_part_size(adev, old_mem);
                        atomic64_sub(vis_size, &adev->vram_vis_usage);
                        break;
                }
        }
}

static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct amdgpu_bo *bo;

        bo = container_of(tbo, struct amdgpu_bo, tbo);

        amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

        drm_gem_object_release(&bo->gem_base);
        amdgpu_bo_unref(&bo->parent);
        if (!list_empty(&bo->shadow_list)) {
                mutex_lock(&bo->adev->shadow_list_lock);
                list_del_init(&bo->shadow_list);
                mutex_unlock(&bo->adev->shadow_list_lock);
        }
        kfree(bo->metadata);
        kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &amdgpu_ttm_bo_destroy)
                return true;
        return false;
}

static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                                      struct ttm_placement *placement,
                                      struct ttm_place *places,
                                      u32 domain, u64 flags)
{
        u32 c = 0;

        if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
                unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;

                if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
                    !(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
                    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
                        places[c].fpfn = visible_pfn;
                        places[c].lpfn = 0;
                        places[c].flags = TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM |
                                TTM_PL_FLAG_TOPDOWN;
                        c++;
                }

                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                        TTM_PL_FLAG_VRAM;
                if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
                        places[c].lpfn = visible_pfn;
                else
                        places[c].flags |= TTM_PL_FLAG_TOPDOWN;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_TT;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_CPU) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_SYSTEM;
                if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                        places[c].flags |= TTM_PL_FLAG_WC |
                                TTM_PL_FLAG_UNCACHED;
                else
                        places[c].flags |= TTM_PL_FLAG_CACHED;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GDS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_GWS) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
                c++;
        }

        if (domain & AMDGPU_GEM_DOMAIN_OA) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
                c++;
        }

        if (!c) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
                places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
                c++;
        }

        placement->num_placement = c;
        placement->placement = places;

        placement->num_busy_placement = c;
        placement->busy_placement = places;
}

void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
        amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
                                  rbo->placements, domain, rbo->flags);
}

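/*
 * For example (illustrative, derived from amdgpu_ttm_placement_init() above):
 * requesting AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT with no special
 * flags yields two places, write-combined top-down VRAM followed by cached
 * GTT, so TTM falls back to GTT when VRAM is full. Adding
 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED instead clamps the VRAM place to the
 * CPU-visible aperture via lpfn = visible_pfn.
 */
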
static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
                                        struct ttm_placement *placement)
{
        BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

        memcpy(bo->placements, placement->placement,
               placement->num_placement * sizeof(struct ttm_place));
        bo->placement.num_placement = placement->num_placement;
        bo->placement.num_busy_placement = placement->num_busy_placement;
        bo->placement.placement = bo->placements;
        bo->placement.busy_placement = bo->placements;
}

/**
 * amdgpu_bo_create_kernel - create BO for kernel use
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @align: alignment for the new BO
 * @domain: where to place it
 * @bo_ptr: resulting BO
 * @gpu_addr: GPU addr of the pinned BO
 * @cpu_addr: optional CPU address mapping
 *
 * Allocates and pins a BO for kernel internal use.
 *
 * Returns 0 on success, negative error code otherwise.
 */
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr)
{
        int r;

        r = amdgpu_bo_create(adev, size, align, true, domain,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
                             NULL, NULL, bo_ptr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r);
                return r;
        }

        r = amdgpu_bo_reserve(*bo_ptr, false);
        if (r) {
                dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
                goto error_free;
        }

        r = amdgpu_bo_pin(*bo_ptr, domain, gpu_addr);
        if (r) {
                dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
                goto error_unreserve;
        }

        if (cpu_addr) {
                r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
                if (r) {
                        dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
                        goto error_unreserve;
                }
        }

        amdgpu_bo_unreserve(*bo_ptr);

        return 0;

error_unreserve:
        amdgpu_bo_unreserve(*bo_ptr);

error_free:
        amdgpu_bo_unref(bo_ptr);

        return r;
}

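/*
 * Illustrative sketch (not part of the driver): a typical
 * amdgpu_bo_create_kernel() caller. The "example_*" names are hypothetical;
 * the teardown follows the reserve/kunmap/unpin/unreserve/unref pattern
 * used elsewhere in amdgpu.
 */
static int __maybe_unused example_alloc_scratch(struct amdgpu_device *adev)
{
        struct amdgpu_bo *bo;
        u64 gpu_addr;
        void *cpu_ptr;
        int r;

        /* One pinned, CPU-mapped page in GART. */
        r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT, &bo,
                                    &gpu_addr, &cpu_ptr);
        if (r)
                return r;

        memset(cpu_ptr, 0, PAGE_SIZE);

        /* Teardown: unmap and unpin under reservation, then drop the ref. */
        r = amdgpu_bo_reserve(bo, false);
        if (!r) {
                amdgpu_bo_kunmap(bo);
                amdgpu_bo_unpin(bo);
                amdgpu_bo_unreserve(bo);
        }
        amdgpu_bo_unref(&bo);
        return r;
}

/**
 * amdgpu_bo_create_restricted - create an amdgpu BO with a caller placement
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @byte_align: alignment for the new BO
 * @kernel: true for kernel-internal (non-mappable) allocations
 * @domain: AMDGPU_GEM_DOMAIN_* mask the BO may live in
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @placement: TTM placement list built by the caller
 * @resv: optional reservation object to share with other BOs
 * @bo_ptr: resulting BO
 *
 * Like amdgpu_bo_create(), but takes an explicit placement instead of
 * deriving one from @domain and @flags. VRAM BOs created with
 * AMDGPU_GEM_CREATE_VRAM_CLEARED are cleared to zero on the GPU before
 * they are returned.
 *
 * Returns 0 on success, negative error code otherwise.
 */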
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                unsigned long size, int byte_align,
                                bool kernel, u32 domain, u64 flags,
                                struct sg_table *sg,
                                struct ttm_placement *placement,
                                struct reservation_object *resv,
                                struct amdgpu_bo **bo_ptr)
{
        struct amdgpu_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align;
        size_t acc_size;
        int r;

        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);

        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
                                       sizeof(struct amdgpu_bo));

        bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->adev = adev;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->shadow_list);
        INIT_LIST_HEAD(&bo->va);
        bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
                                         AMDGPU_GEM_DOMAIN_GTT |
                                         AMDGPU_GEM_DOMAIN_CPU |
                                         AMDGPU_GEM_DOMAIN_GDS |
                                         AMDGPU_GEM_DOMAIN_GWS |
                                         AMDGPU_GEM_DOMAIN_OA);
        bo->allowed_domains = bo->prefered_domains;
        if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
                bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

        bo->flags = flags;

        /* For architectures that don't support WC memory,
         * mask out the WC flag from the BO
         */
        if (!drm_arch_can_wc_memory())
                bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocations are uninterruptible */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
                        acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                return r;
        }

        if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
            bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
                struct fence *fence;

                if (adev->mman.buffer_funcs_ring == NULL ||
                    !adev->mman.buffer_funcs_ring->ready) {
                        r = -EBUSY;
                        goto fail_free;
                }

                r = amdgpu_bo_reserve(bo, false);
                if (unlikely(r != 0))
                        goto fail_free;

                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
                if (unlikely(r != 0))
                        goto fail_unreserve;

                amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
                amdgpu_bo_fence(bo, fence, false);
                amdgpu_bo_unreserve(bo);
                fence_put(bo->tbo.moving);
                bo->tbo.moving = fence_get(fence);
                fence_put(fence);
        }
        *bo_ptr = bo;

        trace_amdgpu_bo_create(bo);

        return 0;

fail_unreserve:
        amdgpu_bo_unreserve(bo);
fail_free:
        amdgpu_bo_unref(&bo);
        return r;
}

static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
                                   unsigned long size, int byte_align,
                                   struct amdgpu_bo *bo)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        int r;

        if (bo->shadow)
                return 0;

        bo->flags |= AMDGPU_GEM_CREATE_SHADOW;
        memset(&placements, 0,
               (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, AMDGPU_GEM_DOMAIN_GTT,
                                  AMDGPU_GEM_CREATE_CPU_GTT_USWC);

        r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
                                        AMDGPU_GEM_DOMAIN_GTT,
                                        AMDGPU_GEM_CREATE_CPU_GTT_USWC,
                                        NULL, &placement,
                                        bo->tbo.resv,
                                        &bo->shadow);
        if (!r) {
                bo->shadow->parent = amdgpu_bo_ref(bo);
                mutex_lock(&adev->shadow_list_lock);
                list_add_tail(&bo->shadow_list, &adev->shadow_list);
                mutex_unlock(&adev->shadow_list_lock);
        }

        return r;
}

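/**
 * amdgpu_bo_create - create an amdgpu BO with default placement
 *
 * @adev: amdgpu device object
 * @size: size for the new BO
 * @byte_align: alignment for the new BO
 * @kernel: true for kernel-internal (non-mappable) allocations
 * @domain: AMDGPU_GEM_DOMAIN_* mask describing where to place the BO
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @resv: optional reservation object to share with other BOs
 * @bo_ptr: resulting BO
 *
 * Builds the placement list for @domain/@flags and allocates the BO through
 * amdgpu_bo_create_restricted(). A GTT shadow BO is created as well when
 * AMDGPU_GEM_CREATE_SHADOW is requested and backups are enabled.
 *
 * Returns 0 on success, negative error code otherwise.
 */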
int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
                     struct sg_table *sg,
                     struct reservation_object *resv,
                     struct amdgpu_bo **bo_ptr)
{
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
        int r;

        memset(&placements, 0,
               (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, domain, flags);

        r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
                                        domain, flags, sg, &placement,
                                        resv, bo_ptr);
        if (r)
                return r;

        if (amdgpu_need_backup(adev) && (flags & AMDGPU_GEM_CREATE_SHADOW)) {
                r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr));
                if (r)
                        amdgpu_bo_unref(bo_ptr);
        }

        return r;
}

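/**
 * amdgpu_bo_backup_to_shadow - copy a BO's content into its GTT shadow
 *
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to back up; must have a shadow and be pinned or reserved
 * @resv: reservation object to sync the copy against
 * @fence: resulting fence of the copy
 * @direct: submit directly to the ring instead of through the scheduler
 *
 * Returns 0 on success, -EINVAL if @bo has no shadow, or a negative error
 * code if the copy could not be scheduled.
 */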
int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
                               struct amdgpu_ring *ring,
                               struct amdgpu_bo *bo,
                               struct reservation_object *resv,
                               struct fence **fence,
                               bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, bo_addr, shadow_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

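/**
 * amdgpu_bo_restore_from_shadow - copy a BO's content back from its shadow
 *
 * @adev: amdgpu device object
 * @ring: ring used for the copy
 * @bo: BO to restore; must have a shadow and be pinned or reserved
 * @resv: reservation object to sync the copy against
 * @fence: resulting fence of the copy
 * @direct: submit directly to the ring instead of through the scheduler
 *
 * The mirror operation of amdgpu_bo_backup_to_shadow(), used e.g. to
 * recover buffer contents after a GPU reset.
 *
 * Returns 0 on success, -EINVAL if @bo has no shadow, or a negative error
 * code if the copy could not be scheduled.
 */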
int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring,
                                  struct amdgpu_bo *bo,
                                  struct reservation_object *resv,
                                  struct fence **fence,
                                  bool direct)
{
        struct amdgpu_bo *shadow = bo->shadow;
        uint64_t bo_addr, shadow_addr;
        int r;

        if (!shadow)
                return -EINVAL;

        bo_addr = amdgpu_bo_gpu_offset(bo);
        shadow_addr = amdgpu_bo_gpu_offset(bo->shadow);

        r = reservation_object_reserve_shared(bo->tbo.resv);
        if (r)
                goto err;

        r = amdgpu_copy_buffer(ring, shadow_addr, bo_addr,
                               amdgpu_bo_size(bo), resv, fence,
                               direct);
        if (!r)
                amdgpu_bo_fence(bo, *fence, true);

err:
        return r;
}

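/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 *
 * @bo: BO to map; must be reserved by the caller
 * @ptr: optional location where the kernel virtual address is stored
 *
 * Waits for pending moves, then maps the whole object. Mapping a BO
 * created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS is refused with -EPERM.
 * Repeated calls return the existing mapping.
 *
 * Returns 0 on success, negative error code otherwise.
 */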
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
        bool is_iomem;
        long r;

        if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                return -EPERM;

        if (bo->kptr) {
                if (ptr)
                        *ptr = bo->kptr;
                return 0;
        }

        r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;

        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r)
                return r;

        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr)
                *ptr = bo->kptr;

        return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
        if (bo == NULL)
                return NULL;

        ttm_bo_reference(&bo->tbo);
        return bo;
}

void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->tbo);
        ttm_bo_unref(&tbo);
        if (tbo == NULL)
                *bo = NULL;
}

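/**
 * amdgpu_bo_pin_restricted - pin a BO into a given placement range
 *
 * @bo: BO to pin; must be reserved by the caller
 * @domain: single AMDGPU_GEM_DOMAIN_* the BO should be pinned to
 * @min_offset: minimum GPU offset of the pinned BO, in bytes
 * @max_offset: maximum GPU offset, in bytes, or 0 for no restriction
 * @gpu_addr: optional location where the pinned GPU address is stored
 *
 * Pinning an already pinned BO just increments the pin count; in that case
 * the existing placement must match @domain. Userptr BOs cannot be pinned.
 * CPU-accessible VRAM pins are forced into the visible aperture unless
 * @max_offset says otherwise.
 *
 * Returns 0 on success, negative error code otherwise.
 */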
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;
        unsigned fpfn, lpfn;

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
                return -EPERM;

        if (WARN_ON_ONCE(min_offset > max_offset))
                return -EINVAL;

        if (bo->pin_count) {
                uint32_t mem_type = bo->tbo.mem.mem_type;

                if (domain != amdgpu_mem_type_to_domain(mem_type))
                        return -EINVAL;

                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = amdgpu_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
                        WARN_ON_ONCE(max_offset <
                                     (amdgpu_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        amdgpu_ttm_placement_from_domain(bo, domain);
        for (i = 0; i < bo->placement.num_placement; i++) {
                /* force to pin into visible video ram */
                if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
                    (!max_offset || max_offset >
                     bo->adev->mc.visible_vram_size)) {
                        if (WARN_ON_ONCE(min_offset >
                                         bo->adev->mc.visible_vram_size))
                                return -EINVAL;
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
                } else {
                        fpfn = min_offset >> PAGE_SHIFT;
                        lpfn = max_offset >> PAGE_SHIFT;
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
                if (!bo->placements[i].lpfn ||
                    (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }

        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
                goto error;
        }

        bo->pin_count = 1;
        if (gpu_addr != NULL)
                *gpu_addr = amdgpu_bo_gpu_offset(bo);
        if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
                bo->adev->vram_pin_size += amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        bo->adev->invisible_pin_size += amdgpu_bo_size(bo);
        } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
                bo->adev->gart_pin_size += amdgpu_bo_size(bo);
        }

error:
        return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
        return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++) {
                bo->placements[i].lpfn = 0;
                bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
        }
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
        if (unlikely(r)) {
                dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
                goto error;
        }

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
                bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
                if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
                        bo->adev->invisible_pin_size -= amdgpu_bo_size(bo);
        } else {
                bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
        }

error:
        return r;
}

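/*
 * Illustrative sketch (not part of the driver): pin and unpin must happen
 * with the BO reserved, and every successful pin is balanced by an unpin.
 * "example_pin_for_scanout" and its argument are hypothetical.
 */
static int __maybe_unused example_pin_for_scanout(struct amdgpu_bo *example_bo)
{
        u64 gpu_addr;
        int r;

        r = amdgpu_bo_reserve(example_bo, false);
        if (r)
                return r;

        /* Pinning without NO_CPU_ACCESS keeps the BO in visible VRAM. */
        r = amdgpu_bo_pin(example_bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
        amdgpu_bo_unreserve(example_bo);
        if (r)
                return r;

        /* ... program the hardware with gpu_addr ... */

        r = amdgpu_bo_reserve(example_bo, false);
        if (r)
                return r;
        amdgpu_bo_unpin(example_bo);
        amdgpu_bo_unreserve(example_bo);
        return 0;
}
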
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
        return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

static const char *amdgpu_vram_names[] = {
        "UNKNOWN",
        "GDDR1",
        "DDR2",
        "GDDR3",
        "GDDR4",
        "GDDR5",
        "HBM",
        "DDR3"
};

int amdgpu_bo_init(struct amdgpu_device *adev)
{
        /* Add an MTRR for the VRAM */
        adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
                                              adev->mc.aper_size);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                 adev->mc.mc_vram_size >> 20,
                 (unsigned long long)adev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %s\n",
                 adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]);
        return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
        amdgpu_ttm_fini(adev);
        arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
                         struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
        if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
                return -EINVAL;

        bo->tiling_flags = tiling_flags;
        return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
        lockdep_assert_held(&bo->tbo.resv->lock.base);

        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
}

int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags)
{
        void *buffer;

        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
                        bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
        }

        if (metadata == NULL)
                return -EINVAL;

        buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;

        kfree(bo->metadata);
        bo->metadata_flags = flags;
        bo->metadata = buffer;
        bo->metadata_size = metadata_size;

        return 0;
}

int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags)
{
        if (!buffer && !metadata_size)
                return -EINVAL;

        if (buffer) {
                if (buffer_size < bo->metadata_size)
                        return -EINVAL;

                if (bo->metadata_size)
                        memcpy(buffer, bo->metadata, bo->metadata_size);
        }

        if (metadata_size)
                *metadata_size = bo->metadata_size;
        if (flags)
                *flags = bo->metadata_flags;

        return 0;
}

void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *new_mem)
{
        struct amdgpu_bo *rbo;
        struct ttm_mem_reg *old_mem = &bo->mem;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return;

        rbo = container_of(bo, struct amdgpu_bo, tbo);
        amdgpu_vm_bo_invalidate(rbo->adev, rbo);

        /* update statistics */
        if (!new_mem)
                return;

        /* move_notify is called before move happens */
        amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);

        trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
}

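/**
 * amdgpu_bo_fault_reserve_notify - notification about a BO being faulted in
 *
 * @bo: TTM BO the CPU is about to fault on
 *
 * If the faulting BO sits in VRAM beyond the CPU-visible aperture, try to
 * move it into visible VRAM, falling back to GTT when VRAM is exhausted.
 * Pinned BOs cannot be relocated and fail with -EINVAL.
 *
 * Returns 0 on success, negative error code otherwise.
 */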
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct amdgpu_device *adev;
        struct amdgpu_bo *abo;
        unsigned long offset, size, lpfn;
        int i, r;

        if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
                return 0;

        abo = container_of(bo, struct amdgpu_bo, tbo);
        adev = abo->adev;
        if (bo->mem.mem_type != TTM_PL_VRAM)
                return 0;

        size = bo->mem.num_pages << PAGE_SHIFT;
        offset = bo->mem.start << PAGE_SHIFT;
        if ((offset + size) <= adev->mc.visible_vram_size)
                return 0;

        /* Can't move a pinned BO to visible VRAM */
        if (abo->pin_count > 0)
                return -EINVAL;

        /* hurrah the memory is not visible ! */
        amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
        lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
        for (i = 0; i < abo->placement.num_placement; i++) {
                /* Force into visible VRAM */
                if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
                    (!abo->placements[i].lpfn ||
                     abo->placements[i].lpfn > lpfn))
                        abo->placements[i].lpfn = lpfn;
        }
        r = ttm_bo_validate(bo, &abo->placement, false, false);
        if (unlikely(r == -ENOMEM)) {
                amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
                return ttm_bo_validate(bo, &abo->placement, false, false);
        } else if (unlikely(r != 0)) {
                return r;
        }

        offset = bo->mem.start << PAGE_SHIFT;
        /* this should never happen */
        if ((offset + size) > adev->mc.visible_vram_size)
                return -EINVAL;

        return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared)
{
        struct reservation_object *resv = bo->tbo.resv;

        if (shared)
                reservation_object_add_shared_fence(resv, fence);
        else
                reservation_object_add_excl_fence(resv, fence);
}

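/*
 * For example (illustrative): after scheduling a DMA with
 * amdgpu_copy_buffer(), the returned fence is typically published on the
 * destination BO with amdgpu_bo_fence(bo, fence, true) so eviction and CPU
 * access wait for the copy; an exclusive fence (shared == false) is used
 * for operations that must block all later users, such as the VRAM clear
 * in amdgpu_bo_create_restricted().
 */
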
/**
 * amdgpu_bo_gpu_offset - return GPU offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns current GPU offset of the object.
 *
 * Note: the object should either be pinned or reserved when calling this
 * function; a debug check for this might be useful.
 */
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);

        return bo->tbo.offset;
}