/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

int amdgpu_ttm_init(struct amdgpu_device *adev);
void amdgpu_ttm_fini(struct amdgpu_device *adev);

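/*
 * amdgpu_get_vis_part_size - how much of a placement is CPU-visible
 *
 * Returns the number of bytes of @mem that fall inside the CPU-visible
 * part of VRAM, i.e. below adev->mc.visible_vram_size.
 */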
static u64 amdgpu_get_vis_part_size(struct amdgpu_device *adev,
				    struct ttm_mem_reg *mem)
{
	u64 ret = 0;

	if (mem->start << PAGE_SHIFT < adev->mc.visible_vram_size) {
		ret = (u64)((mem->start << PAGE_SHIFT) + mem->size) >
			adev->mc.visible_vram_size ?
			adev->mc.visible_vram_size - (mem->start << PAGE_SHIFT) :
			mem->size;
	}
	return ret;
}

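/*
 * amdgpu_update_memory_usage - track per-domain memory usage
 *
 * Adds the size of @new_mem to and subtracts the size of @old_mem from
 * the GTT/VRAM usage counters in @adev; for VRAM the CPU-visible part
 * is accounted separately. Either argument may be NULL.
 */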
static void amdgpu_update_memory_usage(struct amdgpu_device *adev,
				       struct ttm_mem_reg *old_mem,
				       struct ttm_mem_reg *new_mem)
{
	u64 vis_size;

	if (!adev)
		return;

	if (new_mem) {
		switch (new_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_add(new_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_add(new_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, new_mem);
			atomic64_add(vis_size, &adev->vram_vis_usage);
			break;
		}
	}

	if (old_mem) {
		switch (old_mem->mem_type) {
		case TTM_PL_TT:
			atomic64_sub(old_mem->size, &adev->gtt_usage);
			break;
		case TTM_PL_VRAM:
			atomic64_sub(old_mem->size, &adev->vram_usage);
			vis_size = amdgpu_get_vis_part_size(adev, old_mem);
			atomic64_sub(vis_size, &adev->vram_vis_usage);
			break;
		}
	}
}

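/*
 * amdgpu_ttm_bo_destroy - TTM destroy callback
 *
 * Called by TTM when the last reference to the BO is dropped; updates
 * the usage statistics, releases the GEM object and the parent
 * reference, frees the metadata, and finally frees the BO itself.
 */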
static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct amdgpu_bo *bo;

	bo = container_of(tbo, struct amdgpu_bo, tbo);

	amdgpu_update_memory_usage(bo->adev, &bo->tbo.mem, NULL);

	drm_gem_object_release(&bo->gem_base);
	amdgpu_bo_unref(&bo->parent);
	kfree(bo->metadata);
	kfree(bo);
}

bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &amdgpu_ttm_bo_destroy)
		return true;
	return false;
}

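/*
 * amdgpu_ttm_placement_init - build a TTM placement list
 *
 * Translates an AMDGPU_GEM_DOMAIN_* mask plus AMDGPU_GEM_CREATE_* flags
 * into an array of struct ttm_place entries. CPU-accessible VRAM
 * placements are clamped to the visible part of VRAM; when no domain
 * bit is set, cached system memory is used as the fallback.
 */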
static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
				      struct ttm_placement *placement,
				      struct ttm_place *placements,
				      u32 domain, u64 flags)
{
	u32 c = 0, i;

	placement->placement = placements;
	placement->busy_placement = placements;

	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
		if (flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS &&
		    adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			placements[c].fpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
		}
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		if (!(flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED))
			placements[c - 1].flags |= TTM_PL_FLAG_TOPDOWN;
	}

	if (domain & AMDGPU_GEM_DOMAIN_GTT) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_CPU) {
		if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM |
				TTM_PL_FLAG_UNCACHED;
		} else {
			placements[c].fpfn = 0;
			placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}

	if (domain & AMDGPU_GEM_DOMAIN_GDS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GDS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_GWS) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_GWS;
	}
	if (domain & AMDGPU_GEM_DOMAIN_OA) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_FLAG_UNCACHED |
			AMDGPU_PL_FLAG_OA;
	}

	if (!c) {
		placements[c].fpfn = 0;
		placements[c++].flags = TTM_PL_MASK_CACHING |
			TTM_PL_FLAG_SYSTEM;
	}
	placement->num_placement = c;
	placement->num_busy_placement = c;

	for (i = 0; i < c; i++) {
		if ((flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
		    (placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !placements[i].fpfn)
			placements[i].lpfn =
				adev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			placements[i].lpfn = 0;
	}
}

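/**
 * amdgpu_ttm_placement_from_domain - refresh a BO's placement list
 * @rbo: buffer object to update
 * @domain: AMDGPU_GEM_DOMAIN_* mask to place the BO in
 *
 * Rebuilds rbo->placement from @domain and the BO's creation flags.
 */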
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain)
{
	amdgpu_ttm_placement_init(rbo->adev, &rbo->placement,
				  rbo->placements, domain, rbo->flags);
}

static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
					struct ttm_placement *placement)
{
	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));

	memcpy(bo->placements, placement->placement,
	       placement->num_placement * sizeof(struct ttm_place));
	bo->placement.num_placement = placement->num_placement;
	bo->placement.num_busy_placement = placement->num_busy_placement;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
}

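/**
 * amdgpu_bo_create_restricted - create an amdgpu BO with a caller-built placement
 * @adev: amdgpu device object
 * @size: requested size in bytes, rounded up to page size
 * @byte_align: alignment in bytes
 * @kernel: true for kernel-internal (uninterruptible) allocations
 * @domain: AMDGPU_GEM_DOMAIN_* mask recorded as the preferred domains
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @placement: placement list built by the caller
 * @resv: optional reservation object to share with another BO
 * @bo_ptr: filled with the new BO on success
 *
 * Returns 0 on success or a negative error code on failure.
 */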
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
				unsigned long size, int byte_align,
				bool kernel, u32 domain, u64 flags,
				struct sg_table *sg,
				struct ttm_placement *placement,
				struct reservation_object *resv,
				struct amdgpu_bo **bo_ptr)
{
	struct amdgpu_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align;
	size_t acc_size;
	int r;

	page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
				       sizeof(struct amdgpu_bo));

	bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(adev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->adev = adev;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->prefered_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM |
					 AMDGPU_GEM_DOMAIN_GTT |
					 AMDGPU_GEM_DOMAIN_CPU |
					 AMDGPU_GEM_DOMAIN_GDS |
					 AMDGPU_GEM_DOMAIN_GWS |
					 AMDGPU_GEM_DOMAIN_OA);
	bo->allowed_domains = bo->prefered_domains;
	if (!kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
		bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

	bo->flags = flags;

	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;

	amdgpu_fill_placement_to_bo(bo, placement);
	/* Kernel allocations are uninterruptible */
	r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
	if (unlikely(r != 0))
		return r;

	*bo_ptr = bo;

	trace_amdgpu_bo_create(bo);

	return 0;
}

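/**
 * amdgpu_bo_create - create an amdgpu BO with a default placement
 * @adev: amdgpu device object
 * @size: requested size in bytes
 * @byte_align: alignment in bytes
 * @kernel: true for kernel-internal allocations
 * @domain: AMDGPU_GEM_DOMAIN_* mask to place the BO in
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @sg: optional scatter/gather table to back the BO
 * @resv: optional reservation object to share with another BO
 * @bo_ptr: filled with the new BO on success
 *
 * Convenience wrapper around amdgpu_bo_create_restricted() that builds
 * the placement list from @domain and @flags.
 *
 * Returns 0 on success or a negative error code on failure.
 */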
int amdgpu_bo_create(struct amdgpu_device *adev,
		     unsigned long size, int byte_align,
		     bool kernel, u32 domain, u64 flags,
		     struct sg_table *sg,
		     struct reservation_object *resv,
		     struct amdgpu_bo **bo_ptr)
{
	struct ttm_placement placement = {0};
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];

	memset(&placements, 0,
	       (AMDGPU_GEM_DOMAIN_MAX + 1) * sizeof(struct ttm_place));

	amdgpu_ttm_placement_init(adev, &placement,
				  placements, domain, flags);

	return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
					   domain, flags, sg, &placement,
					   resv, bo_ptr);
}

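/*
 * A minimal usage sketch (illustrative only; amdgpu_bo_reserve() and
 * amdgpu_bo_unreserve() are the helpers declared in amdgpu_object.h,
 * and the variable names are made up for the example):
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
 *			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM, &gpu_addr);
 *		if (r == 0)
 *			r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *		amdgpu_bo_unreserve(bo);
 *	}
 */

/**
 * amdgpu_bo_kmap - map a BO into kernel address space
 * @bo: BO to map; BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS
 *	are rejected with -EPERM
 * @ptr: filled with the kernel virtual address, may be NULL
 *
 * Caches the mapping in bo->kptr, so repeated calls are cheap.
 *
 * Returns 0 on success or a negative error code on failure.
 */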
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	return 0;
}

void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

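/**
 * amdgpu_bo_unref - drop a reference to a BO
 * @bo: pointer to the BO; cleared to NULL once the underlying TTM
 *	object has dropped the reference
 */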
void amdgpu_bo_unref(struct amdgpu_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;

	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

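/**
 * amdgpu_bo_pin_restricted - pin a BO within a given address range
 * @bo: BO to pin; must be reserved and must not be a userptr BO
 * @domain: AMDGPU_GEM_DOMAIN_* domain to pin into
 * @min_offset: lowest acceptable byte offset within the domain
 * @max_offset: highest acceptable byte offset, or 0 for no limit
 * @gpu_addr: optionally filled with the pinned GPU address
 *
 * Pinning an already-pinned BO only increments the pin count.
 *
 * Returns 0 on success or a negative error code on failure.
 */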
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;
	unsigned fpfn, lpfn;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
		return -EINVAL;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == AMDGPU_GEM_DOMAIN_VRAM)
				domain_start = bo->adev->mc.vram_start;
			else
				domain_start = bo->adev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (amdgpu_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	amdgpu_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->adev->mc.visible_vram_size)) {
			if (WARN_ON_ONCE(min_offset >
					 bo->adev->mc.visible_vram_size))
				return -EINVAL;
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = bo->adev->mc.visible_vram_size >> PAGE_SHIFT;
		} else {
			fpfn = min_offset >> PAGE_SHIFT;
			lpfn = max_offset >> PAGE_SHIFT;
		}
		if (fpfn > bo->placements[i].fpfn)
			bo->placements[i].fpfn = fpfn;
		if (!bo->placements[i].lpfn ||
		    (lpfn && lpfn < bo->placements[i].lpfn))
			bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = amdgpu_bo_gpu_offset(bo);
		if (domain == AMDGPU_GEM_DOMAIN_VRAM)
			bo->adev->vram_pin_size += amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size += amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
{
	return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr);
}

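/**
 * amdgpu_bo_unpin - decrement a BO's pin count and unpin at zero
 * @bo: BO to unpin; must be reserved
 *
 * When the pin count drops to zero, the NO_EVICT flag is cleared and
 * the BO is revalidated so TTM may move it again.
 *
 * Returns 0 on success or a negative error code on failure.
 */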
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->adev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->adev->vram_pin_size -= amdgpu_bo_size(bo);
		else
			bo->adev->gart_pin_size -= amdgpu_bo_size(bo);
	} else {
		dev_err(bo->adev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

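/**
 * amdgpu_bo_evict_vram - evict all BOs from VRAM
 * @adev: amdgpu device object
 *
 * Used e.g. in the suspend path so buffer contents survive while VRAM
 * loses power.
 */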
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
}

int amdgpu_bo_init(struct amdgpu_device *adev)
{
	/* Add an MTRR for the VRAM */
	adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
					      adev->mc.aper_size);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 adev->mc.mc_vram_size >> 20,
		 (unsigned long long)adev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits DDR\n",
		 adev->mc.vram_width);
	return amdgpu_ttm_init(adev);
}

void amdgpu_bo_fini(struct amdgpu_device *adev)
{
	amdgpu_ttm_fini(adev);
	arch_phys_wc_del(adev->mc.vram_mtrr);
}

int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

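/**
 * amdgpu_bo_set_tiling_flags - set a BO's tiling flags
 * @bo: BO to update
 * @tiling_flags: AMDGPU_TILING_* flags; the TILE_SPLIT field must not
 *	exceed 6
 *
 * Returns 0 on success or -EINVAL for an invalid tile split.
 */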
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
{
	if (AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
		return -EINVAL;

	bo->tiling_flags = tiling_flags;
	return 0;
}

void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
}

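/**
 * amdgpu_bo_set_metadata - attach opaque metadata to a BO
 * @bo: BO to update
 * @metadata: buffer to copy the metadata from; may be NULL only when
 *	@metadata_size is 0, which frees any existing metadata
 * @metadata_size: size of @metadata in bytes
 * @flags: opaque metadata flags stored alongside the data
 *
 * Returns 0 on success or a negative error code on failure.
 */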
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags)
{
	void *buffer;

	if (!metadata_size) {
		if (bo->metadata_size) {
			kfree(bo->metadata);
			bo->metadata = NULL;
			bo->metadata_size = 0;
		}
		return 0;
	}

	if (metadata == NULL)
		return -EINVAL;

	buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
	if (buffer == NULL)
		return -ENOMEM;

	kfree(bo->metadata);
	bo->metadata_flags = flags;
	bo->metadata = buffer;
	bo->metadata_size = metadata_size;

	return 0;
}

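/**
 * amdgpu_bo_get_metadata - read back a BO's metadata
 * @bo: BO to query
 * @buffer: destination for the metadata, may be NULL
 * @buffer_size: size of @buffer in bytes
 * @metadata_size: optionally filled with the stored metadata size
 * @flags: optionally filled with the stored metadata flags
 *
 * At least one of @buffer and @metadata_size must be provided.
 *
 * Returns 0 on success or -EINVAL if @buffer is too small.
 */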
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags)
{
	if (!buffer && !metadata_size)
		return -EINVAL;

	if (buffer) {
		if (buffer_size < bo->metadata_size)
			return -EINVAL;

		if (bo->metadata_size)
			memcpy(buffer, bo->metadata, bo->metadata_size);
	}

	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (flags)
		*flags = bo->metadata_flags;

	return 0;
}

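/**
 * amdgpu_bo_move_notify - TTM callback invoked before a BO moves
 * @bo: TTM BO that is about to move
 * @new_mem: new placement, or NULL when the BO is being unbound
 *
 * Invalidates the VM mappings of the BO and updates the per-domain
 * usage statistics.
 */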
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct amdgpu_bo *rbo;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return;

	rbo = container_of(bo, struct amdgpu_bo, tbo);
	amdgpu_vm_bo_invalidate(rbo->adev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	/* move_notify is called before move happens */
	amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
}

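/**
 * amdgpu_bo_fault_reserve_notify - TTM fault callback for VRAM BOs
 * @bo: TTM BO that faulted
 *
 * If the faulting BO lives outside the CPU-visible part of VRAM, try
 * to move it into visible VRAM, falling back to GTT when VRAM is
 * exhausted.
 *
 * Returns 0 on success or a negative error code on failure.
 */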
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
		return 0;

	abo = container_of(bo, struct amdgpu_bo, tbo);
	adev = abo->adev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= adev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
	lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < abo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((abo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!abo->placements[i].lpfn || abo->placements[i].lpfn > lpfn))
			abo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &abo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &abo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > adev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

/**
 * amdgpu_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, fence);
	else
		reservation_object_add_excl_fence(resv, fence);
}