/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

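/*
 * Common allocation helper shared by the create, userptr and dumb-buffer
 * ioctls below: it wraps a freshly allocated radeon_bo in a GEM object,
 * enforces at least page alignment, caps the size at the unpinned GTT size
 * and falls back from VRAM to GTT when a VRAM-only allocation fails.
 * A typical call, matching the dumb-buffer path further down, looks
 * roughly like:
 *
 *	r = radeon_gem_object_create(rdev, size, 0,
 *				     RADEON_GEM_DOMAIN_VRAM, 0,
 *				     false, &gobj);
 */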
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

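/*
 * Internal helper for the set_domain ioctl.  Only the CPU domain is acted
 * on for now: a CPU domain request simply waits (up to 30 seconds) on the
 * buffer's reservation object until the GPU is done with it; all other
 * domains are accepted without validation.
 */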
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			printk(KERN_ERR "Failed to wait for object: %li\n", r);
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

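/*
 * Translate a lockup detected during an ioctl into something userspace can
 * act on: -EDEADLK triggers a GPU reset and, if the reset succeeds, is
 * turned into -EAGAIN so the caller retries the ioctl.
 */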
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

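/*
 * GEM_CREATE: allocate a new buffer object for this client.  The requested
 * size is rounded up to whole pages and the resulting handle is returned in
 * args->handle; the extra GEM reference taken at allocation time is dropped
 * once the handle owns the object.
 */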
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

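/*
 * GEM_USERPTR: turn an existing, page-aligned range of user memory into a
 * buffer object.  Flag policy enforced below: READONLY needs R600 or newer
 * hardware, while writable mappings are only allowed when both ANONONLY and
 * REGISTER (MMU notifier) are requested; VALIDATE additionally validates
 * the BO into the GTT domain right away.
 */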
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

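/*
 * mmap support: userspace maps a BO by taking the fake offset returned here
 * and passing it to mmap() on the DRM file descriptor.  Userptr objects are
 * backed by user memory already and are refused with -EPERM.
 */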
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	/* read the current placement so the flush check below actually sees
	 * where the BO resides; cur_placement is never written otherwise */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	drm_free_large(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

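/*
 * GEM_VA: map or unmap a buffer object in the client's per-file virtual
 * address space (only when the VM manager is enabled).  The requested
 * offset and flags are validated against the reserved VA area and the
 * kernel-managed VALID/SYSTEM page flags, the matching bo_va is looked up,
 * and on success the page tables are updated immediately via
 * radeon_gem_va_update_vm().
 */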
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, so to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; thus
	 * moving forward we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

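/*
 * Dumb-buffer allocation for simple, unaccelerated scanout clients: the
 * pitch is the hardware-aligned width times the bytes per pixel
 * ((args->bpp + 1) / 8 converts bits per pixel to bytes), the size is
 * pitch * height rounded up to whole pages, and the BO is placed in VRAM.
 */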
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}