/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
        struct radeon_bo *robj = gem_to_radeon_bo(gobj);

        if (robj) {
                if (robj->gem_base.import_attach)
                        drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
                radeon_bo_unref(&robj);
        }
}

int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
                             int alignment, int initial_domain,
                             u32 flags, bool kernel,
                             struct drm_gem_object **obj)
{
        struct radeon_bo *robj;
        unsigned long max_size;
        int r;

        *obj = NULL;
        /* At least align on page size */
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }

        /* Maximum bo size is the unpinned gtt size since we use the gtt to
         * handle vram to system pool migrations.
         */
        max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
        if (size > max_size) {
                DRM_DEBUG("Allocation size %ldMB bigger than %ldMB limit\n",
                          size >> 20, max_size >> 20);
                return -ENOMEM;
        }

retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
                             flags, NULL, &robj);
        if (r) {
                if (r != -ERESTARTSYS) {
                        if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
                                initial_domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
                }
                return r;
        }
        *obj = &robj->gem_base;
        robj->pid = task_pid_nr(current);

        mutex_lock(&rdev->gem.mutex);
        list_add_tail(&robj->list, &rdev->gem.objects);
        mutex_unlock(&rdev->gem.mutex);

        return 0;
}

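/*
 * Illustrative sketch, not part of the driver: how an in-kernel caller
 * might use radeon_gem_object_create() to allocate a page-sized BO,
 * relying on the VRAM-to-GTT fallback in the retry path above. The
 * helper name is hypothetical and error handling is minimal.
 */
#if 0
static int example_alloc_one_page(struct radeon_device *rdev)
{
        struct drm_gem_object *gobj;
        int r;

        r = radeon_gem_object_create(rdev, PAGE_SIZE, PAGE_SIZE,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return r;
        /* ... use gem_to_radeon_bo(gobj) ... */
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}
#endif
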
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
                                 uint32_t rdomain, uint32_t wdomain)
{
        struct radeon_bo *robj;
        uint32_t domain;
        int r;

        /* FIXME: reimplement */
        robj = gem_to_radeon_bo(gobj);
        /* work out where to validate the buffer to */
        domain = wdomain;
        if (!domain) {
                domain = rdomain;
        }
        if (!domain) {
                /* Do nothing */
                printk(KERN_WARNING "Set domain without domain!\n");
                return 0;
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access, wait for the object to become idle */
                r = radeon_bo_wait(robj, NULL, false);
                if (r) {
                        printk(KERN_ERR "Failed to wait for object!\n");
                        return r;
                }
        }
        return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
        INIT_LIST_HEAD(&rdev->gem.objects);
        return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
        radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return 0;
        }

        r = radeon_bo_reserve(rbo, false);
        if (r) {
                return r;
        }

        bo_va = radeon_vm_bo_find(vm, rbo);
        if (!bo_va) {
                bo_va = radeon_vm_bo_add(rdev, vm, rbo);
        } else {
                ++bo_va->ref_count;
        }
        radeon_bo_unreserve(rbo);

        return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
                             struct drm_file *file_priv)
{
        struct radeon_bo *rbo = gem_to_radeon_bo(obj);
        struct radeon_device *rdev = rbo->rdev;
        struct radeon_fpriv *fpriv = file_priv->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        struct radeon_bo_va *bo_va;
        int r;

        if (rdev->family < CHIP_CAYMAN) {
                return;
        }

        r = radeon_bo_reserve(rbo, true);
        if (r) {
                dev_err(rdev->dev, "leaking bo va because "
                        "we failed to reserve the bo (%d)\n", r);
                return;
        }
        bo_va = radeon_vm_bo_find(vm, rbo);
        if (bo_va) {
                if (--bo_va->ref_count == 0) {
                        radeon_vm_bo_rmv(rdev, bo_va);
                }
        }
        radeon_bo_unreserve(rbo);
}

static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_info *args = data;
        struct ttm_mem_type_manager *man;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];

        args->vram_size = rdev->mc.real_vram_size;
        args->vram_visible = (u64)man->size << PAGE_SHIFT;
        args->vram_visible -= rdev->vram_pin_size;
        args->gart_size = rdev->mc.gtt_size;
        args->gart_size -= rdev->gart_pin_size;

        return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        /* TODO: implement */
        DRM_ERROR("unimplemented %s\n", __func__);
        return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_create *args = data;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        down_read(&rdev->exclusive_lock);
        /* create a gem object to contain this object in */
        args->size = roundup(args->size, PAGE_SIZE);
        r = radeon_gem_object_create(rdev, args->size, args->alignment,
                                     args->initial_domain, args->flags,
                                     false, &gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gem_handle_lockup(rdev, r);
                return r;
        }
        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;
}

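/*
 * Illustrative sketch, not part of the driver: how userspace might reach
 * radeon_gem_create_ioctl() through libdrm. Assumes <xf86drm.h> and
 * <drm/radeon_drm.h>; the helper name is hypothetical and the fd is an
 * already-open DRM file descriptor.
 */
#if 0
#include <xf86drm.h>
#include <drm/radeon_drm.h>

static int example_gem_create(int fd, uint64_t size, uint32_t *handle)
{
        struct drm_radeon_gem_create args = {
                .size = size,           /* rounded up to a page by the kernel */
                .alignment = 4096,
                .initial_domain = RADEON_GEM_DOMAIN_VRAM,
        };
        int r = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE,
                                    &args, sizeof(args));

        if (r == 0)
                *handle = args.handle;
        return r;
}
#endif
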
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_userptr *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *bo;
        uint32_t handle;
        int r;

        if (offset_in_page(args->addr | args->size))
                return -EINVAL;

        /* we only support read-only mappings for now */
        if (!(args->flags & RADEON_GEM_USERPTR_READONLY))
                return -EACCES;

        /* reject unknown flag values */
        if (args->flags & ~RADEON_GEM_USERPTR_READONLY)
                return -EINVAL;

        /* readonly pages not tested on older hardware */
        if (rdev->family < CHIP_R600)
                return -EINVAL;

        down_read(&rdev->exclusive_lock);

        /* create a gem object to contain this object in */
        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_CPU, 0,
                                     false, &gobj);
        if (r)
                goto handle_lockup;

        bo = gem_to_radeon_bo(gobj);
        r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
        if (r)
                goto release_object;

        r = drm_gem_handle_create(filp, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r)
                goto handle_lockup;

        args->handle = handle;
        up_read(&rdev->exclusive_lock);
        return 0;

release_object:
        drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(rdev, r);

        return r;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        /* transition the BO to a domain -
         * just validate the BO into a certain domain */
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        /* for now if someone requests domain CPU -
         * just make sure the buffer is finished with */
        down_read(&rdev->exclusive_lock);

        /* just do a BO wait for now */
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

        drm_gem_object_unreference_unlocked(gobj);
        up_read(&rdev->exclusive_lock);
        r = radeon_gem_handle_lockup(robj->rdev, r);
        return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
                          struct drm_device *dev,
                          uint32_t handle, uint64_t *offset_p)
{
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;

        gobj = drm_gem_object_lookup(dev, filp, handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
                return -EPERM;
        }
        *offset_p = radeon_bo_mmap_offset(robj);
        drm_gem_object_unreference_unlocked(gobj);
        return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct drm_radeon_gem_mmap *args = data;

        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

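/*
 * Illustrative sketch, not part of the driver: the two-step mapping flow
 * from userspace. The ioctl above returns a fake offset, which is then
 * passed to mmap() on the same fd. Hypothetical helper; the caller must
 * check for MAP_FAILED.
 */
#if 0
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/radeon_drm.h>

static void *example_bo_map(int fd, uint32_t handle, size_t size)
{
        struct drm_radeon_gem_mmap args = {
                .handle = handle,
                .size = size,
        };

        if (drmCommandWriteRead(fd, DRM_RADEON_GEM_MMAP,
                                &args, sizeof(args)))
                return MAP_FAILED;
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, args.addr_ptr);
}
#endif
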
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_busy *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, true);
        args->domain = radeon_mem_type_to_domain(cur_placement);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_wait_idle *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;
        uint32_t cur_placement = 0;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_wait(robj, &cur_placement, false);
        /* Flush HDP cache via MMIO if necessary */
        if (rdev->asic->mmio_hdp_flush &&
            radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
                robj->rdev->asic->mmio_hdp_flush(rdev);
        drm_gem_object_unreference_unlocked(gobj);
        r = radeon_gem_handle_lockup(rdev, r);
        return r;
}

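/*
 * Illustrative sketch, not part of the driver: blocking until a BO is
 * idle from userspace. Hypothetical helper with the same libdrm
 * assumptions as the sketches above.
 */
#if 0
static int example_bo_wait_idle(int fd, uint32_t handle)
{
        struct drm_radeon_gem_wait_idle args = { .handle = handle };

        return drmCommandWrite(fd, DRM_RADEON_GEM_WAIT_IDLE,
                               &args, sizeof(args));
}
#endif
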
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_set_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r = 0;

        DRM_DEBUG("%d\n", args->handle);
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        robj = gem_to_radeon_bo(gobj);
        r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct drm_radeon_gem_get_tiling *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *rbo;
        int r = 0;

        DRM_DEBUG("\n");
        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL)
                return -ENOENT;
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (unlikely(r != 0))
                goto out;
        radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
        radeon_bo_unreserve(rbo);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_va *args = data;
        struct drm_gem_object *gobj;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_fpriv *fpriv = filp->driver_priv;
        struct radeon_bo *rbo;
        struct radeon_bo_va *bo_va;
        u32 invalid_flags;
        int r = 0;

        if (!rdev->vm_manager.enabled) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOTTY;
        }

        /* !! DONT REMOVE !!
         * We don't support vm_id yet. To be sure we don't end up with broken
         * userspace, reject anyone trying to use a non-zero value; moving
         * forward we can then use these fields without breaking existing
         * userspace.
         */
        if (args->vm_id) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        if (args->offset < RADEON_VA_RESERVED_SIZE) {
                dev_err(&dev->pdev->dev,
                        "offset 0x%lX is in reserved area 0x%X\n",
                        (unsigned long)args->offset,
                        RADEON_VA_RESERVED_SIZE);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        /* don't remove, we need to enforce userspace to set the snooped flag,
         * otherwise we will end up with broken userspace and we won't be able
         * to enable this feature without adding a new interface
         */
        invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
        if ((args->flags & invalid_flags)) {
                dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
                        args->flags, invalid_flags);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
        case RADEON_VA_UNMAP:
                break;
        default:
                dev_err(&dev->pdev->dev, "unsupported operation %d\n",
                        args->operation);
                args->operation = RADEON_VA_RESULT_ERROR;
                return -EINVAL;
        }

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                args->operation = RADEON_VA_RESULT_ERROR;
                return -ENOENT;
        }
        rbo = gem_to_radeon_bo(gobj);
        r = radeon_bo_reserve(rbo, false);
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
                drm_gem_object_unreference_unlocked(gobj);
                return r;
        }
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
                radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }

        switch (args->operation) {
        case RADEON_VA_MAP:
                if (bo_va->it.start) {
                        args->operation = RADEON_VA_RESULT_VA_EXIST;
                        args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
                        goto out;
                }
                r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
                break;
        case RADEON_VA_UNMAP:
                r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
                break;
        default:
                break;
        }
        args->operation = RADEON_VA_RESULT_OK;
        if (r) {
                args->operation = RADEON_VA_RESULT_ERROR;
        }
out:
        radeon_bo_unreserve(rbo);
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

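/*
 * Illustrative sketch, not part of the driver: mapping a BO into the
 * per-file VM on a Cayman or newer part. Per the checks above, vm_id
 * must be 0 and the flags must not include RADEON_VM_PAGE_VALID or
 * RADEON_VM_PAGE_SYSTEM. Hypothetical helper.
 */
#if 0
static int example_bo_va_map(int fd, uint32_t handle, uint64_t gpu_va)
{
        struct drm_radeon_gem_va args = {
                .handle = handle,
                .operation = RADEON_VA_MAP,
                .vm_id = 0,
                .flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE,
                .offset = gpu_va,
        };

        return drmCommandWriteRead(fd, DRM_RADEON_GEM_VA,
                                   &args, sizeof(args));
}
#endif
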
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *filp)
{
        struct drm_radeon_gem_op *args = data;
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
        int r;

        gobj = drm_gem_object_lookup(dev, filp, args->handle);
        if (gobj == NULL) {
                return -ENOENT;
        }
        robj = gem_to_radeon_bo(gobj);

        r = -EPERM;
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
                goto out;

        r = radeon_bo_reserve(robj, false);
        if (unlikely(r))
                goto out;

        switch (args->op) {
        case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
                args->value = robj->initial_domain;
                break;
        case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
                robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
                                                      RADEON_GEM_DOMAIN_GTT |
                                                      RADEON_GEM_DOMAIN_CPU);
                break;
        default:
                r = -EINVAL;
        }

        radeon_bo_unreserve(robj);
out:
        drm_gem_object_unreference_unlocked(gobj);
        return r;
}

int radeon_mode_dumb_create(struct drm_file *file_priv,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args)
{
        struct radeon_device *rdev = dev->dev_private;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;

        args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
        args->size = args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);

        r = radeon_gem_object_create(rdev, args->size, 0,
                                     RADEON_GEM_DOMAIN_VRAM, 0,
                                     false, &gobj);
        if (r)
                return -ENOMEM;

        r = drm_gem_handle_create(file_priv, gobj, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
                return r;
        }
        args->handle = handle;
        return 0;
}

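/*
 * Illustrative sketch, not part of the driver: the generic dumb-buffer
 * ioctl that lands in radeon_mode_dumb_create(). The kernel fills in
 * pitch, size and handle. Hypothetical helper; drmIoctl() restarts on
 * EINTR.
 */
#if 0
#include <xf86drm.h>
#include <drm/drm.h>

static int example_create_dumb(int fd, uint32_t width, uint32_t height,
                               struct drm_mode_create_dumb *out)
{
        struct drm_mode_create_dumb args = {
                .width = width,
                .height = height,
                .bpp = 32,
        };
        int r = drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &args);

        if (r == 0)
                *out = args;
        return r;
}
#endif
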
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *rbo;
        unsigned i = 0;

        mutex_lock(&rdev->gem.mutex);
        list_for_each_entry(rbo, &rdev->gem.objects, list) {
                unsigned domain;
                const char *placement;

                domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
                switch (domain) {
                case RADEON_GEM_DOMAIN_VRAM:
                        placement = "VRAM";
                        break;
                case RADEON_GEM_DOMAIN_GTT:
                        placement = " GTT";
                        break;
                case RADEON_GEM_DOMAIN_CPU:
                default:
                        placement = " CPU";
                        break;
                }
                seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
                           i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
                           placement, (unsigned long)rbo->pid);
                i++;
        }
        mutex_unlock(&rdev->gem.mutex);
        return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
        {"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
        return 0;
}