drm/radeon: GPU virtual memory support v22
drivers/gpu/drm/radeon/radeon_gem.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_bo_unref(&robj);
	}
}

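/*
 * Allocate a radeon BO of at least page aligned size and alignment and
 * expose it as a GEM object.  Every object created here is also tracked
 * on rdev->gem.objects so radeon_gem_fini() can force-delete anything
 * that leaks at teardown.
 */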
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

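/*
 * Pin/unpin helpers: reserve the BO, pin it into the requested domain
 * (optionally returning the GPU address it ends up at) and unreserve it
 * again.  The unpin path deliberately ignores a failed reserve, as there
 * is nothing useful the caller could do about it.
 */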
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr)
{
	struct radeon_bo *robj = gem_to_radeon_bo(obj);
	int r;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
	radeon_bo_unreserve(robj);
	return r;
}

void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(obj);
	int r;

	r = radeon_bo_reserve(robj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
	}
}

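/*
 * Honour a set_domain request as far as currently possible: a request for
 * the CPU domain waits for the object to go idle; other domain
 * transitions are currently a no-op here (see the FIXME below).
 */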
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for the object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object !\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which is used by both the create
 * and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}

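/*
 * Called when a file handle to the object is closed.  On chips with GPU
 * virtual memory support (Cayman and newer) this drops any mapping the
 * closing client still has for the buffer in its per-file address space.
 */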
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va, *tmp;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	if (radeon_bo_reserve(rbo, false)) {
		return;
	}
	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			/* remove from this vm address space */
			mutex_lock(&vm->mutex);
			list_del(&bo_va->vm_list);
			mutex_unlock(&vm->mutex);
			list_del(&bo_va->bo_list);
			kfree(bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

/*
 * GEM ioctls.
 */
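/*
 * Report the memory sizes userspace can allocate from.  Visible VRAM is
 * the VRAM manager size minus the stolen VGA memory and the fbdev
 * framebuffer; the GART size likewise excludes a reserved page, the IB
 * pool and the ring buffers.
 */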
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

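/*
 * GEM_CREATE ioctl: allocate a buffer of the requested size (rounded up
 * to a whole number of pages) in the requested initial domain and return
 * a handle to it.  The reference taken by the allocation is dropped once
 * the handle owns the object.
 *
 * A minimal userspace sketch (illustrative only, assuming the generic
 * libdrm drmCommandWriteRead() helper and a 64 KiB GTT buffer):
 *
 *	struct drm_radeon_gem_create req = {
 *		.size = 64 * 1024,
 *		.alignment = 4096,
 *		.initial_domain = RADEON_GEM_DOMAIN_GTT,
 *	};
 *	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_CREATE, &req, sizeof(req));
 *	(on success, req.handle names the new buffer)
 */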
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

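/*
 * GEM_BUSY ioctl: non-blocking check of whether the buffer is still busy
 * on the GPU.  The placement it currently lives in (VRAM, GTT or system
 * memory) is reported back as a GEM domain alongside the busy status.
 */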
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

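/*
 * GEM_WAIT_IDLE ioctl: block until all GPU use of the buffer has
 * finished, then give the optional ASIC specific ioctl_wait_idle hook a
 * chance to do any extra post-wait work before returning to userspace.
 */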
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

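/*
 * Tiling ioctls: record or query the tiling flags and pitch associated
 * with a buffer.  The query side reserves the BO before reading the
 * flags back.
 */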
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

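/*
 * GEM_VA ioctl: map or unmap a buffer in the per-file GPU virtual address
 * space (Cayman and newer).  Several defensive checks keep the interface
 * extensible: a non zero vm_id is rejected, offsets below
 * RADEON_VA_RESERVED_SIZE are refused, and only snooped mappings are
 * accepted for now.
 *
 * An illustrative userspace mapping request (sketch only, using the
 * generic libdrm drmCommandWriteRead() helper) could look like:
 *
 *	struct drm_radeon_gem_va va = {
 *		.handle = handle,
 *		.operation = RADEON_VA_MAP,
 *		.flags = RADEON_VM_PAGE_READABLE |
 *			 RADEON_VM_PAGE_WRITEABLE |
 *			 RADEON_VM_PAGE_SNOOPED,
 *		.offset = virtual_address,
 *	};
 *	r = drmCommandWriteRead(fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
 *	(on success, va.operation is set to RADEON_VA_RESULT_OK)
 */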
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non 0 value, so that
	 * moving forward we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	switch (args->operation) {
	case RADEON_VA_MAP:
		bo_va = radeon_bo_va(rbo, &fpriv->vm);
		if (bo_va) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
				     args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

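/*
 * Dumb buffer support: create a simple VRAM buffer suitable for
 * unaccelerated scanout by generic KMS userspace.  The pitch is aligned
 * to the hardware requirements and the size rounded up to whole pages.
 */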
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}