/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

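/*
 * GEM free callback: tear down any PRIME attachment and drop the
 * driver's reference on the backing radeon_bo.
 */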
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}

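/*
 * Allocate a radeon_bo and wrap it in a GEM object. The size is
 * capped to min(visible VRAM, GTT); a failed VRAM allocation is
 * retried with GTT allowed as a fallback domain before giving up.
 */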
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

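/*
 * For now this only waits for the BO to go idle when CPU access is
 * requested; see the FIXME below.
 */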
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new
 * and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

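/*
 * Turn an -EDEADLK (lockup detected while waiting) into a GPU reset;
 * a successful reset is reported as -EAGAIN so the caller (e.g.
 * libdrm's drmIoctl retry loop) reissues the ioctl.
 */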
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
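/*
 * Report VRAM and GART sizes to userspace, with space already claimed
 * by the stolen VGA memory, fbdev and the ring buffers subtracted.
 */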
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

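/*
 * Create a new BO of the requested size and domain and return a
 * handle to it. Runs under the exclusive_lock read side so the
 * allocation cannot race with a GPU reset.
 */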
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to back this allocation */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now, if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	/* use the local rdev: robj must not be touched after the GEM
	 * reference has been dropped above */
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

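/*
 * Non-blocking busy check; also reports the BO's current placement
 * back to userspace.
 */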
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

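/*
 * Block until the BO is idle, then let the ASIC flush caches for
 * CPU access via its optional ioctl_wait_idle hook.
 */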
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call back the hw specific function, if any */
	if (rdev->asic->ioctl_wait_idle)
		rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

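/*
 * Map or unmap a BO in this client's virtual address space (Cayman
 * and up). The unused vm_id field and the VALID/SYSTEM flags are
 * rejected strictly so they can be given a meaning later without
 * breaking existing userspace.
 */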
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DON'T REMOVE !!
	 * We don't support vm_id yet. To be sure we don't end up with
	 * broken userspace, reject anyone trying to use a non-zero value;
	 * that way we can give the field a meaning later without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace setting the snooped
	 * flag, otherwise we will end up with broken userspace and we
	 * won't be able to enable this feature without adding a new
	 * interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		/* don't leak the reservation taken above */
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

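/*
 * Dumb buffer allocation: create a linear VRAM BO sized for the
 * requested width/height/bpp and hand back a handle; the mmap offset
 * comes from radeon_mode_dumb_mmap().
 */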
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, false,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}