/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <drm/drmP.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;
	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	return roundup(size, PAGE_SIZE);
}
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned long pfn;
	int i;

	if (!buf->sgt)
		return -EINTR;

	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return -EINVAL;
	}

	/* walk the scatterlist to the entry covering page_offset. */
	sgl = buf->sgt->sgl;
	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
		if (page_offset < (sgl->length >> PAGE_SHIFT))
			break;
		page_offset -= (sgl->length >> PAGE_SHIFT);
	}

	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * do not release the memory region from the exporter;
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
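/*
 * Illustrative sketch (not part of the driver): a minimal userspace call
 * path into the ioctl above, assuming a libdrm fd opened on the exynos
 * card node; use_handle() is a hypothetical placeholder.
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 1024 * 1024,
 *		.flags = EXYNOS_BO_CONTIG | EXYNOS_BO_CACHABLE,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);	// req.handle now names the object
 */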
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
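/*
 * Illustrative sketch (not part of this file): in-kernel users pair the
 * two helpers above around programming the address into hardware; the
 * lookup in exynos_drm_gem_get_dma_addr() takes an extra object reference
 * that exynos_drm_gem_put_dma_addr() drops again. "regs" and BUF_ADDR_REG
 * are hypothetical.
 *
 *	dma_addr_t *addr = exynos_drm_gem_get_dma_addr(dev, handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	writel(*addr, regs + BUF_ADDR_REG);
 *	...
 *	exynos_drm_gem_put_dma_addr(dev, handle, filp);
 */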
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}
static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
						struct file *filp)
{
	struct drm_file *file_priv;

	mutex_lock(&drm_dev->struct_mutex);

	/* find current process's drm_file from filelist. */
	list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
		if (file_priv->filp == filp) {
			mutex_unlock(&drm_dev->struct_mutex);
			return file_priv;
		}
	}

	mutex_unlock(&drm_dev->struct_mutex);
	WARN_ON(1);

	return ERR_PTR(-EFAULT);
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct drm_device *drm_dev = obj->dev;
	struct exynos_drm_gem_buf *buffer;
	struct drm_file *file_priv;
	unsigned long vm_size;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_ops = drm_dev->driver->gem_vm_ops;

	/* restore it to driver's fops. */
	filp->f_op = fops_get(drm_dev->driver->fops);

	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
	if (IS_ERR(file_priv))
		return PTR_ERR(file_priv);

	/* restore it to drm_file. */
	filp->private_data = file_priv;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes physically contiguous memory allocated
	 * by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	/*
	 * take a reference to this mapping of the object; this reference
	 * is dropped by the corresponding vm_close call.
	 */
	drm_gem_object_reference(obj);

	mutex_lock(&drm_dev->struct_mutex);
	drm_vm_open_locked(drm_dev, vma);
	mutex_unlock(&drm_dev->struct_mutex);

	return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	/*
	 * Set the specific mapper's fops; it will be restored to
	 * dev->driver->fops by exynos_drm_gem_mmap_buffer.
	 * This is used to call the specific mapper temporarily.
	 */
	file_priv->filp->f_op = &exynos_drm_gem_fops;

	/*
	 * Set the gem object to private_data so that the specific mapper
	 * can get the gem object; it will be restored to the drm_file by
	 * exynos_drm_gem_mmap_buffer.
	 */
	file_priv->filp->private_data = obj;

	addr = vm_mmap(file_priv->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr)) {
		file_priv->filp->private_data = file_priv;
		return PTR_ERR((void *)addr);
	}

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
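/*
 * Illustrative sketch (not part of this file): the matching userspace call
 * for the ioctl above, assuming "handle" came from a prior GEM create and
 * "len" does not exceed the buffer size.
 *
 *	struct drm_exynos_gem_mmap req = { .handle = handle, .size = len };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) == 0)
 *		buf = (void *)(unsigned long)req.mapped;
 */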
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}
void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}
int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region was mmaped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}
void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		/* dma_map_sg() returns 0 on error. */
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}
void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
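/*
 * Illustrative sketch (not part of this file): the generic dumb-buffer
 * flow that exercises the callback above together with
 * exynos_drm_gem_dumb_map_offset() and exynos_drm_gem_mmap().
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *
 *	struct drm_mode_map_dumb mreq = { .handle = creq.handle };
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *
 *	void *map = mmap(NULL, creq.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mreq.offset);
 */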
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map a buffer with user.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}
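/*
 * Worked example for the offset math above (illustrative numbers, 4 KiB
 * pages): with vma->vm_start = 0xb6f00000 and a fault at virtual address
 * 0xb6f03000, page_offset = (0xb6f03000 - 0xb6f00000) >> 12 = 3, so
 * exynos_drm_gem_map_buf() resolves the fourth page of the buffer.
 */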
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}