/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
#include "exynos_drm_iommu.h"

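/*
 * Buffer flags come from the exynos_drm uapi header: a memory-type bit
 * (EXYNOS_BO_CONTIG is the zero default, EXYNOS_BO_NONCONTIG selects
 * scattered pages) plus an optional cache attribute (EXYNOS_BO_CACHABLE
 * or EXYNOS_BO_WC). Anything outside EXYNOS_BO_MASK is rejected.
 */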
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

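/*
 * Translate the GEM cache flags into a page protection for a userspace
 * mapping: cachable buffers keep the default pgprot, while write-combined
 * and non-cached buffers get the corresponding pgprot_* modifier.
 */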
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
				 struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	/* TODO */

	return roundup(size, PAGE_SIZE);
}

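/*
 * Wrap drm_gem_handle_create() so callers get back only the handle; the
 * reference taken at allocation is dropped here because the handle now
 * keeps the object alive.
 */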
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle carries that id back to userspace.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

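/*
 * Tear-down path shared by the handle-create error paths and the final
 * unreference: imported (dma-buf) backing storage is left for the
 * exporter to free, while locally allocated buffers are released here.
 */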
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	drm_gem_free_mmap_offset(obj);

	/* release the file pointer to the gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
				      unsigned int gem_handle,
				      struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return 0;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	return exynos_gem_obj->buffer->size;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
					       unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj)
		return NULL;

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}

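/*
 * Allocate a fully backed GEM object: round the size up to whole pages,
 * validate the flags, then allocate the buffer with the requested memory
 * type and cache attribute. On failure everything allocated so far is
 * unwound in reverse order.
 */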
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						 unsigned int flags,
						 unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set the memory type and cache attribute requested by userspace. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0)
		goto err_gem_fini;

	return exynos_gem_obj;

err_gem_fini:
	drm_gem_object_release(&exynos_gem_obj->base);
	kfree(exynos_gem_obj);
err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

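/*
 * Userspace reaches exynos_drm_gem_create_ioctl() above through
 * DRM_IOCTL_EXYNOS_GEM_CREATE. A minimal sketch, assuming an
 * already-opened DRM fd (use_handle() is a hypothetical consumer):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size  = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *		use_handle(req.handle);	// handle now names the new object
 */
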
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

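/*
 * The lookup in exynos_drm_gem_get_dma_addr() above deliberately keeps
 * its reference so the buffer cannot vanish while its DMA address is in
 * use; the second unreference below drops it again.
 */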
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
				 unsigned int gem_handle,
				 struct drm_file *filp)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

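/*
 * Map the whole buffer through the DMA layer. Note the mapping always
 * starts at offset zero and may not exceed the buffer size; partial
 * mappings from a non-zero pgoff are not supported.
 */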
int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
			       struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem_obj->base.dev;
	struct exynos_drm_gem_buf *buffer;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes physically contiguous memory allocated
	 * by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if the user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
			     buffer->dma_addr, buffer->size,
			     &buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

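/*
 * Duplicate a vm_area_struct so the copy can outlive the original
 * mapping: bump the vma's open count and file reference, then detach
 * the copy from the mm and its list linkage.
 */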
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

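/*
 * Pin the pages behind a userspace range. For VM_PFNMAP (I/O) mappings
 * the pfns are walked directly with follow_pfn(); for ordinary anonymous
 * or file-backed memory, get_user_pages() takes a reference on each page,
 * released later by exynos_gem_put_pages_to_userptr().
 */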
int exynos_gem_get_pages_from_userptr(unsigned long start,
				      unsigned int npages,
				      struct page **pages,
				      struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region mmapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
				    npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
				     unsigned int npages,
				     struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}

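/*
 * Map/unmap a scatter-gather table for device DMA. struct_mutex only
 * serializes the map step; the entry count handed to dma_unmap_sg()
 * must match what was originally passed to dma_map_sg().
 */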
int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				   struct sg_table *sgt,
				   enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

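/*
 * Dumb-buffer support for unaccelerated framebuffers. The pitch below
 * rounds bpp up to whole bytes, e.g. a 1920x1080 request with bpp 32
 * yields pitch = 1920 * 4 = 7680 and size = 7680 * 1080 bytes.
 */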
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is called by a user application
	 *   via the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	if (is_drm_iommu_supported(dev)) {
		exynos_gem_obj = exynos_drm_gem_create(dev,
						       EXYNOS_BO_NONCONTIG |
						       EXYNOS_BO_WC,
						       args->size);
	} else {
		exynos_gem_obj = exynos_drm_gem_create(dev,
						       EXYNOS_BO_CONTIG |
						       EXYNOS_BO_WC,
						       args->size);
	}

	if (IS_ERR(exynos_gem_obj)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem_obj);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the fake offset used to mmap the dumb buffer.
	 * - this callback is called by a user application
	 *   via the DRM_IOCTL_MODE_MAP_DUMB command.
	 */

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

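/*
 * Page-fault handler for mappings that go through drm_gem_mmap(): the
 * faulting address is turned into a page index into buf->pages and the
 * matching pfn is inserted into the user's page tables.
 */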
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		ret = -EINVAL;
		goto out;
	}

	pfn = page_to_pfn(buf->pages[page_offset]);
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);

out:
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

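/*
 * fops->mmap entry point: let DRM set up the vma, then replace the
 * default fault-based mapping with an up-front dma_mmap_attrs() mapping
 * carrying the buffer's cache attribute.
 */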
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	/* set up the vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret)
		goto err_close_vm;

	update_vm_cache_attr(exynos_gem_obj, vma);

	ret = exynos_drm_gem_mmap_buffer(exynos_gem_obj, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);
	drm_gem_free_mmap_offset(obj);

	return ret;
}