/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_conf {
        struct device *dev;
};

struct vb2_dc_buf {
        struct device *dev;
        void *vaddr;
        unsigned long size;
        dma_addr_t dma_addr;
        enum dma_data_direction dma_dir;
        struct sg_table *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler handler;
        atomic_t refcount;
        struct sg_table *sgt_base;

        /* USERPTR related */
        struct vm_area_struct *vma;

        /* DMABUF related */
        struct dma_buf_attachment *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

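/*
 * Call @cb on every struct page covered by the scatterlist. This walks the
 * original (CPU-side) entries, so it also works on tables that have not
 * been DMA-mapped.
 */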
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}

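/*
 * Return the size of the run of DMA-contiguous entries at the start of a
 * mapped scatterlist. The walk stops at the first gap, so a result smaller
 * than the buffer size means the mapping cannot be used as one contiguous
 * region.
 */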
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

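/*
 * Cache synchronization before (prepare) and after (finish) hardware access.
 * Both are no-ops for buffers that carry no scatterlist (coherent MMAP
 * allocations and the pfn-based USERPTR fallback) and for DMABUF imports,
 * where the exporter is responsible for cache management.
 */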
static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

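/*
 * Drop one reference to an MMAP buffer and, when the last user is gone,
 * free it along with the sg_table cached for DMABUF export, if any.
 */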
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}

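/*
 * Allocate a coherent DMA buffer of @size bytes and set up the vm handler
 * that tracks userspace mappings of it.
 */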
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr,
                                        GFP_KERNEL | gfp_flags);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %lu failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}

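/*
 * Map the whole buffer into the vma via dma_mmap_coherent() and install
 * vb2_common_vm_ops, whose open() takes a reference on the buffer for the
 * lifetime of the mapping.
 */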
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as an in-buffer offset, but we want to
         * map the whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};

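/*
 * Give each importer a private copy of the exporter's base sg_table; one
 * scatterlist cannot be mapped to several devices at the same time. The
 * copy is freed again in vb2_dc_dmabuf_ops_detach().
 */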
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /*
         * Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

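/*
 * Map the attachment's scatterlist for the importing device. The mapping is
 * cached together with its direction, so a repeated call with the same
 * direction returns the cached table instead of remapping it.
 */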
static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dir == dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
                attach->dir = DMA_NONE;
        }

        /* map the scatterlist for the client with the new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dir = dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .kmap = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dc_dmabuf_ops_kmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

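/*
 * Build an sg_table describing the pages behind the coherent buffer, using
 * dma_get_sgtable(). The caller caches the result in buf->sgt_base, where
 * it is shared by all DMABUF attachments.
 */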
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

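/*
 * Export an MMAP buffer as a DMABUF. An extra reference is taken on the
 * buffer and dropped again in vb2_dc_dmabuf_ops_release() when the dmabuf
 * goes away.
 */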
static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
        if (IS_ERR(dbuf))
                return NULL;

        /* the dmabuf keeps a reference to the vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

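/*
 * For a VM_IO/VM_PFNMAP area, verify that the pfns backing the n_pages
 * pages at @start are contiguous and return the first one. Used as a
 * fallback when no struct pages are available for the mapping.
 */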
static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
        struct vm_area_struct *vma, unsigned long *res)
{
        unsigned long pfn, start_pfn, prev_pfn;
        unsigned int i;
        int ret;

        if (!vma_is_io(vma))
                return -EFAULT;

        ret = follow_pfn(vma, start, &pfn);
        if (ret)
                return ret;

        start_pfn = pfn;
        start += PAGE_SIZE;

        for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
                prev_pfn = pfn;
                ret = follow_pfn(vma, start, &pfn);

                if (ret) {
                        pr_err("no page for address %lu\n", start);
                        return ret;
                }
                if (pfn != prev_pfn + 1)
                        return -EINVAL;
        }

        *res = start_pfn;
        return 0;
}

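/*
 * Collect the struct pages backing a user range: by pfn lookup for
 * io/pfnmap areas (without pinning), or by get_user_pages(), which pins
 * them, for ordinary anonymous or file-backed memory.
 */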
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }

                        /* pfn is only valid once follow_pfn() succeeded */
                        if (!pfn_valid(pfn))
                                return -EINVAL;

                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

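/*
 * Undo vb2_dc_get_userptr(): unmap the scatterlist, mark the pages dirty
 * and unpin them (unless they came from an io mapping), then drop the vma
 * reference.
 */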
static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (sgt) {
                dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
                if (!vma_is_io(buf->vma))
                        vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);

                sg_free_table(sgt);
                kfree(sgt);
        }
        vb2_put_vma(buf->vma);
        kfree(buf);
}

/*
 * For some kinds of reserved memory there might be no struct page available,
 * so all that can be done to support such 'pages' is to try to convert the
 * pfn to a dma address or, as a last resort, just assume that
 * dma address == physical address (as was assumed in earlier versions
 * of videobuf2-dma-contig).
 */

#ifdef __arch_pfn_to_dma
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_bus(pfn);
}
#elif defined(__pfn_to_phys)
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        return (dma_addr_t)__pfn_to_phys(pfn);
}
#else
static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
{
        /* really, we cannot do anything better at this point */
        return (dma_addr_t)(pfn) << PAGE_SHIFT;
}
#endif

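/*
 * Take a userspace pointer, pin the pages behind it and DMA-map them,
 * verifying that the result is a single contiguous DMA region. For
 * io/pfnmap areas without struct pages, fall back to a raw pfn-to-dma
 * translation of a contiguous pfn range.
 */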
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache-aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                unsigned long pfn;
                if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
                        buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
                        buf->size = size;
                        kfree(pages);
                        return buf;
                }

                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

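/*
 * Pin an attached DMABUF: map the attachment and verify that the exporter
 * really provided a DMA-contiguous region of at least buf->size bytes.
 */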
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* check that the dmabuf is big enough to store a contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

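/*
 * Create an attachment to an imported DMABUF. The buffer is not mapped
 * here; that happens in vb2_dc_map_dmabuf() when the buffer is first used.
 */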
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create an attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc = vb2_dc_alloc,
        .put = vb2_dc_put,
        .get_dmabuf = vb2_dc_get_dmabuf,
        .cookie = vb2_dc_cookie,
        .vaddr = vb2_dc_vaddr,
        .mmap = vb2_dc_mmap,
        .get_userptr = vb2_dc_get_userptr,
        .put_userptr = vb2_dc_put_userptr,
        .prepare = vb2_dc_prepare,
        .finish = vb2_dc_finish,
        .map_dmabuf = vb2_dc_map_dmabuf,
        .unmap_dmabuf = vb2_dc_unmap_dmabuf,
        .attach_dmabuf = vb2_dc_attach_dmabuf,
        .detach_dmabuf = vb2_dc_detach_dmabuf,
        .num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

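/*
 * An allocator context simply wraps the device that performs the DMA; all
 * allocations and mappings made through the context use that device.
 */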
void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);

void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");