/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

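/*
 * Per-buffer context for this allocator: the backing page array, the
 * scatter/gather table built over those pages, the DMA mapping state and a
 * refcount shared between vb2, mmap() mappings and exported dma-bufs.
 */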
struct vb2_dma_sg_buf {
        struct device                   *dev;
        void                            *vaddr;
        struct page                     **pages;
        struct frame_vector             *vec;
        int                             offset;
        enum dma_data_direction        dma_dir;
        struct sg_table                 sg_table;
        /*
         * This will point to sg_table when used with the MMAP or USERPTR
         * memory model, and to the dma_buf sglist when used with the
         * DMABUF memory model.
         */
        struct sg_table                 *dma_sgt;
        size_t                          size;
        unsigned int                    num_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;

        struct dma_buf_attachment       *db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

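/*
 * Allocate buf->num_pages pages for an MMAP buffer, preferring large
 * physically contiguous blocks: start at the highest order that still fits
 * the remaining size, retry with a smaller order on failure, and split each
 * successful allocation into individual pages stored in buf->pages.
 */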
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        int size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}

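/*
 * MMAP allocation: allocate the page array, build an sg_table over it and
 * map it for DMA. The CPU cache is not synced here; that is deferred to the
 * prepare() memop.
 */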
static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
                unsigned long size, enum dma_data_direction dma_dir,
                gfp_t gfp_flags)
{
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        int ret;
        int num_pages;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = NULL;
        buf->dma_dir = dma_dir;
        buf->offset = 0;
        buf->size = size;
        /* size is already page aligned */
        buf->num_pages = size >> PAGE_SHIFT;
        buf->dma_sgt = &buf->sg_table;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents)
                goto fail_map;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->num_pages);
        return buf;

fail_map:
        put_device(buf->dev);
        sg_free_table(buf->dma_sgt);
fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(buf->dma_sgt);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                put_device(buf->dev);
                kfree(buf);
        }
}

static void vb2_dma_sg_prepare(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

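/*
 * USERPTR: pin the userspace pages with a frame vector, build an sg_table
 * over them and map it for DMA. As in the MMAP path, the CPU sync is left
 * to the prepare() memop.
 */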
static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
                unsigned long size,
                enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        struct frame_vector *vec;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = NULL;
        buf->dev = dev;
        buf->dma_dir = dma_dir;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        buf->dma_sgt = &buf->sg_table;
        vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
        if (IS_ERR(vec))
                goto userptr_fail_pfnvec;
        buf->vec = vec;

        buf->pages = frame_vector_pages(vec);
        if (IS_ERR(buf->pages))
                goto userptr_fail_sgtable;
        buf->num_pages = frame_vector_count(vec);

        if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_sgtable;

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents)
                goto userptr_fail_map;

        return buf;

userptr_fail_map:
        sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
        vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
        kfree(buf);
        return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                __func__, buf->num_pages);
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
                           DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(buf->dma_sgt);
        while (--i >= 0) {
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        set_page_dirty_lock(buf->pages[i]);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr) {
                if (buf->db_attach)
                        buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
                else
                        buf->vaddr = vm_map_ram(buf->pages,
                                        buf->num_pages, -1, PAGE_KERNEL);
        }

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

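/*
 * mmap() support: insert each backing page into the userspace VMA one at a
 * time with vm_insert_page() (the pages are not necessarily physically
 * contiguous), then hook up the common refcount handler so the buffer stays
 * alive as long as the mapping does.
 */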
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

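/*
 * Exporter side: these dma_buf_ops let another device import a buffer
 * allocated here. Each attachment gets its own copy of the scatter list
 * (see the attach op below) so it can be DMA-mapped independently, with its
 * own direction, per importing device.
 */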
struct vb2_dma_sg_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dma_sg_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->dma_sgt scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->dma_sgt->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dma_sg_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dma_sg_get_dmabuf */
        vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;

        return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;

        return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
        .attach = vb2_dma_sg_dmabuf_ops_attach,
        .detach = vb2_dma_sg_dmabuf_ops_detach,
        .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
        .kmap = vb2_dma_sg_dmabuf_ops_kmap,
        .kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
        .vmap = vb2_dma_sg_dmabuf_ops_vmap,
        .mmap = vb2_dma_sg_dmabuf_ops_mmap,
        .release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dma_sg_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->dma_sgt))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

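/*
 * Importer side: when a DMABUF buffer is queued, map_dmabuf pins the
 * exporter's scatterlist via dma_buf_map_attachment() and stores it in
 * buf->dma_sgt; unmap_dmabuf releases it (and any vmap) again once the
 * buffer is no longer in use.
 */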
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_sgt))
                vb2_dma_sg_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_buf *buf;
        struct dma_buf_attachment *dba;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .prepare        = vb2_dma_sg_prepare,
        .finish         = vb2_dma_sg_finish,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .get_dmabuf     = vb2_dma_sg_get_dmabuf,
        .map_dmabuf     = vb2_dma_sg_map_dmabuf,
        .unmap_dmabuf   = vb2_dma_sg_unmap_dmabuf,
        .attach_dmabuf  = vb2_dma_sg_attach_dmabuf,
        .detach_dmabuf  = vb2_dma_sg_detach_dmabuf,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
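
/*
 * Usage note (illustrative sketch, not part of this file): a driver whose
 * hardware does scatter/gather DMA selects this allocator by pointing its
 * vb2_queue at vb2_dma_sg_memops, then reads back the per-plane sg_table
 * via the plane cookie. Assuming a driver-private "struct my_buffer" and a
 * "struct vb2_queue *q" whose q->dev is already set:
 *
 *      q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *      q->mem_ops = &vb2_dma_sg_memops;
 *      q->buf_struct_size = sizeof(struct my_buffer);
 *      ret = vb2_queue_init(q);
 *
 * and later, typically in buf_prepare() or when programming the DMA engine:
 *
 *      struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *
 * vb2_dma_sg_plane_desc() is the helper from <media/videobuf2-dma-sg.h>
 * that wraps the .cookie memop above.
 */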

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");