[media] v4l2-dv-timings.c: add the new 4K timings to the list
[deliverable/linux.git] / drivers / media / v4l2-core / videobuf2-core.c
CommitLineData
e23ccc0a
PO
1/*
2 * videobuf2-core.c - V4L2 driver helper framework
3 *
4 * Copyright (C) 2010 Samsung Electronics
5 *
95072084 6 * Author: Pawel Osciak <pawel@osciak.com>
e23ccc0a
PO
7 * Marek Szyprowski <m.szyprowski@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/mm.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/sched.h>
21
95213ceb
HV
22#include <media/v4l2-dev.h>
23#include <media/v4l2-fh.h>
24#include <media/v4l2-event.h>
e23ccc0a
PO
25#include <media/videobuf2-core.h>
26
/* Module debug level; runtime-writable via the 'debug' module parameter. */
static int debug;
module_param(debug, int, 0644);

/* Print a KERN_DEBUG message when the module debug level is >= @level. */
#define dprintk(level, fmt, arg...)					      \
	do {								      \
		if (debug >= level)					      \
			printk(KERN_DEBUG "vb2: " fmt, ## arg);		      \
	} while (0)
35
b5b4541e
HV
#ifdef CONFIG_VIDEO_ADV_DEBUG

/*
 * If advanced debugging is on, then count how often each op is called
 * successfully, which can either be per-buffer or per-queue.
 *
 * This makes it easy to check that the 'init' and 'cleanup'
 * (and variations thereof) stay balanced.
 */

/* Trace a memory-op invocation; "(nop)" marks ops the allocator left unset. */
#define log_memop(vb, op)						\
	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")

/* Invoke an int-returning memory op; bump its counter only on success. */
#define call_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	int err;							\
									\
	log_memop(vb, op);						\
	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
	if (!err)							\
		(vb)->cnt_mem_ ## op++;					\
	err;								\
})

/* Invoke a pointer-returning memory op; count only non-error results. */
#define call_ptr_memop(vb, op, args...)					\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
	void *ptr;							\
									\
	log_memop(vb, op);						\
	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
	if (!IS_ERR_OR_NULL(ptr))					\
		(vb)->cnt_mem_ ## op++;					\
	ptr;								\
})

/* Invoke a void memory op; always counted (no failure to detect). */
#define call_void_memop(vb, op, args...)				\
({									\
	struct vb2_queue *_q = (vb)->vb2_queue;				\
									\
	log_memop(vb, op);						\
	if (_q->mem_ops->op)						\
		_q->mem_ops->op(args);					\
	(vb)->cnt_mem_ ## op++;						\
})

/* Trace a per-queue driver-op invocation. */
#define log_qop(q, op)							\
	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
		(q)->ops->op ? "" : " (nop)")

/* Invoke an int-returning queue op; bump its counter only on success. */
#define call_qop(q, op, args...)					\
({									\
	int err;							\
									\
	log_qop(q, op);							\
	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
	if (!err)							\
		(q)->cnt_ ## op++;					\
	err;								\
})

/* Invoke a void queue op; always counted. */
#define call_void_qop(q, op, args...)					\
({									\
	log_qop(q, op);							\
	if ((q)->ops->op)						\
		(q)->ops->op(args);					\
	(q)->cnt_ ## op++;						\
})

/* Trace a per-buffer driver-op invocation. */
#define log_vb_qop(vb, op, args...)					\
	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
		(vb)->vb2_queue->ops->op ? "" : " (nop)")

/* Invoke an int-returning per-buffer op; count only on success. */
#define call_vb_qop(vb, op, args...)					\
({									\
	int err;							\
									\
	log_vb_qop(vb, op);						\
	err = (vb)->vb2_queue->ops->op ?				\
		(vb)->vb2_queue->ops->op(args) : 0;			\
	if (!err)							\
		(vb)->cnt_ ## op++;					\
	err;								\
})

/* Invoke a void per-buffer op; always counted. */
#define call_void_vb_qop(vb, op, args...)				\
({									\
	log_vb_qop(vb, op);						\
	if ((vb)->vb2_queue->ops->op)					\
		(vb)->vb2_queue->ops->op(args);				\
	(vb)->cnt_ ## op++;						\
})

#else

/* Non-debug variants: plain conditional dispatch, no logging or counting. */

#define call_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : 0)

#define call_ptr_memop(vb, op, args...)					\
	((vb)->vb2_queue->mem_ops->op ?					\
		(vb)->vb2_queue->mem_ops->op(args) : NULL)

#define call_void_memop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->mem_ops->op)			\
			(vb)->vb2_queue->mem_ops->op(args);		\
	} while (0)

#define call_qop(q, op, args...)					\
	((q)->ops->op ? (q)->ops->op(args) : 0)

#define call_void_qop(q, op, args...)					\
	do {								\
		if ((q)->ops->op)					\
			(q)->ops->op(args);				\
	} while (0)

#define call_vb_qop(vb, op, args...)					\
	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)

#define call_void_vb_qop(vb, op, args...)				\
	do {								\
		if ((vb)->vb2_queue->ops->op)				\
			(vb)->vb2_queue->ops->op(args);			\
	} while (0)

#endif
e23ccc0a 168
/* Flags that are set by the vb2 core */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
/* Output buffer flags that should be passed on to the driver */
#define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)

/* Forward declaration: __reqbufs() cancels the queue before freeing it. */
static void __vb2_queue_cancel(struct vb2_queue *q);
179
e23ccc0a
PO
/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 * @vb:	buffer whose planes get allocator-backed memory
 *
 * Returns 0 on success or -ENOMEM if any plane allocation fails; on
 * failure every plane allocated so far is released again.
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);

		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
				      size, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}
216
217/**
218 * __vb2_buf_mem_free() - free memory of the given buffer
219 */
220static void __vb2_buf_mem_free(struct vb2_buffer *vb)
221{
e23ccc0a
PO
222 unsigned int plane;
223
224 for (plane = 0; plane < vb->num_planes; ++plane) {
a1d36d8c 225 call_void_memop(vb, put, vb->planes[plane].mem_priv);
e23ccc0a 226 vb->planes[plane].mem_priv = NULL;
a00d0266
MS
227 dprintk(3, "Freed plane %d of buffer %d\n", plane,
228 vb->v4l2_buf.index);
e23ccc0a
PO
229 }
230}
231
232/**
233 * __vb2_buf_userptr_put() - release userspace memory associated with
234 * a USERPTR buffer
235 */
236static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
237{
e23ccc0a
PO
238 unsigned int plane;
239
240 for (plane = 0; plane < vb->num_planes; ++plane) {
a00d0266 241 if (vb->planes[plane].mem_priv)
a1d36d8c 242 call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
a00d0266 243 vb->planes[plane].mem_priv = NULL;
e23ccc0a
PO
244 }
245}
246
c5384048
SS
247/**
248 * __vb2_plane_dmabuf_put() - release memory associated with
249 * a DMABUF shared plane
250 */
b5b4541e 251static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
c5384048
SS
252{
253 if (!p->mem_priv)
254 return;
255
256 if (p->dbuf_mapped)
a1d36d8c 257 call_void_memop(vb, unmap_dmabuf, p->mem_priv);
c5384048 258
a1d36d8c 259 call_void_memop(vb, detach_dmabuf, p->mem_priv);
c5384048
SS
260 dma_buf_put(p->dbuf);
261 memset(p, 0, sizeof(*p));
262}
263
264/**
265 * __vb2_buf_dmabuf_put() - release memory associated with
266 * a DMABUF shared buffer
267 */
268static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
269{
c5384048
SS
270 unsigned int plane;
271
272 for (plane = 0; plane < vb->num_planes; ++plane)
b5b4541e 273 __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
c5384048
SS
274}
275
a5e3d743
HV
276/**
277 * __setup_lengths() - setup initial lengths for every plane in
278 * every buffer on the queue
279 */
280static void __setup_lengths(struct vb2_queue *q, unsigned int n)
281{
282 unsigned int buffer, plane;
283 struct vb2_buffer *vb;
284
285 for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
286 vb = q->bufs[buffer];
287 if (!vb)
288 continue;
289
290 for (plane = 0; plane < vb->num_planes; ++plane)
291 vb->v4l2_planes[plane].length = q->plane_sizes[plane];
292 }
293}
294
e23ccc0a
PO
/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * every buffer on the queue
 * @q:	videobuf2 queue
 * @n:	number of newly added buffers (the last @n slots of q->bufs)
 *
 * The mem_offset is what userspace passes to mmap() to select a plane, so
 * offsets must be unique and page-aligned.  New offsets continue right
 * after the last plane of the last previously set-up buffer (or at 0 for
 * an empty queue).
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		struct v4l2_plane *p;
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			/* Keep every cookie page-aligned for mmap(). */
			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}
330
/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 * @q:		videobuf2 queue
 * @memory:	V4L2 memory model the new buffers use
 * @num_buffers: number of buffers to add
 * @num_planes:	planes per buffer
 *
 * Returns the number of buffers successfully allocated (may be fewer than
 * @num_buffers; allocation stops at the first failure).
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* Length stores number of planes for multiplanar buffers */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_vb_qop(vb, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	/* Set lengths/offsets only for the buffers actually created. */
	__setup_lengths(q, buffer);
	if (memory == V4L2_MEMORY_MMAP)
		__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}
400
401/**
402 * __vb2_free_mem() - release all video buffer memory for a given queue
403 */
2d86401c 404static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
e23ccc0a
PO
405{
406 unsigned int buffer;
407 struct vb2_buffer *vb;
408
2d86401c
GL
409 for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
410 ++buffer) {
e23ccc0a
PO
411 vb = q->bufs[buffer];
412 if (!vb)
413 continue;
414
415 /* Free MMAP buffers or release USERPTR buffers */
416 if (q->memory == V4L2_MEMORY_MMAP)
417 __vb2_buf_mem_free(vb);
c5384048
SS
418 else if (q->memory == V4L2_MEMORY_DMABUF)
419 __vb2_buf_dmabuf_put(vb);
e23ccc0a
PO
420 else
421 __vb2_buf_userptr_put(vb);
422 }
423}
424
/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 * @q:		videobuf2 queue
 * @buffers:	number of buffers to free from the tail of q->bufs
 *
 * Returns 0 on success or -EAGAIN if any buffer is still being prepared.
 */
static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/*
	 * Sanity check: when preparing a buffer the queue lock is released for
	 * a short while (see __buf_prepare for the details), which would allow
	 * a race with a reqbufs which can call this function. Removing the
	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
	 * check if any of the buffers is in the state PREPARING, and if so we
	 * just return -EAGAIN.
	 */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		if (q->bufs[buffer] == NULL)
			continue;
		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
			dprintk(1, "reqbufs: preparing buffers, cannot free\n");
			return -EAGAIN;
		}
	}

	/* Call driver-provided cleanup function for each buffer, if provided */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];

		if (vb && vb->planes[0].mem_priv)
			call_void_vb_qop(vb, buf_cleanup, vb);
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

#ifdef CONFIG_VIDEO_ADV_DEBUG
	/*
	 * Check that all the calls were balanced during the life-time of this
	 * queue. If not (or if the debug level is 1 or up), then dump the
	 * counters to the kernel log.
	 */
	if (q->num_buffers) {
		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
				  q->cnt_wait_prepare != q->cnt_wait_finish;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p:%s\n", q,
				unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2:     setup: %u start_streaming: %u stop_streaming: %u\n",
				q->cnt_queue_setup, q->cnt_start_streaming,
				q->cnt_stop_streaming);
			pr_info("vb2:     wait_prepare: %u wait_finish: %u\n",
				q->cnt_wait_prepare, q->cnt_wait_finish);
		}
		q->cnt_queue_setup = 0;
		q->cnt_wait_prepare = 0;
		q->cnt_wait_finish = 0;
		q->cnt_start_streaming = 0;
		q->cnt_stop_streaming = 0;
	}
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		struct vb2_buffer *vb = q->bufs[buffer];
		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
				  vb->cnt_buf_queue != vb->cnt_buf_done ||
				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
				  vb->cnt_buf_init != vb->cnt_buf_cleanup;

		if (unbalanced || debug) {
			pr_info("vb2: counters for queue %p, buffer %d:%s\n",
				q, buffer, unbalanced ? " UNBALANCED!" : "");
			pr_info("vb2:     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
				vb->cnt_buf_init, vb->cnt_buf_cleanup,
				vb->cnt_buf_prepare, vb->cnt_buf_finish);
			pr_info("vb2:     buf_queue: %u buf_done: %u\n",
				vb->cnt_buf_queue, vb->cnt_buf_done);
			pr_info("vb2:     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
				vb->cnt_mem_alloc, vb->cnt_mem_put,
				vb->cnt_mem_prepare, vb->cnt_mem_finish,
				vb->cnt_mem_mmap);
			pr_info("vb2:     get_userptr: %u put_userptr: %u\n",
				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
			pr_info("vb2:     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
			pr_info("vb2:     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
				vb->cnt_mem_get_dmabuf,
				vb->cnt_mem_num_users,
				vb->cnt_mem_vaddr,
				vb->cnt_mem_cookie);
		}
	}
#endif

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	/* With no buffers left, drop the memory model and queued list too. */
	if (!q->num_buffers) {
		q->memory = 0;
		INIT_LIST_HEAD(&q->queued_list);
	}
	return 0;
}
540
541/**
542 * __verify_planes_array() - verify that the planes array passed in struct
543 * v4l2_buffer from userspace can be safely used
544 */
2d86401c 545static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
e23ccc0a 546{
32a77260
HV
547 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
548 return 0;
549
e23ccc0a
PO
550 /* Is memory for copying plane information present? */
551 if (NULL == b->m.planes) {
552 dprintk(1, "Multi-planar buffer passed but "
553 "planes array not provided\n");
554 return -EINVAL;
555 }
556
557 if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
558 dprintk(1, "Incorrect planes array length, "
559 "expected %d, got %d\n", vb->num_planes, b->length);
560 return -EINVAL;
561 }
562
563 return 0;
564}
565
8023ed09
LP
566/**
567 * __verify_length() - Verify that the bytesused value for each plane fits in
568 * the plane length and that the data offset doesn't exceed the bytesused value.
569 */
570static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
571{
572 unsigned int length;
573 unsigned int plane;
574
575 if (!V4L2_TYPE_IS_OUTPUT(b->type))
576 return 0;
577
578 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
579 for (plane = 0; plane < vb->num_planes; ++plane) {
580 length = (b->memory == V4L2_MEMORY_USERPTR)
581 ? b->m.planes[plane].length
582 : vb->v4l2_planes[plane].length;
583
584 if (b->m.planes[plane].bytesused > length)
585 return -EINVAL;
3c5c23c5
SN
586
587 if (b->m.planes[plane].data_offset > 0 &&
588 b->m.planes[plane].data_offset >=
8023ed09
LP
589 b->m.planes[plane].bytesused)
590 return -EINVAL;
591 }
592 } else {
593 length = (b->memory == V4L2_MEMORY_USERPTR)
594 ? b->length : vb->v4l2_planes[0].length;
595
596 if (b->bytesused > length)
597 return -EINVAL;
598 }
599
600 return 0;
601}
602
25a27d91
MS
603/**
604 * __buffer_in_use() - return true if the buffer is in use and
605 * the queue cannot be freed (by the means of REQBUFS(0)) call
606 */
607static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
608{
609 unsigned int plane;
610 for (plane = 0; plane < vb->num_planes; ++plane) {
2c2dd6ac 611 void *mem_priv = vb->planes[plane].mem_priv;
25a27d91
MS
612 /*
613 * If num_users() has not been provided, call_memop
614 * will return 0, apparently nobody cares about this
615 * case anyway. If num_users() returns more than 1,
616 * we are not the only user of the plane's memory.
617 */
b5b4541e 618 if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
25a27d91
MS
619 return true;
620 }
621 return false;
622}
623
624/**
625 * __buffers_in_use() - return true if any buffers on the queue are in use and
626 * the queue cannot be freed (by the means of REQBUFS(0)) call
627 */
628static bool __buffers_in_use(struct vb2_queue *q)
629{
630 unsigned int buffer;
631 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
632 if (__buffer_in_use(q, q->bufs[buffer]))
633 return true;
634 }
635 return false;
636}
637
e23ccc0a
PO
/**
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 * @vb:	source vb2 buffer
 * @b:	destination v4l2_buffer; for multiplanar types its m.planes array
 *	must already have been validated (see __verify_planes_array())
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Copy back data such as timestamp, flags, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
		else if (q->memory == V4L2_MEMORY_DMABUF)
			b->m.fd = vb->v4l2_planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
	if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY) {
		/*
		 * For non-COPY timestamps, drop timestamp source bits
		 * and obtain the timestamp source from the queue.
		 */
		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	/* Translate the internal buffer state into userspace flags. */
	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_PREPARING:
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
}
712
713/**
714 * vb2_querybuf() - query video buffer information
715 * @q: videobuf queue
716 * @b: buffer struct passed from userspace to vidioc_querybuf handler
717 * in driver
718 *
719 * Should be called from vidioc_querybuf ioctl handler in driver.
720 * This function will verify the passed v4l2_buffer structure and fill the
721 * relevant information for the userspace.
722 *
723 * The return values from this function are intended to be directly returned
724 * from vidioc_querybuf handler in driver.
725 */
726int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
727{
728 struct vb2_buffer *vb;
32a77260 729 int ret;
e23ccc0a
PO
730
731 if (b->type != q->type) {
732 dprintk(1, "querybuf: wrong buffer type\n");
733 return -EINVAL;
734 }
735
736 if (b->index >= q->num_buffers) {
737 dprintk(1, "querybuf: buffer index out of range\n");
738 return -EINVAL;
739 }
740 vb = q->bufs[b->index];
32a77260
HV
741 ret = __verify_planes_array(vb, b);
742 if (!ret)
743 __fill_v4l2_buffer(vb, b);
744 return ret;
e23ccc0a
PO
745}
746EXPORT_SYMBOL(vb2_querybuf);
747
748/**
749 * __verify_userptr_ops() - verify that all memory operations required for
750 * USERPTR queue type have been provided
751 */
752static int __verify_userptr_ops(struct vb2_queue *q)
753{
754 if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
755 !q->mem_ops->put_userptr)
756 return -EINVAL;
757
758 return 0;
759}
760
761/**
762 * __verify_mmap_ops() - verify that all memory operations required for
763 * MMAP queue type have been provided
764 */
765static int __verify_mmap_ops(struct vb2_queue *q)
766{
767 if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
768 !q->mem_ops->put || !q->mem_ops->mmap)
769 return -EINVAL;
770
771 return 0;
772}
773
c5384048
SS
774/**
775 * __verify_dmabuf_ops() - verify that all memory operations required for
776 * DMABUF queue type have been provided
777 */
778static int __verify_dmabuf_ops(struct vb2_queue *q)
779{
780 if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
781 !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
782 !q->mem_ops->unmap_dmabuf)
783 return -EINVAL;
784
785 return 0;
786}
787
e23ccc0a 788/**
37d9ed94
HV
789 * __verify_memory_type() - Check whether the memory type and buffer type
790 * passed to a buffer operation are compatible with the queue.
791 */
792static int __verify_memory_type(struct vb2_queue *q,
793 enum v4l2_memory memory, enum v4l2_buf_type type)
794{
c5384048
SS
795 if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
796 memory != V4L2_MEMORY_DMABUF) {
37d9ed94
HV
797 dprintk(1, "reqbufs: unsupported memory type\n");
798 return -EINVAL;
799 }
800
801 if (type != q->type) {
802 dprintk(1, "reqbufs: requested type is incorrect\n");
803 return -EINVAL;
804 }
805
806 /*
807 * Make sure all the required memory ops for given memory type
808 * are available.
809 */
810 if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
811 dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
812 return -EINVAL;
813 }
814
815 if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
816 dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
817 return -EINVAL;
818 }
819
c5384048
SS
820 if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
821 dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
822 return -EINVAL;
823 }
824
37d9ed94
HV
825 /*
826 * Place the busy tests at the end: -EBUSY can be ignored when
827 * create_bufs is called with count == 0, but count == 0 should still
828 * do the memory and type validation.
829 */
830 if (q->fileio) {
831 dprintk(1, "reqbufs: file io in progress\n");
832 return -EBUSY;
833 }
834 return 0;
835}
836
837/**
838 * __reqbufs() - Initiate streaming
e23ccc0a
PO
839 * @q: videobuf2 queue
840 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
841 *
842 * Should be called from vidioc_reqbufs ioctl handler of a driver.
843 * This function:
844 * 1) verifies streaming parameters passed from the userspace,
845 * 2) sets up the queue,
846 * 3) negotiates number of buffers and planes per buffer with the driver
847 * to be used during streaming,
848 * 4) allocates internal buffer structures (struct vb2_buffer), according to
849 * the agreed parameters,
850 * 5) for MMAP memory type, allocates actual video memory, using the
851 * memory handling/allocation routines provided during queue initialization
852 *
853 * If req->count is 0, all the memory will be freed instead.
854 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
855 * and the queue is not busy, memory will be reallocated.
856 *
857 * The return values from this function are intended to be directly returned
858 * from vidioc_reqbufs handler in driver.
859 */
37d9ed94 860static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
e23ccc0a 861{
2d86401c 862 unsigned int num_buffers, allocated_buffers, num_planes = 0;
37d9ed94 863 int ret;
e23ccc0a
PO
864
865 if (q->streaming) {
866 dprintk(1, "reqbufs: streaming active\n");
867 return -EBUSY;
868 }
869
29e3fbd8 870 if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
e23ccc0a
PO
871 /*
872 * We already have buffers allocated, so first check if they
873 * are not in use and can be freed.
874 */
875 if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
876 dprintk(1, "reqbufs: memory in use, cannot free\n");
877 return -EBUSY;
878 }
879
fb64dca8
HV
880 /*
881 * Call queue_cancel to clean up any buffers in the PREPARED or
882 * QUEUED state which is possible if buffers were prepared or
883 * queued without ever calling STREAMON.
884 */
885 __vb2_queue_cancel(q);
63faabfd
HV
886 ret = __vb2_queue_free(q, q->num_buffers);
887 if (ret)
888 return ret;
29e3fbd8
MS
889
890 /*
891 * In case of REQBUFS(0) return immediately without calling
892 * driver's queue_setup() callback and allocating resources.
893 */
894 if (req->count == 0)
895 return 0;
e23ccc0a
PO
896 }
897
898 /*
899 * Make sure the requested values and current defaults are sane.
900 */
901 num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
b3379c62 902 num_buffers = max_t(unsigned int, req->count, q->min_buffers_needed);
c1426bc7 903 memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
e23ccc0a 904 memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
13b14095 905 q->memory = req->memory;
e23ccc0a
PO
906
907 /*
908 * Ask the driver how many buffers and planes per buffer it requires.
909 * Driver also sets the size and allocator context for each plane.
910 */
fc714e70 911 ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
c1426bc7 912 q->plane_sizes, q->alloc_ctx);
a1d36d8c 913 if (ret)
e23ccc0a
PO
914 return ret;
915
916 /* Finally, allocate buffers and video memory */
a7afcacc
HV
917 allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
918 if (allocated_buffers == 0) {
66072d4f
MS
919 dprintk(1, "Memory allocation failed\n");
920 return -ENOMEM;
e23ccc0a
PO
921 }
922
b3379c62
HV
923 /*
924 * There is no point in continuing if we can't allocate the minimum
925 * number of buffers needed by this vb2_queue.
926 */
927 if (allocated_buffers < q->min_buffers_needed)
928 ret = -ENOMEM;
929
e23ccc0a
PO
930 /*
931 * Check if driver can handle the allocated number of buffers.
932 */
b3379c62 933 if (!ret && allocated_buffers < num_buffers) {
2d86401c 934 num_buffers = allocated_buffers;
e23ccc0a 935
fc714e70
GL
936 ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
937 &num_planes, q->plane_sizes, q->alloc_ctx);
e23ccc0a 938
2d86401c 939 if (!ret && allocated_buffers < num_buffers)
e23ccc0a 940 ret = -ENOMEM;
e23ccc0a
PO
941
942 /*
2d86401c
GL
943 * Either the driver has accepted a smaller number of buffers,
944 * or .queue_setup() returned an error
e23ccc0a 945 */
2d86401c
GL
946 }
947
948 q->num_buffers = allocated_buffers;
949
950 if (ret < 0) {
a7afcacc
HV
951 /*
952 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
953 * from q->num_buffers.
954 */
2d86401c
GL
955 __vb2_queue_free(q, allocated_buffers);
956 return ret;
e23ccc0a
PO
957 }
958
e23ccc0a
PO
959 /*
960 * Return the number of successfully allocated buffers
961 * to the userspace.
962 */
2d86401c 963 req->count = allocated_buffers;
e23ccc0a
PO
964
965 return 0;
e23ccc0a 966}
37d9ed94
HV
967
968/**
969 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
970 * type values.
971 * @q: videobuf2 queue
972 * @req: struct passed from userspace to vidioc_reqbufs handler in driver
973 */
974int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
975{
976 int ret = __verify_memory_type(q, req->memory, req->type);
977
978 return ret ? ret : __reqbufs(q, req);
979}
e23ccc0a
PO
980EXPORT_SYMBOL_GPL(vb2_reqbufs);
981
/**
 * __create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
 */
static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
{
	unsigned int num_planes = 0, num_buffers, allocated_buffers;
	int ret;

	/* The queue is already full: no room for even one more buffer. */
	if (q->num_buffers == VIDEO_MAX_FRAME) {
		dprintk(1, "%s(): maximum number of buffers already allocated\n",
			__func__);
		return -ENOBUFS;
	}

	/*
	 * First allocation on this queue: start from clean per-plane state
	 * and latch the memory model (MMAP/USERPTR/DMABUF) for the queue.
	 */
	if (!q->num_buffers) {
		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
		q->memory = create->memory;
	}

	/* Clamp the request so the total never exceeds VIDEO_MAX_FRAME. */
	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);

	/*
	 * Ask the driver, whether the requested number of buffers, planes per
	 * buffer and their sizes are acceptable
	 */
	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
		       &num_planes, q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
			num_planes);
	if (allocated_buffers == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	/*
	 * Check if driver can handle the so far allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/*
		 * q->num_buffers contains the total number of buffers, that the
		 * queue driver has set up
		 */
		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		/* Driver may not shrink below what was actually allocated. */
		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	/*
	 * Account for the new buffers before the error path so that
	 * __vb2_queue_free() below can subtract them again.
	 */
	q->num_buffers += allocated_buffers;

	if (ret < 0) {
		/*
		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
		 * from q->num_buffers.
		 */
		__vb2_queue_free(q, allocated_buffers);
		return -ENOMEM;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	create->count = allocated_buffers;

	return 0;
}
37d9ed94
HV
1074
1075/**
53aa3b19
NT
1076 * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
1077 * memory and type values.
37d9ed94
HV
1078 * @q: videobuf2 queue
1079 * @create: creation parameters, passed from userspace to vidioc_create_bufs
1080 * handler in driver
1081 */
1082int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
1083{
1084 int ret = __verify_memory_type(q, create->memory, create->format.type);
1085
1086 create->index = q->num_buffers;
f05393d2
HV
1087 if (create->count == 0)
1088 return ret != -EBUSY ? ret : 0;
37d9ed94
HV
1089 return ret ? ret : __create_bufs(q, create);
1090}
2d86401c
GL
1091EXPORT_SYMBOL_GPL(vb2_create_bufs);
1092
e23ccc0a
PO
1093/**
1094 * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
1095 * @vb: vb2_buffer to which the plane in question belongs to
1096 * @plane_no: plane number for which the address is to be returned
1097 *
1098 * This function returns a kernel virtual address of a given plane if
1099 * such a mapping exist, NULL otherwise.
1100 */
1101void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
1102{
a00d0266 1103 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
e23ccc0a
PO
1104 return NULL;
1105
a1d36d8c 1106 return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
e23ccc0a
PO
1107
1108}
1109EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
1110
1111/**
1112 * vb2_plane_cookie() - Return allocator specific cookie for the given plane
1113 * @vb: vb2_buffer to which the plane in question belongs to
1114 * @plane_no: plane number for which the cookie is to be returned
1115 *
1116 * This function returns an allocator specific cookie for a given plane if
1117 * available, NULL otherwise. The allocator should provide some simple static
1118 * inline function, which would convert this cookie to the allocator specific
1119 * type that can be used directly by the driver to access the buffer. This can
1120 * be for example physical address, pointer to scatter list or IOMMU mapping.
1121 */
1122void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
1123{
a00d0266 1124 if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
e23ccc0a
PO
1125 return NULL;
1126
a1d36d8c 1127 return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
e23ccc0a
PO
1128}
1129EXPORT_SYMBOL_GPL(vb2_plane_cookie);
1130
1131/**
1132 * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
1133 * @vb: vb2_buffer returned from the driver
1134 * @state: either VB2_BUF_STATE_DONE if the operation finished successfully
b3379c62
HV
1135 * or VB2_BUF_STATE_ERROR if the operation finished with an error.
1136 * If start_streaming fails then it should return buffers with state
1137 * VB2_BUF_STATE_QUEUED to put them back into the queue.
e23ccc0a
PO
1138 *
1139 * This function should be called by the driver after a hardware operation on
1140 * a buffer is finished and the buffer may be returned to userspace. The driver
1141 * cannot use this buffer anymore until it is queued back to it by videobuf
1142 * by the means of buf_queue callback. Only buffers previously queued to the
1143 * driver by buf_queue can be passed to this function.
b3379c62
HV
1144 *
1145 * While streaming a buffer can only be returned in state DONE or ERROR.
1146 * The start_streaming op can also return them in case the DMA engine cannot
1147 * be started for some reason. In that case the buffers should be returned with
1148 * state QUEUED.
e23ccc0a
PO
1149 */
1150void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1151{
1152 struct vb2_queue *q = vb->vb2_queue;
1153 unsigned long flags;
3e0c2f20 1154 unsigned int plane;
e23ccc0a 1155
b3379c62 1156 if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
e23ccc0a
PO
1157 return;
1158
b3379c62
HV
1159 if (!q->start_streaming_called) {
1160 if (WARN_ON(state != VB2_BUF_STATE_QUEUED))
1161 state = VB2_BUF_STATE_QUEUED;
1162 } else if (!WARN_ON(!q->start_streaming_called)) {
1163 if (WARN_ON(state != VB2_BUF_STATE_DONE &&
1164 state != VB2_BUF_STATE_ERROR))
1165 state = VB2_BUF_STATE_ERROR;
1166 }
e23ccc0a 1167
b5b4541e
HV
1168#ifdef CONFIG_VIDEO_ADV_DEBUG
1169 /*
1170 * Although this is not a callback, it still does have to balance
1171 * with the buf_queue op. So update this counter manually.
1172 */
1173 vb->cnt_buf_done++;
1174#endif
e23ccc0a 1175 dprintk(4, "Done processing on buffer %d, state: %d\n",
9b6f5dc0 1176 vb->v4l2_buf.index, state);
e23ccc0a 1177
3e0c2f20
MS
1178 /* sync buffers */
1179 for (plane = 0; plane < vb->num_planes; ++plane)
a1d36d8c 1180 call_void_memop(vb, finish, vb->planes[plane].mem_priv);
3e0c2f20 1181
e23ccc0a
PO
1182 /* Add the buffer to the done buffers list */
1183 spin_lock_irqsave(&q->done_lock, flags);
1184 vb->state = state;
b3379c62
HV
1185 if (state != VB2_BUF_STATE_QUEUED)
1186 list_add_tail(&vb->done_entry, &q->done_list);
6ea3b980 1187 atomic_dec(&q->owned_by_drv_count);
e23ccc0a
PO
1188 spin_unlock_irqrestore(&q->done_lock, flags);
1189
b3379c62
HV
1190 if (state == VB2_BUF_STATE_QUEUED)
1191 return;
1192
e23ccc0a
PO
1193 /* Inform any processes that may be waiting for buffers */
1194 wake_up(&q->done_wq);
1195}
1196EXPORT_SYMBOL_GPL(vb2_buffer_done);
1197
/**
 * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
 * v4l2_buffer by the userspace. The caller has already verified that struct
 * v4l2_buffer has a valid number of planes.
 * @vb:		the vb2_buffer whose v4l2_buf flags/field are updated
 * @b:		the v4l2_buffer received from userspace
 * @v4l2_planes: destination plane array to fill from @b
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].bytesused =
					b->m.planes[plane].bytesused;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}

		/* Per-plane source of the payload depends on the memory model. */
		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			v4l2_planes[0].bytesused = b->bytesused;
			v4l2_planes[0].data_offset = 0;
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
			v4l2_planes[0].data_offset = 0;
		}

	}

	/* Zero flags that the vb2 core handles */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * Non-COPY timestamps and non-OUTPUT queues will get
		 * their timestamp and timestamp source flags from the
		 * queue.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	}

	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
		/*
		 * For output buffers mask out the timecode flag:
		 * this will be handled later in vb2_internal_qbuf().
		 * The 'field' is valid metadata for this output buffer
		 * and so that needs to be copied here.
		 */
		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
		vb->v4l2_buf.field = b->field;
	} else {
		/* Zero any output buffer flags as this is a capture buffer */
		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
	}
}
1292
/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 * @vb:	the buffer being queued
 * @b:	the v4l2_buffer passed from userspace (plane count already verified)
 *
 * Re-acquires userspace memory for any plane whose userptr/length changed
 * since the last queue, then runs the driver's buf_init (if any plane was
 * re-acquired) and buf_prepare callbacks.
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture queues are written to by the device, hence 'write'. */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	/* First queue of this buffer: no memory has been acquired yet. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "qbuf: userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "qbuf: provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						q->plane_sizes[plane], plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv) {
			/*
			 * buf_cleanup is called once, before the first plane
			 * is released.
			 */
			if (!reacquired) {
				reacquired = true;
				call_void_vb_qop(vb, buf_cleanup, vb);
			}
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		}

		vb->planes[plane].mem_priv = NULL;
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "qbuf: failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * One or more planes changed, so we must call buf_init to do
		 * the driver-specific initialization on the newly acquired
		 * buffer, if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "qbuf: buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer preparation failed\n");
		/* Undo the buf_init above before releasing the planes. */
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}
1394
1395/**
1396 * __qbuf_mmap() - handle qbuf of an MMAP buffer
1397 */
2d86401c 1398static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
e23ccc0a 1399{
32a77260 1400 __fill_vb2_buffer(vb, b, vb->v4l2_planes);
a1d36d8c 1401 return call_vb_qop(vb, buf_prepare, vb);
e23ccc0a
PO
1402}
1403
c5384048
SS
/**
 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
 * @vb:	the buffer being queued
 * @b:	the v4l2_buffer passed from userspace (plane count already verified)
 *
 * Looks up the dma_buf for every plane, re-attaches any plane whose fd or
 * length changed, maps all planes, then runs the driver's buf_init (when a
 * plane was re-acquired) and buf_prepare callbacks.
 */
static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* Capture queues are written to by the device, hence 'write'. */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
	/* First queue of this buffer: no memory has been acquired yet. */
	bool reacquired = vb->planes[0].mem_priv == NULL;

	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Takes a reference on the dma_buf; dropped below if unused. */
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < planes[plane].data_offset +
		    q->plane_sizes[plane]) {
			dprintk(1, "qbuf: invalid dmabuf length for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->v4l2_planes[plane].length == planes[plane].length) {
			/* Same buffer as before: drop the extra reference. */
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(1, "qbuf: buffer for plane %d changed\n", plane);

		/* buf_cleanup is called once, before the first re-attach. */
		if (!reacquired) {
			reacquired = true;
			call_void_vb_qop(vb, buf_cleanup, vb);
		}

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
			dbuf, planes[plane].length, write);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "qbuf: failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		/* The plane now owns the dma_buf reference taken above. */
		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	if (reacquired) {
		/*
		 * Call driver-specific initialization on the newly acquired buffer,
		 * if provided.
		 */
		ret = call_vb_qop(vb, buf_init, vb);
		if (ret) {
			dprintk(1, "qbuf: buffer initialization failed\n");
			goto err;
		}
	}

	ret = call_vb_qop(vb, buf_prepare, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer preparation failed\n");
		/* Undo the buf_init above before releasing the planes. */
		call_void_vb_qop(vb, buf_cleanup, vb);
		goto err;
	}

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}
1521
e23ccc0a
PO
1522/**
1523 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1524 */
1525static void __enqueue_in_driver(struct vb2_buffer *vb)
1526{
1527 struct vb2_queue *q = vb->vb2_queue;
3e0c2f20 1528 unsigned int plane;
e23ccc0a
PO
1529
1530 vb->state = VB2_BUF_STATE_ACTIVE;
6ea3b980 1531 atomic_inc(&q->owned_by_drv_count);
3e0c2f20
MS
1532
1533 /* sync buffers */
1534 for (plane = 0; plane < vb->num_planes; ++plane)
a1d36d8c 1535 call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
3e0c2f20 1536
a1d36d8c 1537 call_void_vb_qop(vb, buf_queue, vb);
e23ccc0a
PO
1538}
1539
/**
 * __buf_prepare() - common buffer preparation for qbuf/prepare_buf
 * @vb:	the buffer to prepare
 * @b:	the v4l2_buffer passed from userspace
 *
 * Verifies plane lengths, resets per-queue metadata, and dispatches to the
 * memory-model specific helper. On return the buffer is either PREPARED or
 * back in DEQUEUED state (on error).
 */
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct rw_semaphore *mmap_sem;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(1, "%s(): plane parameters verification failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* PREPARING marks the buffer busy until preparation completes. */
	vb->state = VB2_BUF_STATE_PREPARING;
	/* Clear driver-filled metadata from any previous cycle. */
	vb->v4l2_buf.timestamp.tv_sec = 0;
	vb->v4l2_buf.timestamp.tv_usec = 0;
	vb->v4l2_buf.sequence = 0;

	switch (q->memory) {
	case V4L2_MEMORY_MMAP:
		ret = __qbuf_mmap(vb, b);
		break;
	case V4L2_MEMORY_USERPTR:
		/*
		 * In case of user pointer buffers vb2 allocators need to get
		 * direct access to userspace pages. This requires getting
		 * the mmap semaphore for read access in the current process
		 * structure. The same semaphore is taken before calling mmap
		 * operation, while both qbuf/prepare_buf and mmap are called
		 * by the driver or v4l2 core with the driver's lock held.
		 * To avoid an AB-BA deadlock (mmap_sem then driver's lock in
		 * mmap and driver's lock then mmap_sem in qbuf/prepare_buf),
		 * the videobuf2 core releases the driver's lock, takes
		 * mmap_sem and then takes the driver's lock again.
		 */
		mmap_sem = &current->mm->mmap_sem;
		call_void_qop(q, wait_prepare, q);
		down_read(mmap_sem);
		call_void_qop(q, wait_finish, q);

		ret = __qbuf_userptr(vb, b);

		up_read(mmap_sem);
		break;
	case V4L2_MEMORY_DMABUF:
		ret = __qbuf_dmabuf(vb, b);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (ret)
		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
	/* On failure the buffer goes back to userspace ownership. */
	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;

	return ret;
}
1598
012043b8 1599static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
4138111a 1600 const char *opname)
2d86401c 1601{
2d86401c 1602 if (b->type != q->type) {
012043b8 1603 dprintk(1, "%s(): invalid buffer type\n", opname);
b18a8ff2 1604 return -EINVAL;
2d86401c
GL
1605 }
1606
1607 if (b->index >= q->num_buffers) {
012043b8 1608 dprintk(1, "%s(): buffer index out of range\n", opname);
b18a8ff2 1609 return -EINVAL;
2d86401c
GL
1610 }
1611
4138111a 1612 if (q->bufs[b->index] == NULL) {
2d86401c 1613 /* Should never happen */
012043b8 1614 dprintk(1, "%s(): buffer is NULL\n", opname);
b18a8ff2 1615 return -EINVAL;
2d86401c
GL
1616 }
1617
1618 if (b->memory != q->memory) {
012043b8 1619 dprintk(1, "%s(): invalid memory type\n", opname);
b18a8ff2 1620 return -EINVAL;
2d86401c
GL
1621 }
1622
4138111a 1623 return __verify_planes_array(q->bufs[b->index], b);
012043b8 1624}
2d86401c 1625
e23ccc0a 1626/**
012043b8 1627 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
e23ccc0a 1628 * @q: videobuf2 queue
012043b8
LP
1629 * @b: buffer structure passed from userspace to vidioc_prepare_buf
1630 * handler in driver
e23ccc0a 1631 *
012043b8 1632 * Should be called from vidioc_prepare_buf ioctl handler of a driver.
e23ccc0a
PO
1633 * This function:
1634 * 1) verifies the passed buffer,
012043b8
LP
1635 * 2) calls buf_prepare callback in the driver (if provided), in which
1636 * driver-specific buffer initialization can be performed,
e23ccc0a
PO
1637 *
1638 * The return values from this function are intended to be directly returned
012043b8 1639 * from vidioc_prepare_buf handler in driver.
e23ccc0a 1640 */
012043b8 1641int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
e23ccc0a 1642{
4138111a 1643 struct vb2_buffer *vb;
b2f2f047
HV
1644 int ret;
1645
1646 if (q->fileio) {
1647 dprintk(1, "%s(): file io in progress\n", __func__);
1648 return -EBUSY;
1649 }
4138111a 1650
b2f2f047 1651 ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
4138111a
HV
1652 if (ret)
1653 return ret;
1654
1655 vb = q->bufs[b->index];
1656 if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1657 dprintk(1, "%s(): invalid buffer state %d\n", __func__,
1658 vb->state);
1659 return -EINVAL;
1660 }
1661
1662 ret = __buf_prepare(vb, b);
1663 if (!ret) {
1664 /* Fill buffer information for the userspace */
1665 __fill_v4l2_buffer(vb, b);
1666
1667 dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
1668 }
1669 return ret;
012043b8
LP
1670}
1671EXPORT_SYMBOL_GPL(vb2_prepare_buf);
e23ccc0a 1672
02f142ec
HV
/**
 * vb2_start_streaming() - Attempt to start streaming.
 * @q:		videobuf2 queue
 *
 * Attempt to start streaming. When this function is called there must be
 * at least q->min_buffers_needed buffers queued up (i.e. the minimum
 * number of buffers required for the DMA engine to function). If the
 * @start_streaming op fails it is supposed to return all the driver-owned
 * buffers back to vb2 in state QUEUED. Check if that happened and if
 * not warn and reclaim them forcefully.
 */
static int vb2_start_streaming(struct vb2_queue *q)
{
	struct vb2_buffer *vb;
	int ret;

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/* Tell the driver to start streaming */
	ret = call_qop(q, start_streaming, q,
		       atomic_read(&q->owned_by_drv_count));
	/* Record success so later qbuf/done paths know streaming is live. */
	q->start_streaming_called = ret == 0;
	if (!ret)
		return 0;

	dprintk(1, "qbuf: driver refused to start streaming\n");
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		unsigned i;

		/*
		 * Forcefully reclaim buffers if the driver did not
		 * correctly return them to vb2.
		 */
		for (i = 0; i < q->num_buffers; ++i) {
			vb = q->bufs[i];
			if (vb->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
		}
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}
	return ret;
}
1721
/*
 * vb2_internal_qbuf() - core implementation of VIDIOC_QBUF, shared by
 * vb2_qbuf() and the file-io emulation. Verifies @b, prepares the buffer
 * if needed, appends it to the queued list and, when streaming, hands it
 * to the driver.
 */
static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
	struct vb2_buffer *vb;

	if (ret)
		return ret;

	vb = q->bufs[b->index];

	/* Only DEQUEUED (prepare here) or PREPARED buffers may be queued. */
	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		ret = __buf_prepare(vb, b);
		if (ret)
			return ret;
		break;
	case VB2_BUF_STATE_PREPARED:
		break;
	case VB2_BUF_STATE_PREPARING:
		dprintk(1, "qbuf: buffer still being prepared\n");
		return -EINVAL;
	default:
		dprintk(1, "%s(): invalid buffer state %d\n", __func__,
			vb->state);
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	q->queued_count++;
	vb->state = VB2_BUF_STATE_QUEUED;
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		    V4L2_BUF_FLAG_TIMESTAMP_COPY)
			vb->v4l2_buf.timestamp = b->timestamp;
		vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vb->v4l2_buf.timecode = b->timecode;
	}

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->start_streaming_called)
		__enqueue_in_driver(vb);

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	/*
	 * If streamon has been called, and we haven't yet called
	 * start_streaming() since not enough buffers were queued, and
	 * we now have reached the minimum number of queued buffers,
	 * then we can finally call start_streaming().
	 */
	if (q->streaming && !q->start_streaming_called &&
	    q->queued_count >= q->min_buffers_needed) {
		ret = vb2_start_streaming(q);
		if (ret)
			return ret;
	}

	dprintk(1, "%s() of buffer %d succeeded\n", __func__, vb->v4l2_buf.index);
	return 0;
}
b2f2f047
HV
1795
1796/**
1797 * vb2_qbuf() - Queue a buffer from userspace
1798 * @q: videobuf2 queue
1799 * @b: buffer structure passed from userspace to vidioc_qbuf handler
1800 * in driver
1801 *
1802 * Should be called from vidioc_qbuf ioctl handler of a driver.
1803 * This function:
1804 * 1) verifies the passed buffer,
1805 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1806 * which driver-specific buffer initialization can be performed,
1807 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
1808 * callback for processing.
1809 *
1810 * The return values from this function are intended to be directly returned
1811 * from vidioc_qbuf handler in driver.
1812 */
1813int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1814{
1815 if (q->fileio) {
1816 dprintk(1, "%s(): file io in progress\n", __func__);
1817 return -EBUSY;
1818 }
1819
1820 return vb2_internal_qbuf(q, b);
1821}
e23ccc0a
PO
1822EXPORT_SYMBOL_GPL(vb2_qbuf);
1823
/**
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 *
 * Returns 0 once done_list is non-empty, -EINVAL if streaming stopped,
 * -EAGAIN for a nonblocking call with nothing ready, or the error from an
 * interrupted sleep.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_void_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_void_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "Sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}
1888
/**
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 *
 * On success *@vb points at the first completed buffer, which has been
 * removed from done_list only if @b can describe all of its planes.
 */
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
				struct v4l2_buffer *b, int nonblocking)
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if v4l2_buffer can handle all
	 * the planes.
	 */
	ret = __verify_planes_array(*vb, b);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}
1924
1925/**
1926 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1927 * @q: videobuf2 queue
1928 *
1929 * This function will wait until all buffers that have been given to the driver
1930 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
1931 * wait_prepare, wait_finish pair. It is intended to be called with all locks
1932 * taken, for example from stop_streaming() callback.
1933 */
1934int vb2_wait_for_all_buffers(struct vb2_queue *q)
1935{
1936 if (!q->streaming) {
1937 dprintk(1, "Streaming off, will not wait for buffers\n");
1938 return -EINVAL;
1939 }
1940
b3379c62 1941 if (q->start_streaming_called)
6ea3b980 1942 wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
e23ccc0a
PO
1943 return 0;
1944}
1945EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
1946
c5384048
SS
1947/**
1948 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
1949 */
1950static void __vb2_dqbuf(struct vb2_buffer *vb)
1951{
1952 struct vb2_queue *q = vb->vb2_queue;
1953 unsigned int i;
1954
1955 /* nothing to do if the buffer is already dequeued */
1956 if (vb->state == VB2_BUF_STATE_DEQUEUED)
1957 return;
1958
1959 vb->state = VB2_BUF_STATE_DEQUEUED;
1960
1961 /* unmap DMABUF buffer */
1962 if (q->memory == V4L2_MEMORY_DMABUF)
1963 for (i = 0; i < vb->num_planes; ++i) {
1964 if (!vb->planes[i].dbuf_mapped)
1965 continue;
a1d36d8c 1966 call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
c5384048
SS
1967 vb->planes[i].dbuf_mapped = 0;
1968 }
1969}
1970
/*
 * vb2_internal_dqbuf() - dequeue one done buffer and hand it to userspace.
 *
 * Validates @b->type, waits for (or polls, if @nonblocking) a finished
 * buffer, lets the driver run buf_finish, fills in @b for userspace and
 * returns the buffer to the DEQUEUED state.
 */
static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "dqbuf: invalid buffer type\n");
		return -EINVAL;
	}
	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
	if (ret < 0)
		return ret;

	/* Only DONE and ERROR buffers may legitimately reach userspace. */
	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "dqbuf: Returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "dqbuf: Returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "dqbuf: Invalid buffer state\n");
		return -EINVAL;
	}

	/*
	 * buf_finish must run before __fill_v4l2_buffer(): the driver may
	 * still update payload/timestamp data that userspace should see.
	 */
	call_void_vb_qop(vb, buf_finish, vb);

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	q->queued_count--;
	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	return 0;
}
b2f2f047
HV
2011
2012/**
2013 * vb2_dqbuf() - Dequeue a buffer to the userspace
2014 * @q: videobuf2 queue
2015 * @b: buffer structure passed from userspace to vidioc_dqbuf handler
2016 * in driver
2017 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
2018 * buffers ready for dequeuing are present. Normally the driver
2019 * would be passing (file->f_flags & O_NONBLOCK) here
2020 *
2021 * Should be called from vidioc_dqbuf ioctl handler of a driver.
2022 * This function:
2023 * 1) verifies the passed buffer,
2024 * 2) calls buf_finish callback in the driver (if provided), in which
2025 * driver can perform any additional operations that may be required before
2026 * returning the buffer to userspace, such as cache sync,
2027 * 3) the buffer struct members are filled with relevant information for
2028 * the userspace.
2029 *
2030 * The return values from this function are intended to be directly returned
2031 * from vidioc_dqbuf handler in driver.
2032 */
2033int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2034{
2035 if (q->fileio) {
2036 dprintk(1, "dqbuf: file io in progress\n");
2037 return -EBUSY;
2038 }
2039 return vb2_internal_dqbuf(q, b, nonblocking);
2040}
e23ccc0a
PO
2041EXPORT_SYMBOL_GPL(vb2_dqbuf);
2042
bd323e28
MS
/**
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->start_streaming_called)
		call_qop(q, stop_streaming, q);
	q->streaming = 0;
	q->start_streaming_called = 0;
	q->queued_count = 0;

	/*
	 * A well-behaved driver returns every buffer from stop_streaming().
	 * If it did not, force-complete the stragglers with the ERROR state
	 * so the refcount reaches zero.
	 */
	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
		for (i = 0; i < q->num_buffers; ++i)
			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
		/* Must be zero now */
		WARN_ON(atomic_read(&q->owned_by_drv_count));
	}

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->owned_by_drv_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 * Make sure to call buf_finish for any queued buffers. Normally
	 * that's done in dqbuf, but that's not going to happen when we
	 * cancel the whole queue. Note: this code belongs here, not in
	 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
	 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
	 */
	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = q->bufs[i];

		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
			vb->state = VB2_BUF_STATE_PREPARED;
			call_void_vb_qop(vb, buf_finish, vb);
		}
		__vb2_dqbuf(vb);
	}
}
2102
b2f2f047 2103static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a 2104{
5db2c3ba 2105 int ret;
e23ccc0a
PO
2106
2107 if (type != q->type) {
2108 dprintk(1, "streamon: invalid stream type\n");
2109 return -EINVAL;
2110 }
2111
2112 if (q->streaming) {
f956035c
RR
2113 dprintk(3, "streamon successful: already streaming\n");
2114 return 0;
e23ccc0a
PO
2115 }
2116
548df783
RR
2117 if (!q->num_buffers) {
2118 dprintk(1, "streamon: no buffers have been allocated\n");
2119 return -EINVAL;
2120 }
2121
249f5a58
RRD
2122 if (!q->num_buffers) {
2123 dprintk(1, "streamon: no buffers have been allocated\n");
2124 return -EINVAL;
2125 }
b3379c62
HV
2126 if (q->num_buffers < q->min_buffers_needed) {
2127 dprintk(1, "streamon: need at least %u allocated buffers\n",
2128 q->min_buffers_needed);
2129 return -EINVAL;
2130 }
249f5a58 2131
e23ccc0a 2132 /*
b3379c62
HV
2133 * Tell driver to start streaming provided sufficient buffers
2134 * are available.
e23ccc0a 2135 */
b3379c62
HV
2136 if (q->queued_count >= q->min_buffers_needed) {
2137 ret = vb2_start_streaming(q);
2138 if (ret) {
2139 __vb2_queue_cancel(q);
2140 return ret;
2141 }
5db2c3ba
PO
2142 }
2143
2144 q->streaming = 1;
e23ccc0a 2145
e23ccc0a
PO
2146 dprintk(3, "Streamon successful\n");
2147 return 0;
2148}
e23ccc0a
PO
2149
2150/**
b2f2f047 2151 * vb2_streamon - start streaming
e23ccc0a 2152 * @q: videobuf2 queue
b2f2f047 2153 * @type: type argument passed from userspace to vidioc_streamon handler
e23ccc0a 2154 *
b2f2f047 2155 * Should be called from vidioc_streamon handler of a driver.
e23ccc0a 2156 * This function:
b2f2f047
HV
2157 * 1) verifies current state
2158 * 2) passes any previously queued buffers to the driver and starts streaming
e23ccc0a 2159 *
e23ccc0a 2160 * The return values from this function are intended to be directly returned
b2f2f047 2161 * from vidioc_streamon handler in the driver.
e23ccc0a 2162 */
b2f2f047 2163int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
e23ccc0a 2164{
b25748fe 2165 if (q->fileio) {
b2f2f047 2166 dprintk(1, "streamon: file io in progress\n");
b25748fe
MS
2167 return -EBUSY;
2168 }
b2f2f047
HV
2169 return vb2_internal_streamon(q, type);
2170}
2171EXPORT_SYMBOL_GPL(vb2_streamon);
b25748fe 2172
b2f2f047
HV
2173static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2174{
e23ccc0a
PO
2175 if (type != q->type) {
2176 dprintk(1, "streamoff: invalid stream type\n");
2177 return -EINVAL;
2178 }
2179
e23ccc0a
PO
2180 /*
2181 * Cancel will pause streaming and remove all buffers from the driver
2182 * and videobuf, effectively returning control over them to userspace.
3f1a9a33
HV
2183 *
2184 * Note that we do this even if q->streaming == 0: if you prepare or
2185 * queue buffers, and then call streamoff without ever having called
2186 * streamon, you would still expect those buffers to be returned to
2187 * their normal dequeued state.
e23ccc0a
PO
2188 */
2189 __vb2_queue_cancel(q);
2190
2191 dprintk(3, "Streamoff successful\n");
2192 return 0;
2193}
b2f2f047
HV
2194
2195/**
2196 * vb2_streamoff - stop streaming
2197 * @q: videobuf2 queue
2198 * @type: type argument passed from userspace to vidioc_streamoff handler
2199 *
2200 * Should be called from vidioc_streamoff handler of a driver.
2201 * This function:
2202 * 1) verifies current state,
2203 * 2) stop streaming and dequeues any queued buffers, including those previously
2204 * passed to the driver (after waiting for the driver to finish).
2205 *
2206 * This call can be used for pausing playback.
2207 * The return values from this function are intended to be directly returned
2208 * from vidioc_streamoff handler in the driver
2209 */
2210int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2211{
2212 if (q->fileio) {
2213 dprintk(1, "streamoff: file io in progress\n");
2214 return -EBUSY;
2215 }
2216 return vb2_internal_streamoff(q, type);
2217}
e23ccc0a
PO
2218EXPORT_SYMBOL_GPL(vb2_streamoff);
2219
2220/**
2221 * __find_plane_by_offset() - find plane associated with the given offset off
2222 */
2223static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2224 unsigned int *_buffer, unsigned int *_plane)
2225{
2226 struct vb2_buffer *vb;
2227 unsigned int buffer, plane;
2228
2229 /*
2230 * Go over all buffers and their planes, comparing the given offset
2231 * with an offset assigned to each plane. If a match is found,
2232 * return its buffer and plane numbers.
2233 */
2234 for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2235 vb = q->bufs[buffer];
2236
2237 for (plane = 0; plane < vb->num_planes; ++plane) {
2238 if (vb->v4l2_planes[plane].m.mem_offset == off) {
2239 *_buffer = buffer;
2240 *_plane = plane;
2241 return 0;
2242 }
2243 }
2244 }
2245
2246 return -EINVAL;
2247}
2248
83ae7c5a
TS
2249/**
2250 * vb2_expbuf() - Export a buffer as a file descriptor
2251 * @q: videobuf2 queue
2252 * @eb: export buffer structure passed from userspace to vidioc_expbuf
2253 * handler in driver
2254 *
2255 * The return values from this function are intended to be directly returned
2256 * from vidioc_expbuf handler in driver.
2257 */
2258int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2259{
2260 struct vb2_buffer *vb = NULL;
2261 struct vb2_plane *vb_plane;
2262 int ret;
2263 struct dma_buf *dbuf;
2264
2265 if (q->memory != V4L2_MEMORY_MMAP) {
2266 dprintk(1, "Queue is not currently set up for mmap\n");
2267 return -EINVAL;
2268 }
2269
2270 if (!q->mem_ops->get_dmabuf) {
2271 dprintk(1, "Queue does not support DMA buffer exporting\n");
2272 return -EINVAL;
2273 }
2274
ea3aba84
PZ
2275 if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
2276 dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
83ae7c5a
TS
2277 return -EINVAL;
2278 }
2279
2280 if (eb->type != q->type) {
2281 dprintk(1, "qbuf: invalid buffer type\n");
2282 return -EINVAL;
2283 }
2284
2285 if (eb->index >= q->num_buffers) {
2286 dprintk(1, "buffer index out of range\n");
2287 return -EINVAL;
2288 }
2289
2290 vb = q->bufs[eb->index];
2291
2292 if (eb->plane >= vb->num_planes) {
2293 dprintk(1, "buffer plane out of range\n");
2294 return -EINVAL;
2295 }
2296
2297 vb_plane = &vb->planes[eb->plane];
2298
a1d36d8c 2299 dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
83ae7c5a
TS
2300 if (IS_ERR_OR_NULL(dbuf)) {
2301 dprintk(1, "Failed to export buffer %d, plane %d\n",
2302 eb->index, eb->plane);
2303 return -EINVAL;
2304 }
2305
ea3aba84 2306 ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
83ae7c5a
TS
2307 if (ret < 0) {
2308 dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2309 eb->index, eb->plane, ret);
2310 dma_buf_put(dbuf);
2311 return ret;
2312 }
2313
2314 dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
2315 eb->index, eb->plane, ret);
2316 eb->fd = ret;
2317
2318 return 0;
2319}
2320EXPORT_SYMBOL_GPL(vb2_expbuf);
2321
e23ccc0a
PO
/**
 * vb2_mmap() - map video buffers into application address space
 * @q:		videobuf2 queue
 * @vma:	vma passed to the mmap file operation handler in the driver
 *
 * Should be called from mmap file operation handler of a driver.
 * This function maps one plane of one of the available video buffers to
 * userspace. To map whole video memory allocated on reqbufs, this function
 * has to be called once per each plane per each buffer previously allocated.
 *
 * When the userspace application calls mmap, it passes to it an offset returned
 * to it earlier by the means of vidioc_querybuf handler. That offset acts as
 * a "cookie", which is then used to identify the plane to be mapped.
 * This function finds a plane with a matching offset and a mapping is performed
 * by the means of a provided memory operation.
 *
 * The return values from this function are intended to be directly returned
 * from the mmap handler in driver.
 */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;
	unsigned long length;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		/* OUTPUT buffers are written by userspace. */
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		/* CAPTURE buffers are read by userspace. */
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(1, "Invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page_aligned buffers.
	 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
	 * so, we need to do the same here.
	 */
	length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(1,
			"MMAP invalid, as it would overflow buffer length\n");
		return -EINVAL;
	}

	/* Delegate the actual mapping to the memory allocator backend. */
	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
	if (ret)
		return ret;

	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);
2402
6f524ec1
SJ
#ifndef CONFIG_MMU
/*
 * vb2_get_unmapped_area() - NOMMU replacement for picking a mapping address.
 *
 * On no-MMU systems userspace must map at the buffer's kernel address;
 * look up the plane matching the mmap "cookie" offset and return its
 * kernel virtual address. Errors are returned as negative values encoded
 * in the unsigned long return, matching the get_unmapped_area convention.
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	return (unsigned long)vb2_plane_vaddr(vb, plane);
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif
2433
b25748fe
MS
2434static int __vb2_init_fileio(struct vb2_queue *q, int read);
2435static int __vb2_cleanup_fileio(struct vb2_queue *q);
e23ccc0a
PO
2436
/**
 * vb2_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
 * be informed that the file descriptor of a video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
 * pending events.
 *
 * The return values from this function are intended to be directly returned
 * from poll handler in driver.
 */
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned int res = 0;
	unsigned long flags;

	/* Report pending v4l2 events (POLLPRI) when the driver uses v4l2_fh. */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	/* Nothing more to do if the caller did not ask for buffer events. */
	if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
		return res;
	if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
		return res;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return res | POLLERR;
		}
		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return res | POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return res | POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if no buffers have already been queued.
	 */
	if (list_empty(&q->queued_list))
		return res | POLLERR;

	if (list_empty(&q->done_list))
		poll_wait(file, &q->done_wq, wait);

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
				res | POLLOUT | POLLWRNORM :
				res | POLLIN | POLLRDNORM;
	}
	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);
2525
/**
 * vb2_queue_init() - initialize a videobuf2 queue
 * @q:		videobuf2 queue; this structure should be allocated in driver
 *
 * The vb2_queue structure should be allocated by the driver. The driver is
 * responsible of clearing it's content and setting initial values for some
 * required entries before calling this function.
 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
 * to the struct vb2_queue description in include/media/videobuf2-core.h
 * for more information.
 */
int vb2_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q) ||
	    WARN_ON(!q->ops) ||
	    WARN_ON(!q->mem_ops) ||
	    WARN_ON(!q->type) ||
	    WARN_ON(!q->io_modes) ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue) ||
	    WARN_ON(q->timestamp_flags &
		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	init_waitqueue_head(&q->done_wq);

	/* Default to the plain vb2_buffer when the driver has no wrapper. */
	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_init);
2569
/**
 * vb2_queue_release() - stop streaming, release the queue and free memory
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs necessary clean ups, including
 * freeing video buffer memory. The driver is responsible for freeing
 * the vb2_queue structure itself.
 */
void vb2_queue_release(struct vb2_queue *q)
{
	/* Order matters: stop file-io emulation, cancel, then free buffers. */
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	__vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);
2585
b25748fe
MS
/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of the (single) plane */
	unsigned int size;	/* usable size of the plane in bytes */
	unsigned int pos;	/* current read/write offset within the plane */
	unsigned int queued:1;	/* set while the buffer is queued to the driver */
};
2599
/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * @cur_index:	the index of the buffer currently being read from or
 *		written to. If equal to q->num_buffers then a new buffer
 *		must be dequeued.
 * @initial_index: in the read() case all buffers are queued up immediately
 *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
 *		buffers. However, in the write() case no buffers are initially
 *		queued, instead whenever a buffer is full it is queued up by
 *		__vb2_perform_fileio(). Only once all available buffers have
 *		been queued up will __vb2_perform_fileio() start to dequeue
 *		buffers. This means that initially __vb2_perform_fileio()
 *		needs to know what buffer index to use when it is queuing up
 *		the buffers for the first time. That initial index is stored
 *		in this field. Once it is equal to q->num_buffers all
 *		available buffers have been queued and __vb2_perform_fileio()
 *		should start the normal dequeue/queue cycle.
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;	/* reusable REQBUFS argument */
	struct v4l2_buffer b;		/* reusable QBUF/DQBUF argument */
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];	/* per-buffer state */
	unsigned int cur_index;
	unsigned int initial_index;
	unsigned int q_count;		/* total buffers queued so far */
	unsigned int dq_count;		/* total buffers dequeued so far */
	unsigned int flags;		/* copy of q->io_flags */
};
2634
/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 *
 * Allocates the emulator state, requests MMAP buffers from the driver,
 * maps them into kernel space and starts streaming. In read mode all
 * buffers are pre-queued. On any failure the buffers are released again
 * and an error code is returned.
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
		    (!read && !(q->io_modes & VB2_WRITE))))
		return -EINVAL;

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	ret = vb2_reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;
			memset(b, 0, sizeof(*b));
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}
		/*
		 * All buffers have been queued, so mark that by setting
		 * initial_index to q->num_buffers
		 */
		fileio->initial_index = q->num_buffers;
		fileio->cur_index = q->num_buffers;
	}

	/*
	 * Start streaming.
	 */
	ret = vb2_streamon(q, q->type);
	if (ret)
		goto err_reqbufs;

	q->fileio = fileio;

	return ret;

err_reqbufs:
	/* Free the driver-allocated buffers by requesting a count of 0. */
	fileio->req.count = 0;
	vb2_reqbufs(q, &fileio->req);

err_kfree:
	kfree(fileio);
	return ret;
}
2756
2757/**
2758 * __vb2_cleanup_fileio() - free resourced used by file io emulator
2759 * @q: videobuf2 queue
2760 */
2761static int __vb2_cleanup_fileio(struct vb2_queue *q)
2762{
2763 struct vb2_fileio_data *fileio = q->fileio;
2764
2765 if (fileio) {
b2f2f047 2766 vb2_internal_streamoff(q, q->type);
b25748fe 2767 q->fileio = NULL;
b25748fe
MS
2768 fileio->req.count = 0;
2769 vb2_reqbufs(q, &fileio->req);
2770 kfree(fileio);
2771 dprintk(3, "file io emulator closed\n");
2772 }
2773 return 0;
2774}
2775
/**
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointed to target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means blocking calls, 0 means nonblocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	int ret, index;

	dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize emulator on first call.
	 */
	if (!q->fileio) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
		dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index = fileio->b.index;
		buf = &fileio->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
	} else {
		/* Continue with the partially consumed current buffer. */
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "file io: error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size ||
	    (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
		    fileio->dq_count == 1) {
			dprintk(3, "file io: read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		fileio->b.index = index;
		fileio->b.bytesused = buf->pos;
		ret = vb2_internal_qbuf(q, &fileio->b);
		dprintk(5, "file io: vb2_dbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or it is equal to q->num_buffers, meaning that the next
		 * time we need to dequeue a buffer since we've now queued up
		 * all the 'first time' buffers.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}
2926
2927size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
2928 loff_t *ppos, int nonblocking)
2929{
2930 return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
2931}
2932EXPORT_SYMBOL_GPL(vb2_read);
2933
819585bc 2934size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
b25748fe
MS
2935 loff_t *ppos, int nonblocking)
2936{
819585bc
RR
2937 return __vb2_perform_fileio(q, (char __user *) data, count,
2938 ppos, nonblocking, 0);
b25748fe
MS
2939}
2940EXPORT_SYMBOL_GPL(vb2_write);
2941
4c1ffcaa
HV
2942
2943/*
2944 * The following functions are not part of the vb2 core API, but are helper
2945 * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
2946 * and struct vb2_ops.
2947 * They contain boilerplate code that most if not all drivers have to do
2948 * and so they simplify the driver code.
2949 */
2950
2951/* The queue is busy if there is a owner and you are not that owner. */
2952static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
2953{
2954 return vdev->queue->owner && vdev->queue->owner != file->private_data;
2955}
2956
2957/* vb2 ioctl helpers */
2958
2959int vb2_ioctl_reqbufs(struct file *file, void *priv,
2960 struct v4l2_requestbuffers *p)
2961{
2962 struct video_device *vdev = video_devdata(file);
2963 int res = __verify_memory_type(vdev->queue, p->memory, p->type);
2964
2965 if (res)
2966 return res;
2967 if (vb2_queue_is_busy(vdev, file))
2968 return -EBUSY;
2969 res = __reqbufs(vdev->queue, p);
2970 /* If count == 0, then the owner has released all buffers and he
2971 is no longer owner of the queue. Otherwise we have a new owner. */
2972 if (res == 0)
2973 vdev->queue->owner = p->count ? file->private_data : NULL;
2974 return res;
2975}
2976EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
2977
2978int vb2_ioctl_create_bufs(struct file *file, void *priv,
2979 struct v4l2_create_buffers *p)
2980{
2981 struct video_device *vdev = video_devdata(file);
2982 int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
2983
2984 p->index = vdev->queue->num_buffers;
2985 /* If count == 0, then just check if memory and type are valid.
2986 Any -EBUSY result from __verify_memory_type can be mapped to 0. */
2987 if (p->count == 0)
2988 return res != -EBUSY ? res : 0;
2989 if (res)
2990 return res;
2991 if (vb2_queue_is_busy(vdev, file))
2992 return -EBUSY;
2993 res = __create_bufs(vdev->queue, p);
2994 if (res == 0)
2995 vdev->queue->owner = file->private_data;
2996 return res;
2997}
2998EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
2999
3000int vb2_ioctl_prepare_buf(struct file *file, void *priv,
3001 struct v4l2_buffer *p)
3002{
3003 struct video_device *vdev = video_devdata(file);
3004
3005 if (vb2_queue_is_busy(vdev, file))
3006 return -EBUSY;
3007 return vb2_prepare_buf(vdev->queue, p);
3008}
3009EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
3010
3011int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
3012{
3013 struct video_device *vdev = video_devdata(file);
3014
3015 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
3016 return vb2_querybuf(vdev->queue, p);
3017}
3018EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
3019
3020int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3021{
3022 struct video_device *vdev = video_devdata(file);
3023
3024 if (vb2_queue_is_busy(vdev, file))
3025 return -EBUSY;
3026 return vb2_qbuf(vdev->queue, p);
3027}
3028EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
3029
3030int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3031{
3032 struct video_device *vdev = video_devdata(file);
3033
3034 if (vb2_queue_is_busy(vdev, file))
3035 return -EBUSY;
3036 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
3037}
3038EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
3039
3040int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
3041{
3042 struct video_device *vdev = video_devdata(file);
3043
3044 if (vb2_queue_is_busy(vdev, file))
3045 return -EBUSY;
3046 return vb2_streamon(vdev->queue, i);
3047}
3048EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
3049
3050int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
3051{
3052 struct video_device *vdev = video_devdata(file);
3053
3054 if (vb2_queue_is_busy(vdev, file))
3055 return -EBUSY;
3056 return vb2_streamoff(vdev->queue, i);
3057}
3058EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
3059
83ae7c5a
TS
3060int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
3061{
3062 struct video_device *vdev = video_devdata(file);
3063
3064 if (vb2_queue_is_busy(vdev, file))
3065 return -EBUSY;
3066 return vb2_expbuf(vdev->queue, p);
3067}
3068EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
3069
4c1ffcaa
HV
3070/* v4l2_file_operations helpers */
3071
3072int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3073{
3074 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
3075 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3076 int err;
4c1ffcaa 3077
8a90f1a6
LP
3078 if (lock && mutex_lock_interruptible(lock))
3079 return -ERESTARTSYS;
3080 err = vb2_mmap(vdev->queue, vma);
3081 if (lock)
3082 mutex_unlock(lock);
3083 return err;
4c1ffcaa
HV
3084}
3085EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3086
1380f575 3087int _vb2_fop_release(struct file *file, struct mutex *lock)
4c1ffcaa
HV
3088{
3089 struct video_device *vdev = video_devdata(file);
3090
3091 if (file->private_data == vdev->queue->owner) {
1380f575
RR
3092 if (lock)
3093 mutex_lock(lock);
4c1ffcaa
HV
3094 vb2_queue_release(vdev->queue);
3095 vdev->queue->owner = NULL;
1380f575
RR
3096 if (lock)
3097 mutex_unlock(lock);
4c1ffcaa
HV
3098 }
3099 return v4l2_fh_release(file);
3100}
1380f575
RR
3101EXPORT_SYMBOL_GPL(_vb2_fop_release);
3102
3103int vb2_fop_release(struct file *file)
3104{
3105 struct video_device *vdev = video_devdata(file);
3106 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3107
3108 return _vb2_fop_release(file, lock);
3109}
4c1ffcaa
HV
3110EXPORT_SYMBOL_GPL(vb2_fop_release);
3111
819585bc 3112ssize_t vb2_fop_write(struct file *file, const char __user *buf,
4c1ffcaa
HV
3113 size_t count, loff_t *ppos)
3114{
3115 struct video_device *vdev = video_devdata(file);
3116 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
3117 int err = -EBUSY;
3118
cf533735 3119 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3120 return -ERESTARTSYS;
3121 if (vb2_queue_is_busy(vdev, file))
3122 goto exit;
3123 err = vb2_write(vdev->queue, buf, count, ppos,
3124 file->f_flags & O_NONBLOCK);
8c82c75c 3125 if (vdev->queue->fileio)
4c1ffcaa
HV
3126 vdev->queue->owner = file->private_data;
3127exit:
cf533735 3128 if (lock)
4c1ffcaa
HV
3129 mutex_unlock(lock);
3130 return err;
3131}
3132EXPORT_SYMBOL_GPL(vb2_fop_write);
3133
3134ssize_t vb2_fop_read(struct file *file, char __user *buf,
3135 size_t count, loff_t *ppos)
3136{
3137 struct video_device *vdev = video_devdata(file);
3138 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
4c1ffcaa
HV
3139 int err = -EBUSY;
3140
cf533735 3141 if (lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3142 return -ERESTARTSYS;
3143 if (vb2_queue_is_busy(vdev, file))
3144 goto exit;
3145 err = vb2_read(vdev->queue, buf, count, ppos,
3146 file->f_flags & O_NONBLOCK);
8c82c75c 3147 if (vdev->queue->fileio)
4c1ffcaa
HV
3148 vdev->queue->owner = file->private_data;
3149exit:
cf533735 3150 if (lock)
4c1ffcaa
HV
3151 mutex_unlock(lock);
3152 return err;
3153}
3154EXPORT_SYMBOL_GPL(vb2_fop_read);
3155
3156unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3157{
3158 struct video_device *vdev = video_devdata(file);
3159 struct vb2_queue *q = vdev->queue;
3160 struct mutex *lock = q->lock ? q->lock : vdev->lock;
3161 unsigned long req_events = poll_requested_events(wait);
3162 unsigned res;
3163 void *fileio;
4c1ffcaa
HV
3164 bool must_lock = false;
3165
3166 /* Try to be smart: only lock if polling might start fileio,
3167 otherwise locking will only introduce unwanted delays. */
3168 if (q->num_buffers == 0 && q->fileio == NULL) {
3169 if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3170 (req_events & (POLLIN | POLLRDNORM)))
3171 must_lock = true;
3172 else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3173 (req_events & (POLLOUT | POLLWRNORM)))
3174 must_lock = true;
3175 }
3176
3177 /* If locking is needed, but this helper doesn't know how, then you
3178 shouldn't be using this helper but you should write your own. */
cf533735 3179 WARN_ON(must_lock && !lock);
4c1ffcaa 3180
cf533735 3181 if (must_lock && lock && mutex_lock_interruptible(lock))
4c1ffcaa
HV
3182 return POLLERR;
3183
3184 fileio = q->fileio;
3185
3186 res = vb2_poll(vdev->queue, file, wait);
3187
3188 /* If fileio was started, then we have a new queue owner. */
3189 if (must_lock && !fileio && q->fileio)
3190 q->owner = file->private_data;
cf533735 3191 if (must_lock && lock)
4c1ffcaa
HV
3192 mutex_unlock(lock);
3193 return res;
3194}
3195EXPORT_SYMBOL_GPL(vb2_fop_poll);
3196
3197#ifndef CONFIG_MMU
3198unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3199 unsigned long len, unsigned long pgoff, unsigned long flags)
3200{
3201 struct video_device *vdev = video_devdata(file);
8a90f1a6
LP
3202 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3203 int ret;
4c1ffcaa 3204
8a90f1a6
LP
3205 if (lock && mutex_lock_interruptible(lock))
3206 return -ERESTARTSYS;
3207 ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3208 if (lock)
3209 mutex_unlock(lock);
3210 return ret;
4c1ffcaa
HV
3211}
3212EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3213#endif
3214
3215/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
3216
3217void vb2_ops_wait_prepare(struct vb2_queue *vq)
3218{
3219 mutex_unlock(vq->lock);
3220}
3221EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3222
3223void vb2_ops_wait_finish(struct vb2_queue *vq)
3224{
3225 mutex_lock(vq->lock);
3226}
3227EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
3228
e23ccc0a 3229MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
95072084 3230MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
e23ccc0a 3231MODULE_LICENSE("GPL");
This page took 0.383176 seconds and 5 git commands to generate.