1 /*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/omap-iommu.h>
31 #include <linux/pagemap.h>
32 #include <linux/scatterlist.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/vmalloc.h>
36 #include <media/v4l2-dev.h>
37 #include <media/v4l2-ioctl.h>
38 #include <plat/iommu.h>
39 #include <plat/iovmm.h>
40 #include <plat/omap-pm.h>
41
42 #include "ispvideo.h"
43 #include "isp.h"
44
45
46 /* -----------------------------------------------------------------------------
47 * Helper functions
48 */
49
50 /*
51 * NOTE: When adding new media bus codes, always remember to add
52 * corresponding in-memory formats to the table below!!!
53 */
54 static struct isp_format_info formats[] = {
55 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
56 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
57 V4L2_PIX_FMT_GREY, 8, 1, },
58 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
59 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
60 V4L2_PIX_FMT_Y10, 10, 2, },
61 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
62 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
63 V4L2_PIX_FMT_Y12, 12, 2, },
64 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
65 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
66 V4L2_PIX_FMT_SBGGR8, 8, 1, },
67 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
68 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
69 V4L2_PIX_FMT_SGBRG8, 8, 1, },
70 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
71 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
72 V4L2_PIX_FMT_SGRBG8, 8, 1, },
73 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
74 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
75 V4L2_PIX_FMT_SRGGB8, 8, 1, },
76 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
77 V4L2_MBUS_FMT_SBGGR10_1X10, 0,
78 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
79 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
80 V4L2_MBUS_FMT_SGBRG10_1X10, 0,
81 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
82 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
83 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
84 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
85 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
86 V4L2_MBUS_FMT_SRGGB10_1X10, 0,
87 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
88 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
89 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
90 V4L2_PIX_FMT_SBGGR10, 10, 2, },
91 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
92 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
93 V4L2_PIX_FMT_SGBRG10, 10, 2, },
94 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
95 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
96 V4L2_PIX_FMT_SGRBG10, 10, 2, },
97 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
98 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
99 V4L2_PIX_FMT_SRGGB10, 10, 2, },
100 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
101 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
102 V4L2_PIX_FMT_SBGGR12, 12, 2, },
103 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
104 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
105 V4L2_PIX_FMT_SGBRG12, 12, 2, },
106 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
107 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
108 V4L2_PIX_FMT_SGRBG12, 12, 2, },
109 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
110 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
111 V4L2_PIX_FMT_SRGGB12, 12, 2, },
112 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
113 V4L2_MBUS_FMT_UYVY8_1X16, 0,
114 V4L2_PIX_FMT_UYVY, 16, 2, },
115 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
116 V4L2_MBUS_FMT_YUYV8_1X16, 0,
117 V4L2_PIX_FMT_YUYV, 16, 2, },
118 { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
119 V4L2_MBUS_FMT_UYVY8_2X8, 0,
120 V4L2_PIX_FMT_UYVY, 8, 2, },
121 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
122 V4L2_MBUS_FMT_YUYV8_2X8, 0,
123 V4L2_PIX_FMT_YUYV, 8, 2, },
124 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC
125 * module and avoid NULL pointer dereferences.
126 */
127 { 0, }
128 };
129
130 const struct isp_format_info *
131 omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
132 {
133 unsigned int i;
134
135 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
136 if (formats[i].code == code)
137 return &formats[i];
138 }
139
140 return NULL;
141 }
142
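/*
 * Illustrative example, not part of the original driver: looking up the
 * in-memory format for a 10-bit GRBG Bayer media bus code. Based on the
 * table above, the call below yields an entry with pixelformat
 * V4L2_PIX_FMT_SGRBG10, 10 bits per sample and 2 bytes per pixel; an
 * unknown code yields NULL, so callers must check the result.
 *
 *	const struct isp_format_info *info;
 *
 *	info = omap3isp_video_format_info(V4L2_MBUS_FMT_SGRBG10_1X10);
 *	if (info == NULL)
 *		return -EINVAL;
 */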
143 /*
144 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
145 * @video: ISP video instance
146 * @mbus: v4l2_mbus_framefmt format (input)
147 * @pix: v4l2_pix_format format (output)
148 *
149 * Fill the output pix structure with information from the input mbus format.
150 * The bytesperline and sizeimage fields are computed from the requested bytes
151 * per line value in the pix format and information from the video instance.
152 *
153 * Return the number of padding bytes at end of line.
154 */
155 static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
156 const struct v4l2_mbus_framefmt *mbus,
157 struct v4l2_pix_format *pix)
158 {
159 unsigned int bpl = pix->bytesperline;
160 unsigned int min_bpl;
161 unsigned int i;
162
163 memset(pix, 0, sizeof(*pix));
164 pix->width = mbus->width;
165 pix->height = mbus->height;
166
167 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
168 if (formats[i].code == mbus->code)
169 break;
170 }
171
172 if (WARN_ON(i == ARRAY_SIZE(formats)))
173 return 0;
174
175 min_bpl = pix->width * formats[i].bpp;
176
177 /* Clamp the requested bytes per line value. If the maximum bytes per
178 * line value is zero, the module doesn't support user configurable line
179 * sizes. Override the requested value with the minimum in that case.
180 */
181 if (video->bpl_max)
182 bpl = clamp(bpl, min_bpl, video->bpl_max);
183 else
184 bpl = min_bpl;
185
186 if (!video->bpl_zero_padding || bpl != min_bpl)
187 bpl = ALIGN(bpl, video->bpl_alignment);
188
189 pix->pixelformat = formats[i].pixelformat;
190 pix->bytesperline = bpl;
191 pix->sizeimage = pix->bytesperline * pix->height;
192 pix->colorspace = mbus->colorspace;
193 pix->field = mbus->field;
194
195 return bpl - min_bpl;
196 }
197
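/*
 * Worked example, not part of the original driver and assuming a 32-byte
 * bpl_alignment: for a 1024x768 V4L2_MBUS_FMT_SGRBG10_1X10 frame the format
 * table gives 2 bytes per pixel, so min_bpl = 1024 * 2 = 2048. With no
 * larger bytesperline requested the value stays at 2048, sizeimage becomes
 * 2048 * 768 = 1572864 bytes and the function returns 0 padding bytes. If
 * userspace had requested bytesperline = 2100 and the module supports user
 * configurable line sizes (bpl_max != 0), the value would be rounded up to
 * ALIGN(2100, 32) = 2112 and 64 padding bytes would be reported.
 */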
198 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
199 struct v4l2_mbus_framefmt *mbus)
200 {
201 unsigned int i;
202
203 memset(mbus, 0, sizeof(*mbus));
204 mbus->width = pix->width;
205 mbus->height = pix->height;
206
207 /* Skip the last format in the loop so that it will be selected if no
208 * match is found.
209 */
210 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
211 if (formats[i].pixelformat == pix->pixelformat)
212 break;
213 }
214
215 mbus->code = formats[i].code;
216 mbus->colorspace = pix->colorspace;
217 mbus->field = pix->field;
218 }
219
220 static struct v4l2_subdev *
221 isp_video_remote_subdev(struct isp_video *video, u32 *pad)
222 {
223 struct media_pad *remote;
224
225 remote = media_entity_remote_source(&video->pad);
226
227 if (remote == NULL ||
228 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
229 return NULL;
230
231 if (pad)
232 *pad = remote->index;
233
234 return media_entity_to_v4l2_subdev(remote->entity);
235 }
236
237 /* Walk the pipeline graph, record its entities and fill in the pipeline input and output video nodes. */
238 static int isp_video_get_graph_data(struct isp_video *video,
239 struct isp_pipeline *pipe)
240 {
241 struct media_entity_graph graph;
242 struct media_entity *entity = &video->video.entity;
243 struct media_device *mdev = entity->parent;
244 struct isp_video *far_end = NULL;
245
246 mutex_lock(&mdev->graph_mutex);
247 media_entity_graph_walk_start(&graph, entity);
248
249 while ((entity = media_entity_graph_walk_next(&graph))) {
250 struct isp_video *__video;
251
252 pipe->entities |= 1 << entity->id;
253
254 if (far_end != NULL)
255 continue;
256
257 if (entity == &video->video.entity)
258 continue;
259
260 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
261 continue;
262
263 __video = to_isp_video(media_entity_to_video_device(entity));
264 if (__video->type != video->type)
265 far_end = __video;
266 }
267
268 mutex_unlock(&mdev->graph_mutex);
269
270 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
271 pipe->input = far_end;
272 pipe->output = video;
273 } else {
274 if (far_end == NULL)
275 return -EPIPE;
276
277 pipe->input = video;
278 pipe->output = far_end;
279 }
280
281 return 0;
282 }
283
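/*
 * Illustrative note, not part of the original driver: pipe->entities is a
 * bitmask indexed by media entity id, so membership of, for instance, the
 * CCDC subdev in the pipeline that was just walked can later be tested with
 *
 *	if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id))
 *		...
 *
 * as isp_video_check_external_subdevs() below does.
 */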
284 /*
285 * Validate a pipeline by walking it from the output video node towards the
286 * source.
287 *
288 * While walking the pipeline, update the maximum pixel rate allowed by the
289 * modules that limit it (currently the resizer only).
290 *
291 * Return 0 when the walk completes, or -EPIPE if the pipeline is broken,
292 * either because the output video node has no remote subdev or because a
293 * sink pad along the way has no connected source pad.
294 */
295 static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
296 {
297 struct isp_device *isp = pipe->output->isp;
298 struct media_pad *pad;
299 struct v4l2_subdev *subdev;
300
301 subdev = isp_video_remote_subdev(pipe->output, NULL);
302 if (subdev == NULL)
303 return -EPIPE;
304
305 while (1) {
306 /* Retrieve the sink format */
307 pad = &subdev->entity.pads[0];
308 if (!(pad->flags & MEDIA_PAD_FL_SINK))
309 break;
310
311 /* Update the maximum frame rate */
312 if (subdev == &isp->isp_res.subdev)
313 omap3isp_resizer_max_rate(&isp->isp_res,
314 &pipe->max_rate);
315
316 /* Retrieve the source format. Return an error if no source
317 * entity can be found, and stop checking the pipeline if the
318 * source entity isn't a subdev.
319 */
320 pad = media_entity_remote_source(pad);
321 if (pad == NULL)
322 return -EPIPE;
323
324 if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
325 break;
326
327 subdev = media_entity_to_v4l2_subdev(pad->entity);
328 }
329
330 return 0;
331 }
332
333 static int
334 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
335 {
336 struct v4l2_subdev_format fmt;
337 struct v4l2_subdev *subdev;
338 u32 pad;
339 int ret;
340
341 subdev = isp_video_remote_subdev(video, &pad);
342 if (subdev == NULL)
343 return -EINVAL;
344
345 mutex_lock(&video->mutex);
346
347 fmt.pad = pad;
348 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
349 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
350 if (ret == -ENOIOCTLCMD)
351 ret = -EINVAL;
352
353 mutex_unlock(&video->mutex);
354
355 if (ret)
356 return ret;
357
358 format->type = video->type;
359 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
360 }
361
362 static int
363 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
364 {
365 struct v4l2_format format;
366 int ret;
367
368 memcpy(&format, &vfh->format, sizeof(format));
369 ret = __isp_video_get_format(video, &format);
370 if (ret < 0)
371 return ret;
372
373 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
374 vfh->format.fmt.pix.height != format.fmt.pix.height ||
375 vfh->format.fmt.pix.width != format.fmt.pix.width ||
376 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
377 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
378 return -EINVAL;
379
380 return ret;
381 }
382
383 /* -----------------------------------------------------------------------------
384 * IOMMU management
385 */
386
387 #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
388
389 /*
390 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
391 * @isp: Pointer to the OMAP3 ISP device
392 * @sglist: Pointer to the source scatter gather list to map
393 * @sglen: Number of elements in the scatter gather list
394 *
395 * Return the device address assigned by the ISP MMU, or -ENOMEM if we ran
396 * out of memory.
397 */
398 static dma_addr_t
399 ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
400 {
401 struct sg_table *sgt;
402 u32 da;
403
404 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
405 if (sgt == NULL)
406 return -ENOMEM;
407
408 sgt->sgl = (struct scatterlist *)sglist;
409 sgt->nents = sglen;
410 sgt->orig_nents = sglen;
411
412 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
413 if (IS_ERR_VALUE(da))
414 kfree(sgt);
415
416 return da;
417 }
418
419 /*
420 * ispmmu_vunmap - Unmap a device address from the ISP MMU
421 * @isp: Pointer to the OMAP3 ISP device
422 * @da: Device address returned by an ispmmu_vmap() call
423 */
424 static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
425 {
426 struct sg_table *sgt;
427
428 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
429 kfree(sgt);
430 }
431
432 /* -----------------------------------------------------------------------------
433 * Video queue operations
434 */
435
436 static void isp_video_queue_prepare(struct isp_video_queue *queue,
437 unsigned int *nbuffers, unsigned int *size)
438 {
439 struct isp_video_fh *vfh =
440 container_of(queue, struct isp_video_fh, queue);
441 struct isp_video *video = vfh->video;
442
443 *size = vfh->format.fmt.pix.sizeimage;
444 if (*size == 0)
445 return;
446
447 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
448 }
449
450 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
451 {
452 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
453 struct isp_buffer *buffer = to_isp_buffer(buf);
454 struct isp_video *video = vfh->video;
455
456 if (buffer->isp_addr) {
457 ispmmu_vunmap(video->isp, buffer->isp_addr);
458 buffer->isp_addr = 0;
459 }
460 }
461
462 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
463 {
464 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
465 struct isp_buffer *buffer = to_isp_buffer(buf);
466 struct isp_video *video = vfh->video;
467 unsigned long addr;
468
469 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
470 if (IS_ERR_VALUE(addr))
471 return -EIO;
472
473 if (!IS_ALIGNED(addr, 32)) {
474 dev_dbg(video->isp->dev, "Buffer address must be "
475 "aligned to a 32-byte boundary.\n");
476 ispmmu_vunmap(video->isp, buffer->isp_addr);
477 return -EINVAL;
478 }
479
480 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
481 buffer->isp_addr = addr;
482 return 0;
483 }
484
485 /*
486 * isp_video_buffer_queue - Add buffer to streaming queue
487 * @buf: Video buffer
488 *
489 * In memory-to-memory mode, start streaming on the pipeline if buffers are
490 * queued on both the input and the output, if the pipeline isn't already busy.
491 * If the pipeline is busy, it will be restarted in the output module interrupt
492 * handler.
493 */
494 static void isp_video_buffer_queue(struct isp_video_buffer *buf)
495 {
496 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
497 struct isp_buffer *buffer = to_isp_buffer(buf);
498 struct isp_video *video = vfh->video;
499 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
500 enum isp_pipeline_state state;
501 unsigned long flags;
502 unsigned int empty;
503 unsigned int start;
504
505 empty = list_empty(&video->dmaqueue);
506 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
507
508 if (empty) {
509 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
510 state = ISP_PIPELINE_QUEUE_OUTPUT;
511 else
512 state = ISP_PIPELINE_QUEUE_INPUT;
513
514 spin_lock_irqsave(&pipe->lock, flags);
515 pipe->state |= state;
516 video->ops->queue(video, buffer);
517 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
518
519 start = isp_pipeline_ready(pipe);
520 if (start)
521 pipe->state |= ISP_PIPELINE_STREAM;
522 spin_unlock_irqrestore(&pipe->lock, flags);
523
524 if (start)
525 omap3isp_pipeline_set_stream(pipe,
526 ISP_PIPELINE_STREAM_SINGLESHOT);
527 }
528 }
529
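/*
 * Illustrative walk-through, not part of the original driver: in
 * memory-to-memory mode the first buffer queued on the output video node
 * only sets ISP_PIPELINE_QUEUE_INPUT and hands the buffer to the module
 * specific ->queue() handler; the single-shot run starts once the capture
 * node has received its first buffer as well, at which point
 * isp_pipeline_ready() reports the pipeline as ready and the code above
 * calls, after dropping pipe->lock,
 *
 *	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_SINGLESHOT);
 */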
530 static const struct isp_video_queue_operations isp_video_queue_ops = {
531 .queue_prepare = &isp_video_queue_prepare,
532 .buffer_prepare = &isp_video_buffer_prepare,
533 .buffer_queue = &isp_video_buffer_queue,
534 .buffer_cleanup = &isp_video_buffer_cleanup,
535 };
536
537 /*
538 * omap3isp_video_buffer_next - Complete the current buffer and return the next
539 * @video: ISP video object
540 *
541 * Remove the current video buffer from the DMA queue and fill its timestamp,
542 * field count and state fields before waking up its completion handler.
543 *
544 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
545 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
546 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
547 *
548 * The DMA queue is expected to contain at least one buffer.
549 *
550 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
551 * empty.
552 */
553 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
554 {
555 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
556 struct isp_video_queue *queue = video->queue;
557 enum isp_pipeline_state state;
558 struct isp_video_buffer *buf;
559 unsigned long flags;
560 struct timespec ts;
561
562 spin_lock_irqsave(&queue->irqlock, flags);
563 if (WARN_ON(list_empty(&video->dmaqueue))) {
564 spin_unlock_irqrestore(&queue->irqlock, flags);
565 return NULL;
566 }
567
568 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
569 irqlist);
570 list_del(&buf->irqlist);
571 spin_unlock_irqrestore(&queue->irqlock, flags);
572
573 ktime_get_ts(&ts);
574 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
575 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
576
577 /* Do frame number propagation only if this is the output video node.
578 * Frame number either comes from the CSI receivers or it gets
579 * incremented here if H3A is not active.
580 * Note: There is no guarantee that the output buffer will finish
581 * first, so the input number might lag behind by 1 in some cases.
582 */
583 if (video == pipe->output && !pipe->do_propagation)
584 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
585 else
586 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
587
588 /* Report pipeline errors to userspace on the capture device side. */
589 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
590 buf->state = ISP_BUF_STATE_ERROR;
591 pipe->error = false;
592 } else {
593 buf->state = ISP_BUF_STATE_DONE;
594 }
595
596 wake_up(&buf->wait);
597
598 if (list_empty(&video->dmaqueue)) {
599 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
600 state = ISP_PIPELINE_QUEUE_OUTPUT
601 | ISP_PIPELINE_STREAM;
602 else
603 state = ISP_PIPELINE_QUEUE_INPUT
604 | ISP_PIPELINE_STREAM;
605
606 spin_lock_irqsave(&pipe->lock, flags);
607 pipe->state &= ~state;
608 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
609 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
610 spin_unlock_irqrestore(&pipe->lock, flags);
611 return NULL;
612 }
613
614 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
615 spin_lock_irqsave(&pipe->lock, flags);
616 pipe->state &= ~ISP_PIPELINE_STREAM;
617 spin_unlock_irqrestore(&pipe->lock, flags);
618 }
619
620 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
621 irqlist);
622 buf->state = ISP_BUF_STATE_ACTIVE;
623 return to_isp_buffer(buf);
624 }
625
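/*
 * Illustrative sketch, not part of the original driver: a module interrupt
 * handler typically completes the current buffer and programs the next one
 * in a single call. module_set_outaddr() below is a hypothetical name
 * standing for the per-module output address setup function.
 *
 *	struct isp_buffer *buffer;
 *
 *	buffer = omap3isp_video_buffer_next(video);
 *	if (buffer != NULL)
 *		module_set_outaddr(module, buffer->isp_addr);
 */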
626 /*
627 * omap3isp_video_resume - Perform resume operation on the buffers
628 * @video: ISP video object
629 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
630 *
631 * This function is intended to be used in suspend/resume scenarios. It
632 * requests the video queue layer to discard buffers marked as DONE if the
633 * pipeline is in continuous mode, and asks the ISP modules to requeue the
634 * ACTIVE buffer if there is one.
635 */
636 void omap3isp_video_resume(struct isp_video *video, int continuous)
637 {
638 struct isp_buffer *buf = NULL;
639
640 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
641 omap3isp_video_queue_discard_done(video->queue);
642
643 if (!list_empty(&video->dmaqueue)) {
644 buf = list_first_entry(&video->dmaqueue,
645 struct isp_buffer, buffer.irqlist);
646 video->ops->queue(video, buf);
647 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
648 } else {
649 if (continuous)
650 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
651 }
652 }
653
654 /* -----------------------------------------------------------------------------
655 * V4L2 ioctls
656 */
657
658 static int
659 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
660 {
661 struct isp_video *video = video_drvdata(file);
662
663 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
664 strlcpy(cap->card, video->video.name, sizeof(cap->card));
665 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
666
667 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
668 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
669 else
670 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
671
672 return 0;
673 }
674
675 static int
676 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
677 {
678 struct isp_video_fh *vfh = to_isp_video_fh(fh);
679 struct isp_video *video = video_drvdata(file);
680
681 if (format->type != video->type)
682 return -EINVAL;
683
684 mutex_lock(&video->mutex);
685 *format = vfh->format;
686 mutex_unlock(&video->mutex);
687
688 return 0;
689 }
690
691 static int
692 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
693 {
694 struct isp_video_fh *vfh = to_isp_video_fh(fh);
695 struct isp_video *video = video_drvdata(file);
696 struct v4l2_mbus_framefmt fmt;
697
698 if (format->type != video->type)
699 return -EINVAL;
700
701 mutex_lock(&video->mutex);
702
703 /* Fill the bytesperline and sizeimage fields by converting to media bus
704 * format and back to pixel format.
705 */
706 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
707 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
708
709 vfh->format = *format;
710
711 mutex_unlock(&video->mutex);
712 return 0;
713 }
714
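/*
 * Illustrative userspace sketch, not part of the original driver: setting
 * the format on the video node only round-trips the request through the
 * media bus format to fill bytesperline and sizeimage; the hardware itself
 * is configured through the subdev pad formats. The device fd, width and
 * height below are hypothetical.
 *
 *	struct v4l2_format fmt;
 *
 *	memset(&fmt, 0, sizeof(fmt));
 *	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	fmt.fmt.pix.width = 1024;
 *	fmt.fmt.pix.height = 768;
 *	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_SGRBG10;
 *	ioctl(fd, VIDIOC_S_FMT, &fmt);
 *
 * On return, fmt.fmt.pix.bytesperline and fmt.fmt.pix.sizeimage hold the
 * values computed by isp_video_mbus_to_pix().
 */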
715 static int
716 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
717 {
718 struct isp_video *video = video_drvdata(file);
719 struct v4l2_subdev_format fmt;
720 struct v4l2_subdev *subdev;
721 u32 pad;
722 int ret;
723
724 if (format->type != video->type)
725 return -EINVAL;
726
727 subdev = isp_video_remote_subdev(video, &pad);
728 if (subdev == NULL)
729 return -EINVAL;
730
731 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
732
733 fmt.pad = pad;
734 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
735 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
736 if (ret)
737 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
738
739 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
740 return 0;
741 }
742
743 static int
744 isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
745 {
746 struct isp_video *video = video_drvdata(file);
747 struct v4l2_subdev *subdev;
748 int ret;
749
750 subdev = isp_video_remote_subdev(video, NULL);
751 if (subdev == NULL)
752 return -EINVAL;
753
754 mutex_lock(&video->mutex);
755 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
756 mutex_unlock(&video->mutex);
757
758 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
759 }
760
761 static int
762 isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
763 {
764 struct isp_video *video = video_drvdata(file);
765 struct v4l2_subdev_format format;
766 struct v4l2_subdev *subdev;
767 u32 pad;
768 int ret;
769
770 subdev = isp_video_remote_subdev(video, &pad);
771 if (subdev == NULL)
772 return -EINVAL;
773
774 /* Try the get crop operation first and fallback to get format if not
775 * implemented.
776 */
777 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
778 if (ret != -ENOIOCTLCMD)
779 return ret;
780
781 format.pad = pad;
782 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
783 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
784 if (ret < 0)
785 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
786
787 crop->c.left = 0;
788 crop->c.top = 0;
789 crop->c.width = format.format.width;
790 crop->c.height = format.format.height;
791
792 return 0;
793 }
794
795 static int
796 isp_video_set_crop(struct file *file, void *fh, const struct v4l2_crop *crop)
797 {
798 struct isp_video *video = video_drvdata(file);
799 struct v4l2_subdev *subdev;
800 int ret;
801
802 subdev = isp_video_remote_subdev(video, NULL);
803 if (subdev == NULL)
804 return -EINVAL;
805
806 mutex_lock(&video->mutex);
807 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
808 mutex_unlock(&video->mutex);
809
810 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
811 }
812
813 static int
814 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
815 {
816 struct isp_video_fh *vfh = to_isp_video_fh(fh);
817 struct isp_video *video = video_drvdata(file);
818
819 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
820 video->type != a->type)
821 return -EINVAL;
822
823 memset(a, 0, sizeof(*a));
824 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
825 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
826 a->parm.output.timeperframe = vfh->timeperframe;
827
828 return 0;
829 }
830
831 static int
832 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
833 {
834 struct isp_video_fh *vfh = to_isp_video_fh(fh);
835 struct isp_video *video = video_drvdata(file);
836
837 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
838 video->type != a->type)
839 return -EINVAL;
840
841 if (a->parm.output.timeperframe.denominator == 0)
842 a->parm.output.timeperframe.denominator = 1;
843
844 vfh->timeperframe = a->parm.output.timeperframe;
845
846 return 0;
847 }
848
849 static int
850 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
851 {
852 struct isp_video_fh *vfh = to_isp_video_fh(fh);
853
854 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
855 }
856
857 static int
858 isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
859 {
860 struct isp_video_fh *vfh = to_isp_video_fh(fh);
861
862 return omap3isp_video_queue_querybuf(&vfh->queue, b);
863 }
864
865 static int
866 isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
867 {
868 struct isp_video_fh *vfh = to_isp_video_fh(fh);
869
870 return omap3isp_video_queue_qbuf(&vfh->queue, b);
871 }
872
873 static int
874 isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
875 {
876 struct isp_video_fh *vfh = to_isp_video_fh(fh);
877
878 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
879 file->f_flags & O_NONBLOCK);
880 }
881
882 static int isp_video_check_external_subdevs(struct isp_video *video,
883 struct isp_pipeline *pipe)
884 {
885 struct isp_device *isp = video->isp;
886 struct media_entity *ents[] = {
887 &isp->isp_csi2a.subdev.entity,
888 &isp->isp_csi2c.subdev.entity,
889 &isp->isp_ccp2.subdev.entity,
890 &isp->isp_ccdc.subdev.entity
891 };
892 struct media_pad *source_pad;
893 struct media_entity *source = NULL;
894 struct media_entity *sink;
895 struct v4l2_subdev_format fmt;
896 struct v4l2_ext_controls ctrls;
897 struct v4l2_ext_control ctrl;
898 unsigned int i;
899 int ret = 0;
900
901 for (i = 0; i < ARRAY_SIZE(ents); i++) {
902 /* Is the entity part of the pipeline? */
903 if (!(pipe->entities & (1 << ents[i]->id)))
904 continue;
905
906 /* ISP entities always have their sink pad at index 0. Find the source. */
907 source_pad = media_entity_remote_source(&ents[i]->pads[0]);
908 if (source_pad == NULL)
909 continue;
910
911 source = source_pad->entity;
912 sink = ents[i];
913 break;
914 }
915
916 if (!source) {
917 dev_warn(isp->dev, "can't find source, failing now\n");
918 return ret;
919 }
920
921 if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
922 return 0;
923
924 pipe->external = media_entity_to_v4l2_subdev(source);
925
926 fmt.pad = source_pad->index;
927 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
928 ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
929 pad, get_fmt, NULL, &fmt);
930 if (unlikely(ret < 0)) {
931 dev_warn(isp->dev, "get_fmt on the sink pad failed (%d)\n", ret);
932 return ret;
933 }
934
935 pipe->external_width =
936 omap3isp_video_format_info(fmt.format.code)->width;
937
938 memset(&ctrls, 0, sizeof(ctrls));
939 memset(&ctrl, 0, sizeof(ctrl));
940
941 ctrl.id = V4L2_CID_PIXEL_RATE;
942
943 ctrls.count = 1;
944 ctrls.controls = &ctrl;
945
946 ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
947 if (ret < 0) {
948 dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
949 pipe->external->name);
950 return ret;
951 }
952
953 pipe->external_rate = ctrl.value64;
954
955 if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
956 unsigned int rate = UINT_MAX;
957 /*
958 * Check that maximum allowed CCDC pixel rate isn't
959 * exceeded by the pixel rate.
960 */
961 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
962 if (pipe->external_rate > rate)
963 return -ENOSPC;
964 }
965
966 return 0;
967 }
968
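/*
 * Illustrative sketch, not part of the original driver: the check above
 * requires the external subdev (typically the sensor) to expose a
 * V4L2_CID_PIXEL_RATE control. A sensor driver would usually create it at
 * probe time roughly as follows, where the sensor_* names and the
 * min/max/step/default values are hypothetical:
 *
 *	v4l2_ctrl_new_std(&sensor->ctrls, &sensor_ctrl_ops,
 *			  V4L2_CID_PIXEL_RATE, 1, INT_MAX, 1, 96000000);
 */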
969 /*
970 * Stream management
971 *
972 * Every ISP pipeline has a single input and a single output. The input can be
973 * either a sensor or a video node. The output is always a video node.
974 *
975 * As every pipeline has an output video node, the ISP video objects at the
976 * pipeline output stores the pipeline state. It tracks the streaming state of
977 * both the input and output, as well as the availability of buffers.
978 *
979 * In sensor-to-memory mode, frames are always available at the pipeline input.
980 * Starting the sensor usually requires I2C transfers and must be done in
981 * interruptible context. The pipeline is started and stopped synchronously
982 * to the stream on/off commands. All modules in the pipeline will get their
983 * subdev set stream handler called. The module at the end of the pipeline must
984 * delay starting the hardware until buffers are available at its output.
985 *
986 * In memory-to-memory mode, starting/stopping the stream requires
987 * synchronization between the input and output. ISP modules can't be stopped
988 * in the middle of a frame, and at least some of the modules seem to become
989 * busy as soon as they're started, even if they don't receive a frame start
990 * event. For that reason frames need to be processed in single-shot mode. The
991 * driver needs to wait until a frame is completely processed and written to
992 * memory before restarting the pipeline for the next frame. Pipelined
993 * processing might be possible but requires more testing.
994 *
995 * Stream start must be delayed until buffers are available at both the input
996 * and output. The pipeline must be started in the videobuf queue callback with
997 * the buffer queue spinlock held. The modules' subdev set stream operation must
998 * not sleep.
999 */
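/*
 * Illustrative userspace sketch, not part of the original driver: the
 * sensor-to-memory case described above maps to the usual V4L2 streaming
 * sequence on the capture node. Buffer mapping and error handling are
 * omitted and the names are hypothetical.
 *
 *	struct v4l2_requestbuffers rb = {
 *		.count = 4,
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	struct v4l2_buffer buf = {
 *		.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
 *		.memory = V4L2_MEMORY_MMAP,
 *	};
 *	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	unsigned int i;
 *
 *	ioctl(fd, VIDIOC_REQBUFS, &rb);
 *	for (i = 0; i < rb.count; i++) {
 *		buf.index = i;
 *		ioctl(fd, VIDIOC_QBUF, &buf);
 *	}
 *	ioctl(fd, VIDIOC_STREAMON, &type);
 *	while (keep_streaming) {
 *		ioctl(fd, VIDIOC_DQBUF, &buf);
 *		ioctl(fd, VIDIOC_QBUF, &buf);
 *	}
 *	ioctl(fd, VIDIOC_STREAMOFF, &type);
 */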
1000 static int
1001 isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1002 {
1003 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1004 struct isp_video *video = video_drvdata(file);
1005 enum isp_pipeline_state state;
1006 struct isp_pipeline *pipe;
1007 unsigned long flags;
1008 int ret;
1009
1010 if (type != video->type)
1011 return -EINVAL;
1012
1013 mutex_lock(&video->stream_lock);
1014
1015 if (video->streaming) {
1016 mutex_unlock(&video->stream_lock);
1017 return -EBUSY;
1018 }
1019
1020 /* Start streaming on the pipeline. No link touching an entity in the
1021 * pipeline can be activated or deactivated once streaming is started.
1022 */
1023 pipe = video->video.entity.pipe
1024 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
1025
1026 pipe->entities = 0;
1027
1028 if (video->isp->pdata->set_constraints)
1029 video->isp->pdata->set_constraints(video->isp, true);
1030 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
1031 pipe->max_rate = pipe->l3_ick;
1032
1033 ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
1034 if (ret < 0)
1035 goto err_pipeline_start;
1036
1037 /* Verify that the currently configured format matches the output of
1038 * the connected subdev.
1039 */
1040 ret = isp_video_check_format(video, vfh);
1041 if (ret < 0)
1042 goto err_check_format;
1043
1044 video->bpl_padding = ret;
1045 video->bpl_value = vfh->format.fmt.pix.bytesperline;
1046
1047 ret = isp_video_get_graph_data(video, pipe);
1048 if (ret < 0)
1049 goto err_check_format;
1050
1051 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1052 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
1053 else
1054 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1055
1056 ret = isp_video_check_external_subdevs(video, pipe);
1057 if (ret < 0)
1058 goto err_check_format;
1059
1060 /* Validate the pipeline and update its state. */
1061 ret = isp_video_validate_pipeline(pipe);
1062 if (ret < 0)
1063 goto err_check_format;
1064
1065 pipe->error = false;
1066
1067 spin_lock_irqsave(&pipe->lock, flags);
1068 pipe->state &= ~ISP_PIPELINE_STREAM;
1069 pipe->state |= state;
1070 spin_unlock_irqrestore(&pipe->lock, flags);
1071
1072 /* Set the maximum time per frame as the value requested by userspace.
1073 * This is a soft limit that can be overridden if the hardware doesn't
1074 * support the requested limit.
1075 */
1076 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1077 pipe->max_timeperframe = vfh->timeperframe;
1078
1079 video->queue = &vfh->queue;
1080 INIT_LIST_HEAD(&video->dmaqueue);
1081 atomic_set(&pipe->frame_number, -1);
1082
1083 ret = omap3isp_video_queue_streamon(&vfh->queue);
1084 if (ret < 0)
1085 goto err_check_format;
1086
1087 /* In sensor-to-memory mode, the stream can be started synchronously
1088 * to the stream on command. In memory-to-memory mode, it will be
1089 * started when buffers are queued on both the input and output.
1090 */
1091 if (pipe->input == NULL) {
1092 ret = omap3isp_pipeline_set_stream(pipe,
1093 ISP_PIPELINE_STREAM_CONTINUOUS);
1094 if (ret < 0)
1095 goto err_set_stream;
1096 spin_lock_irqsave(&video->queue->irqlock, flags);
1097 if (list_empty(&video->dmaqueue))
1098 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1099 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1100 }
1101
1102 video->streaming = 1;
1103
1104 mutex_unlock(&video->stream_lock);
1105 return 0;
1106
1107 err_set_stream:
1108 omap3isp_video_queue_streamoff(&vfh->queue);
1109 err_check_format:
1110 media_entity_pipeline_stop(&video->video.entity);
1111 err_pipeline_start:
1112 if (video->isp->pdata->set_constraints)
1113 video->isp->pdata->set_constraints(video->isp, false);
1114 /* The DMA queue must be emptied here, otherwise CCDC interrupts that
1115 * will get triggered the next time the CCDC is powered up will try to
1116 * access buffers that might have been freed but still present in the
1117 * DMA queue. This can easily get triggered if the above
1118 * omap3isp_pipeline_set_stream() call fails on a system with a
1119 * free-running sensor.
1120 */
1121 INIT_LIST_HEAD(&video->dmaqueue);
1122 video->queue = NULL;
1123
1124 mutex_unlock(&video->stream_lock);
1125 return ret;
1126 }
1127
1128 static int
1129 isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1130 {
1131 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1132 struct isp_video *video = video_drvdata(file);
1133 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1134 enum isp_pipeline_state state;
1135 unsigned int streaming;
1136 unsigned long flags;
1137
1138 if (type != video->type)
1139 return -EINVAL;
1140
1141 mutex_lock(&video->stream_lock);
1142
1143 /* Bail out early if this file handle isn't streaming. */
1144 mutex_lock(&vfh->queue.lock);
1145 streaming = vfh->queue.streaming;
1146 mutex_unlock(&vfh->queue.lock);
1147
1148 if (!streaming)
1149 goto done;
1150
1151 /* Update the pipeline state. */
1152 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1153 state = ISP_PIPELINE_STREAM_OUTPUT
1154 | ISP_PIPELINE_QUEUE_OUTPUT;
1155 else
1156 state = ISP_PIPELINE_STREAM_INPUT
1157 | ISP_PIPELINE_QUEUE_INPUT;
1158
1159 spin_lock_irqsave(&pipe->lock, flags);
1160 pipe->state &= ~state;
1161 spin_unlock_irqrestore(&pipe->lock, flags);
1162
1163 /* Stop the stream. */
1164 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1165 omap3isp_video_queue_streamoff(&vfh->queue);
1166 video->queue = NULL;
1167 video->streaming = 0;
1168
1169 if (video->isp->pdata->set_constraints)
1170 video->isp->pdata->set_constraints(video->isp, false);
1171 media_entity_pipeline_stop(&video->video.entity);
1172
1173 done:
1174 mutex_unlock(&video->stream_lock);
1175 return 0;
1176 }
1177
1178 static int
1179 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1180 {
1181 if (input->index > 0)
1182 return -EINVAL;
1183
1184 strlcpy(input->name, "camera", sizeof(input->name));
1185 input->type = V4L2_INPUT_TYPE_CAMERA;
1186
1187 return 0;
1188 }
1189
1190 static int
1191 isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1192 {
1193 *input = 0;
1194
1195 return 0;
1196 }
1197
1198 static int
1199 isp_video_s_input(struct file *file, void *fh, unsigned int input)
1200 {
1201 return input == 0 ? 0 : -EINVAL;
1202 }
1203
1204 static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1205 .vidioc_querycap = isp_video_querycap,
1206 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1207 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1208 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1209 .vidioc_g_fmt_vid_out = isp_video_get_format,
1210 .vidioc_s_fmt_vid_out = isp_video_set_format,
1211 .vidioc_try_fmt_vid_out = isp_video_try_format,
1212 .vidioc_cropcap = isp_video_cropcap,
1213 .vidioc_g_crop = isp_video_get_crop,
1214 .vidioc_s_crop = isp_video_set_crop,
1215 .vidioc_g_parm = isp_video_get_param,
1216 .vidioc_s_parm = isp_video_set_param,
1217 .vidioc_reqbufs = isp_video_reqbufs,
1218 .vidioc_querybuf = isp_video_querybuf,
1219 .vidioc_qbuf = isp_video_qbuf,
1220 .vidioc_dqbuf = isp_video_dqbuf,
1221 .vidioc_streamon = isp_video_streamon,
1222 .vidioc_streamoff = isp_video_streamoff,
1223 .vidioc_enum_input = isp_video_enum_input,
1224 .vidioc_g_input = isp_video_g_input,
1225 .vidioc_s_input = isp_video_s_input,
1226 };
1227
1228 /* -----------------------------------------------------------------------------
1229 * V4L2 file operations
1230 */
1231
1232 static int isp_video_open(struct file *file)
1233 {
1234 struct isp_video *video = video_drvdata(file);
1235 struct isp_video_fh *handle;
1236 int ret = 0;
1237
1238 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1239 if (handle == NULL)
1240 return -ENOMEM;
1241
1242 v4l2_fh_init(&handle->vfh, &video->video);
1243 v4l2_fh_add(&handle->vfh);
1244
1245 /* If this is the first user, initialise the pipeline. */
1246 if (omap3isp_get(video->isp) == NULL) {
1247 ret = -EBUSY;
1248 goto done;
1249 }
1250
1251 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1252 if (ret < 0) {
1253 omap3isp_put(video->isp);
1254 goto done;
1255 }
1256
1257 omap3isp_video_queue_init(&handle->queue, video->type,
1258 &isp_video_queue_ops, video->isp->dev,
1259 sizeof(struct isp_buffer));
1260
1261 memset(&handle->format, 0, sizeof(handle->format));
1262 handle->format.type = video->type;
1263 handle->timeperframe.denominator = 1;
1264
1265 handle->video = video;
1266 file->private_data = &handle->vfh;
1267
1268 done:
1269 if (ret < 0) {
1270 v4l2_fh_del(&handle->vfh);
1271 kfree(handle);
1272 }
1273
1274 return ret;
1275 }
1276
1277 static int isp_video_release(struct file *file)
1278 {
1279 struct isp_video *video = video_drvdata(file);
1280 struct v4l2_fh *vfh = file->private_data;
1281 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1282
1283 /* Disable streaming and free the buffers queue resources. */
1284 isp_video_streamoff(file, vfh, video->type);
1285
1286 mutex_lock(&handle->queue.lock);
1287 omap3isp_video_queue_cleanup(&handle->queue);
1288 mutex_unlock(&handle->queue.lock);
1289
1290 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1291
1292 /* Release the file handle. */
1293 v4l2_fh_del(vfh);
1294 kfree(handle);
1295 file->private_data = NULL;
1296
1297 omap3isp_put(video->isp);
1298
1299 return 0;
1300 }
1301
1302 static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1303 {
1304 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1305 struct isp_video_queue *queue = &vfh->queue;
1306
1307 return omap3isp_video_queue_poll(queue, file, wait);
1308 }
1309
1310 static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1311 {
1312 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1313
1314 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1315 }
1316
1317 static struct v4l2_file_operations isp_video_fops = {
1318 .owner = THIS_MODULE,
1319 .unlocked_ioctl = video_ioctl2,
1320 .open = isp_video_open,
1321 .release = isp_video_release,
1322 .poll = isp_video_poll,
1323 .mmap = isp_video_mmap,
1324 };
1325
1326 /* -----------------------------------------------------------------------------
1327 * ISP video core
1328 */
1329
1330 static const struct isp_video_operations isp_video_dummy_ops = {
1331 };
1332
1333 int omap3isp_video_init(struct isp_video *video, const char *name)
1334 {
1335 const char *direction;
1336 int ret;
1337
1338 switch (video->type) {
1339 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1340 direction = "output";
1341 video->pad.flags = MEDIA_PAD_FL_SINK;
1342 break;
1343 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1344 direction = "input";
1345 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1346 video->video.vfl_dir = VFL_DIR_TX;
1347 break;
1348
1349 default:
1350 return -EINVAL;
1351 }
1352
1353 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1354 if (ret < 0)
1355 return ret;
1356
1357 mutex_init(&video->mutex);
1358 atomic_set(&video->active, 0);
1359
1360 spin_lock_init(&video->pipe.lock);
1361 mutex_init(&video->stream_lock);
1362
1363 /* Initialize the video device. */
1364 if (video->ops == NULL)
1365 video->ops = &isp_video_dummy_ops;
1366
1367 video->video.fops = &isp_video_fops;
1368 snprintf(video->video.name, sizeof(video->video.name),
1369 "OMAP3 ISP %s %s", name, direction);
1370 video->video.vfl_type = VFL_TYPE_GRABBER;
1371 video->video.release = video_device_release_empty;
1372 video->video.ioctl_ops = &isp_video_ioctl_ops;
1373 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1374
1375 video_set_drvdata(&video->video, video);
1376
1377 return 0;
1378 }
1379
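/*
 * Illustrative sketch, not part of the original driver: an ISP module would
 * typically embed a struct isp_video, pick the direction through the buffer
 * type and register the node roughly as follows. The module_* names and the
 * capture_mem value are hypothetical.
 *
 *	module->video_out.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	module->video_out.ops = &module_video_ops;
 *	module->video_out.isp = isp;
 *	module->video_out.capture_mem = PAGE_ALIGN(4096 * 4096) * 3;
 *	module->video_out.bpl_alignment = 32;
 *
 *	ret = omap3isp_video_init(&module->video_out, "module name");
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = omap3isp_video_register(&module->video_out, &isp->v4l2_dev);
 */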
1380 void omap3isp_video_cleanup(struct isp_video *video)
1381 {
1382 media_entity_cleanup(&video->video.entity);
1383 mutex_destroy(&video->stream_lock);
1384 mutex_destroy(&video->mutex);
1385 }
1386
1387 int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1388 {
1389 int ret;
1390
1391 video->video.v4l2_dev = vdev;
1392
1393 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1394 if (ret < 0)
1395 dev_err(video->isp->dev,
1396 "%s: could not register video device (%d)\n",
1397 __func__, ret);
1398
1399 return ret;
1400 }
1401
1402 void omap3isp_video_unregister(struct isp_video *video)
1403 {
1404 if (video_is_registered(&video->video))
1405 video_unregister_device(&video->video);
1406 }