drivers/media/platform/omap3isp/ispvideo.c
1 /*
2 * ispvideo.c
3 *
4 * TI OMAP3 ISP - Generic video node
5 *
6 * Copyright (C) 2009-2010 Nokia Corporation
7 *
8 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
9 * Sakari Ailus <sakari.ailus@iki.fi>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
23 * 02110-1301 USA
24 */
25
26 #include <asm/cacheflush.h>
27 #include <linux/clk.h>
28 #include <linux/mm.h>
29 #include <linux/module.h>
30 #include <linux/omap-iommu.h>
31 #include <linux/pagemap.h>
32 #include <linux/scatterlist.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/vmalloc.h>
36 #include <media/v4l2-dev.h>
37 #include <media/v4l2-ioctl.h>
38 #include <plat/omap-pm.h>
39
40 #include "ispvideo.h"
41 #include "isp.h"
42
43
44 /* -----------------------------------------------------------------------------
45 * Helper functions
46 */
47
48 /*
49 * NOTE: When adding new media bus codes, always remember to add
50 * corresponding in-memory formats to the table below!!!
51 */
52 static struct isp_format_info formats[] = {
53 { V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
54 V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
55 V4L2_PIX_FMT_GREY, 8, 1, },
56 { V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y10_1X10,
57 V4L2_MBUS_FMT_Y10_1X10, V4L2_MBUS_FMT_Y8_1X8,
58 V4L2_PIX_FMT_Y10, 10, 2, },
59 { V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y10_1X10,
60 V4L2_MBUS_FMT_Y12_1X12, V4L2_MBUS_FMT_Y8_1X8,
61 V4L2_PIX_FMT_Y12, 12, 2, },
62 { V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
63 V4L2_MBUS_FMT_SBGGR8_1X8, V4L2_MBUS_FMT_SBGGR8_1X8,
64 V4L2_PIX_FMT_SBGGR8, 8, 1, },
65 { V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
66 V4L2_MBUS_FMT_SGBRG8_1X8, V4L2_MBUS_FMT_SGBRG8_1X8,
67 V4L2_PIX_FMT_SGBRG8, 8, 1, },
68 { V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
69 V4L2_MBUS_FMT_SGRBG8_1X8, V4L2_MBUS_FMT_SGRBG8_1X8,
70 V4L2_PIX_FMT_SGRBG8, 8, 1, },
71 { V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
72 V4L2_MBUS_FMT_SRGGB8_1X8, V4L2_MBUS_FMT_SRGGB8_1X8,
73 V4L2_PIX_FMT_SRGGB8, 8, 1, },
74 { V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8, V4L2_MBUS_FMT_SBGGR10_DPCM8_1X8,
75 V4L2_MBUS_FMT_SBGGR10_1X10, 0,
76 V4L2_PIX_FMT_SBGGR10DPCM8, 8, 1, },
77 { V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8, V4L2_MBUS_FMT_SGBRG10_DPCM8_1X8,
78 V4L2_MBUS_FMT_SGBRG10_1X10, 0,
79 V4L2_PIX_FMT_SGBRG10DPCM8, 8, 1, },
80 { V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
81 V4L2_MBUS_FMT_SGRBG10_1X10, 0,
82 V4L2_PIX_FMT_SGRBG10DPCM8, 8, 1, },
83 { V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8, V4L2_MBUS_FMT_SRGGB10_DPCM8_1X8,
84 V4L2_MBUS_FMT_SRGGB10_1X10, 0,
85 V4L2_PIX_FMT_SRGGB10DPCM8, 8, 1, },
86 { V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
87 V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR8_1X8,
88 V4L2_PIX_FMT_SBGGR10, 10, 2, },
89 { V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
90 V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG8_1X8,
91 V4L2_PIX_FMT_SGBRG10, 10, 2, },
92 { V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
93 V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG8_1X8,
94 V4L2_PIX_FMT_SGRBG10, 10, 2, },
95 { V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
96 V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB8_1X8,
97 V4L2_PIX_FMT_SRGGB10, 10, 2, },
98 { V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
99 V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR8_1X8,
100 V4L2_PIX_FMT_SBGGR12, 12, 2, },
101 { V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
102 V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG8_1X8,
103 V4L2_PIX_FMT_SGBRG12, 12, 2, },
104 { V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
105 V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG8_1X8,
106 V4L2_PIX_FMT_SGRBG12, 12, 2, },
107 { V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
108 V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB8_1X8,
109 V4L2_PIX_FMT_SRGGB12, 12, 2, },
110 { V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
111 V4L2_MBUS_FMT_UYVY8_1X16, 0,
112 V4L2_PIX_FMT_UYVY, 16, 2, },
113 { V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
114 V4L2_MBUS_FMT_YUYV8_1X16, 0,
115 V4L2_PIX_FMT_YUYV, 16, 2, },
116 { V4L2_MBUS_FMT_UYVY8_2X8, V4L2_MBUS_FMT_UYVY8_2X8,
117 V4L2_MBUS_FMT_UYVY8_2X8, 0,
118 V4L2_PIX_FMT_UYVY, 8, 2, },
119 { V4L2_MBUS_FMT_YUYV8_2X8, V4L2_MBUS_FMT_YUYV8_2X8,
120 V4L2_MBUS_FMT_YUYV8_2X8, 0,
121 V4L2_PIX_FMT_YUYV, 8, 2, },
122 /* Empty entry to catch the unsupported pixel code (0) used by the CCDC
123 * module and avoid NULL pointer dereferences.
124 */
125 { 0, }
126 };
127
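/*
 * omap3isp_video_format_info - Look up the format descriptor for a media bus code
 * @code: Media bus code
 *
 * Return a pointer to the matching entry in the formats table, or NULL if the
 * code isn't supported.
 */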
128 const struct isp_format_info *
129 omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
130 {
131 unsigned int i;
132
133 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
134 if (formats[i].code == code)
135 return &formats[i];
136 }
137
138 return NULL;
139 }
140
141 /*
142 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
143 * @video: ISP video instance
144 * @mbus: v4l2_mbus_framefmt format (input)
145 * @pix: v4l2_pix_format format (output)
146 *
147 * Fill the output pix structure with information from the input mbus format.
148 * The bytesperline and sizeimage fields are computed from the requested bytes
149 * per line value in the pix format and information from the video instance.
150 *
151 * Return the number of padding bytes at end of line.
152 */
153 static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
154 const struct v4l2_mbus_framefmt *mbus,
155 struct v4l2_pix_format *pix)
156 {
157 unsigned int bpl = pix->bytesperline;
158 unsigned int min_bpl;
159 unsigned int i;
160
161 memset(pix, 0, sizeof(*pix));
162 pix->width = mbus->width;
163 pix->height = mbus->height;
164
165 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
166 if (formats[i].code == mbus->code)
167 break;
168 }
169
170 if (WARN_ON(i == ARRAY_SIZE(formats)))
171 return 0;
172
173 min_bpl = pix->width * formats[i].bpp;
174
175 /* Clamp the requested bytes per line value. If the maximum bytes per
176 * line value is zero, the module doesn't support user configurable line
177 * sizes. Override the requested value with the minimum in that case.
178 */
179 if (video->bpl_max)
180 bpl = clamp(bpl, min_bpl, video->bpl_max);
181 else
182 bpl = min_bpl;
183
184 if (!video->bpl_zero_padding || bpl != min_bpl)
185 bpl = ALIGN(bpl, video->bpl_alignment);
186
187 pix->pixelformat = formats[i].pixelformat;
188 pix->bytesperline = bpl;
189 pix->sizeimage = pix->bytesperline * pix->height;
190 pix->colorspace = mbus->colorspace;
191 pix->field = mbus->field;
192
193 return bpl - min_bpl;
194 }
195
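/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the media bus code matching the pixel
 * format in the formats table. If no match is found the last (empty) entry is
 * selected, resulting in a media bus code of 0.
 */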
196 static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
197 struct v4l2_mbus_framefmt *mbus)
198 {
199 unsigned int i;
200
201 memset(mbus, 0, sizeof(*mbus));
202 mbus->width = pix->width;
203 mbus->height = pix->height;
204
205 /* Skip the last format in the loop so that it will be selected if no
206 * match is found.
207 */
208 for (i = 0; i < ARRAY_SIZE(formats) - 1; ++i) {
209 if (formats[i].pixelformat == pix->pixelformat)
210 break;
211 }
212
213 mbus->code = formats[i].code;
214 mbus->colorspace = pix->colorspace;
215 mbus->field = pix->field;
216 }
217
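/*
 * isp_video_remote_subdev - Return the subdevice connected to a video node
 * @video: ISP video instance
 * @pad: Optional pointer filled with the index of the connected remote pad
 *
 * Return the v4l2_subdev at the far end of the link connected to the video
 * node pad, or NULL if the pad isn't linked to a subdevice.
 */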
218 static struct v4l2_subdev *
219 isp_video_remote_subdev(struct isp_video *video, u32 *pad)
220 {
221 struct media_pad *remote;
222
223 remote = media_entity_remote_source(&video->pad);
224
225 if (remote == NULL ||
226 media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
227 return NULL;
228
229 if (pad)
230 *pad = remote->index;
231
232 return media_entity_to_v4l2_subdev(remote->entity);
233 }
234
 235 /* Walk the graph, build the pipeline entity mask and locate the video node at the far end. */
236 static int isp_video_get_graph_data(struct isp_video *video,
237 struct isp_pipeline *pipe)
238 {
239 struct media_entity_graph graph;
240 struct media_entity *entity = &video->video.entity;
241 struct media_device *mdev = entity->parent;
242 struct isp_video *far_end = NULL;
243
244 mutex_lock(&mdev->graph_mutex);
245 media_entity_graph_walk_start(&graph, entity);
246
247 while ((entity = media_entity_graph_walk_next(&graph))) {
248 struct isp_video *__video;
249
250 pipe->entities |= 1 << entity->id;
251
252 if (far_end != NULL)
253 continue;
254
255 if (entity == &video->video.entity)
256 continue;
257
258 if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
259 continue;
260
261 __video = to_isp_video(media_entity_to_video_device(entity));
262 if (__video->type != video->type)
263 far_end = __video;
264 }
265
266 mutex_unlock(&mdev->graph_mutex);
267
268 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
269 pipe->input = far_end;
270 pipe->output = video;
271 } else {
272 if (far_end == NULL)
273 return -EPIPE;
274
275 pipe->input = video;
276 pipe->output = far_end;
277 }
278
279 return 0;
280 }
281
282 /*
283 * Validate a pipeline by checking both ends of all links for format
284 * discrepancies.
285 *
 286  * Update the pipeline maximum pixel rate based on the limits reported by the
 287  * blocks in the pipeline.
288 *
289 * Return 0 if all formats match, or -EPIPE if at least one link is found with
290 * different formats on its two ends or if the pipeline doesn't start with a
291 * video source (either a subdev with no input pad, or a non-subdev entity).
292 */
293 static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
294 {
295 struct isp_device *isp = pipe->output->isp;
296 struct media_pad *pad;
297 struct v4l2_subdev *subdev;
298
299 subdev = isp_video_remote_subdev(pipe->output, NULL);
300 if (subdev == NULL)
301 return -EPIPE;
302
303 while (1) {
304 /* Retrieve the sink format */
305 pad = &subdev->entity.pads[0];
306 if (!(pad->flags & MEDIA_PAD_FL_SINK))
307 break;
308
 309 		/* Update the maximum pixel rate */
310 if (subdev == &isp->isp_res.subdev)
311 omap3isp_resizer_max_rate(&isp->isp_res,
312 &pipe->max_rate);
313
314 /* Retrieve the source format. Return an error if no source
315 * entity can be found, and stop checking the pipeline if the
316 * source entity isn't a subdev.
317 */
318 pad = media_entity_remote_source(pad);
319 if (pad == NULL)
320 return -EPIPE;
321
322 if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
323 break;
324
325 subdev = media_entity_to_v4l2_subdev(pad->entity);
326 }
327
328 return 0;
329 }
330
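/*
 * __isp_video_get_format - Retrieve the current format from the remote subdev
 * @video: ISP video instance
 * @format: v4l2_format structure to be filled
 *
 * Query the active format on the remote subdev pad connected to the video
 * node and convert it to a pixel format. Return the number of padding bytes
 * at the end of the line on success, or a negative error code otherwise.
 */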
331 static int
332 __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
333 {
334 struct v4l2_subdev_format fmt;
335 struct v4l2_subdev *subdev;
336 u32 pad;
337 int ret;
338
339 subdev = isp_video_remote_subdev(video, &pad);
340 if (subdev == NULL)
341 return -EINVAL;
342
343 mutex_lock(&video->mutex);
344
345 fmt.pad = pad;
346 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
347 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
348 if (ret == -ENOIOCTLCMD)
349 ret = -EINVAL;
350
351 mutex_unlock(&video->mutex);
352
353 if (ret)
354 return ret;
355
356 format->type = video->type;
357 return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
358 }
359
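/*
 * isp_video_check_format - Check the file handle format against the subdev
 * @video: ISP video instance
 * @vfh: ISP video file handle
 *
 * Verify that the format stored in the file handle matches the format at the
 * output of the connected subdev. Return a negative error code if they don't
 * match or can't be retrieved, or the number of padding bytes at the end of
 * the line otherwise.
 */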
360 static int
361 isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
362 {
363 struct v4l2_format format;
364 int ret;
365
366 memcpy(&format, &vfh->format, sizeof(format));
367 ret = __isp_video_get_format(video, &format);
368 if (ret < 0)
369 return ret;
370
371 if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
372 vfh->format.fmt.pix.height != format.fmt.pix.height ||
373 vfh->format.fmt.pix.width != format.fmt.pix.width ||
374 vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
375 vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
376 return -EINVAL;
377
378 return ret;
379 }
380
381 /* -----------------------------------------------------------------------------
382 * IOMMU management
383 */
384
385 #define IOMMU_FLAG (IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)
386
387 /*
 388  * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter-gather list
 389  * @isp: Pointer to the OMAP3 ISP device.
 390  * @sglist: Pointer to the source scatter-gather list to map.
 391  * @sglen: Number of elements in the scatter-gather list.
 392  *
 393  * Returns the device address mapped by the ISP MMU, or -ENOMEM if we ran
 394  * out of memory.
395 */
396 static dma_addr_t
397 ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
398 {
399 struct sg_table *sgt;
400 u32 da;
401
402 sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
403 if (sgt == NULL)
404 return -ENOMEM;
405
406 sgt->sgl = (struct scatterlist *)sglist;
407 sgt->nents = sglen;
408 sgt->orig_nents = sglen;
409
410 da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
411 if (IS_ERR_VALUE(da))
412 kfree(sgt);
413
414 return da;
415 }
416
417 /*
418 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 419  * @isp: Pointer to the OMAP3 ISP device.
 420  * @da: Device address generated from an ispmmu_vmap call.
421 */
422 static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
423 {
424 struct sg_table *sgt;
425
426 sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
427 kfree(sgt);
428 }
429
430 /* -----------------------------------------------------------------------------
431 * Video queue operations
432 */
433
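/*
 * isp_video_queue_prepare - Compute buffer size and count for a video queue
 * @queue: Video buffers queue
 * @nbuffers: Requested number of buffers (updated)
 * @size: Buffer size in bytes (returned)
 *
 * Set the buffer size from the image size in the file handle format and limit
 * the number of buffers so that the total allocation doesn't exceed the video
 * node's capture memory limit.
 */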
434 static void isp_video_queue_prepare(struct isp_video_queue *queue,
435 unsigned int *nbuffers, unsigned int *size)
436 {
437 struct isp_video_fh *vfh =
438 container_of(queue, struct isp_video_fh, queue);
439 struct isp_video *video = vfh->video;
440
441 *size = vfh->format.fmt.pix.sizeimage;
442 if (*size == 0)
443 return;
444
445 *nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
446 }
447
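/*
 * isp_video_buffer_cleanup - Release the ISP MMU mapping of a buffer
 * @buf: Video buffer
 *
 * Unmap the buffer from the ISP MMU if it has been mapped and clear the
 * stored device address.
 */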
448 static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
449 {
450 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
451 struct isp_buffer *buffer = to_isp_buffer(buf);
452 struct isp_video *video = vfh->video;
453
454 if (buffer->isp_addr) {
455 ispmmu_vunmap(video->isp, buffer->isp_addr);
456 buffer->isp_addr = 0;
457 }
458 }
459
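/*
 * isp_video_buffer_prepare - Map a buffer through the ISP MMU
 * @buf: Video buffer
 *
 * Map the buffer scatter-gather list through the ISP MMU and verify that the
 * resulting device address is aligned to a 32 byte boundary. Store the
 * address and the number of bytes used in the buffer. Return 0 on success, or
 * a negative error code otherwise.
 */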
460 static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
461 {
462 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
463 struct isp_buffer *buffer = to_isp_buffer(buf);
464 struct isp_video *video = vfh->video;
465 unsigned long addr;
466
467 addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
468 if (IS_ERR_VALUE(addr))
469 return -EIO;
470
471 if (!IS_ALIGNED(addr, 32)) {
 472 		dev_dbg(video->isp->dev,
 473 			"Buffer address must be aligned to a 32 byte boundary.\n");
 474 		ispmmu_vunmap(video->isp, addr);
475 return -EINVAL;
476 }
477
478 buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
479 buffer->isp_addr = addr;
480 return 0;
481 }
482
483 /*
484 * isp_video_buffer_queue - Add buffer to streaming queue
485 * @buf: Video buffer
486 *
487 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 488  * queued on both the input and the output and the pipeline isn't already busy.
489 * If the pipeline is busy, it will be restarted in the output module interrupt
490 * handler.
491 */
492 static void isp_video_buffer_queue(struct isp_video_buffer *buf)
493 {
494 struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
495 struct isp_buffer *buffer = to_isp_buffer(buf);
496 struct isp_video *video = vfh->video;
497 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
498 enum isp_pipeline_state state;
499 unsigned long flags;
500 unsigned int empty;
501 unsigned int start;
502
503 empty = list_empty(&video->dmaqueue);
504 list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);
505
506 if (empty) {
507 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
508 state = ISP_PIPELINE_QUEUE_OUTPUT;
509 else
510 state = ISP_PIPELINE_QUEUE_INPUT;
511
512 spin_lock_irqsave(&pipe->lock, flags);
513 pipe->state |= state;
514 video->ops->queue(video, buffer);
515 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
516
517 start = isp_pipeline_ready(pipe);
518 if (start)
519 pipe->state |= ISP_PIPELINE_STREAM;
520 spin_unlock_irqrestore(&pipe->lock, flags);
521
522 if (start)
523 omap3isp_pipeline_set_stream(pipe,
524 ISP_PIPELINE_STREAM_SINGLESHOT);
525 }
526 }
527
528 static const struct isp_video_queue_operations isp_video_queue_ops = {
529 .queue_prepare = &isp_video_queue_prepare,
530 .buffer_prepare = &isp_video_buffer_prepare,
531 .buffer_queue = &isp_video_buffer_queue,
532 .buffer_cleanup = &isp_video_buffer_cleanup,
533 };
534
535 /*
536 * omap3isp_video_buffer_next - Complete the current buffer and return the next
537 * @video: ISP video object
538 *
539 * Remove the current video buffer from the DMA queue and fill its timestamp,
540 * field count and state fields before waking up its completion handler.
541 *
542 * For capture video nodes the buffer state is set to ISP_BUF_STATE_DONE if no
543 * error has been flagged in the pipeline, or to ISP_BUF_STATE_ERROR otherwise.
544 * For video output nodes the buffer state is always set to ISP_BUF_STATE_DONE.
545 *
546 * The DMA queue is expected to contain at least one buffer.
547 *
548 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
549 * empty.
550 */
551 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
552 {
553 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
554 struct isp_video_queue *queue = video->queue;
555 enum isp_pipeline_state state;
556 struct isp_video_buffer *buf;
557 unsigned long flags;
558 struct timespec ts;
559
560 spin_lock_irqsave(&queue->irqlock, flags);
561 if (WARN_ON(list_empty(&video->dmaqueue))) {
562 spin_unlock_irqrestore(&queue->irqlock, flags);
563 return NULL;
564 }
565
566 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
567 irqlist);
568 list_del(&buf->irqlist);
569 spin_unlock_irqrestore(&queue->irqlock, flags);
570
571 ktime_get_ts(&ts);
572 buf->vbuf.timestamp.tv_sec = ts.tv_sec;
573 buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
574
575 /* Do frame number propagation only if this is the output video node.
576 * Frame number either comes from the CSI receivers or it gets
577 * incremented here if H3A is not active.
578 * Note: There is no guarantee that the output buffer will finish
579 * first, so the input number might lag behind by 1 in some cases.
580 */
581 if (video == pipe->output && !pipe->do_propagation)
582 buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
583 else
584 buf->vbuf.sequence = atomic_read(&pipe->frame_number);
585
586 /* Report pipeline errors to userspace on the capture device side. */
587 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
588 buf->state = ISP_BUF_STATE_ERROR;
589 pipe->error = false;
590 } else {
591 buf->state = ISP_BUF_STATE_DONE;
592 }
593
594 wake_up(&buf->wait);
595
596 if (list_empty(&video->dmaqueue)) {
597 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
598 state = ISP_PIPELINE_QUEUE_OUTPUT
599 | ISP_PIPELINE_STREAM;
600 else
601 state = ISP_PIPELINE_QUEUE_INPUT
602 | ISP_PIPELINE_STREAM;
603
604 spin_lock_irqsave(&pipe->lock, flags);
605 pipe->state &= ~state;
606 if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
607 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
608 spin_unlock_irqrestore(&pipe->lock, flags);
609 return NULL;
610 }
611
612 if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
613 spin_lock_irqsave(&pipe->lock, flags);
614 pipe->state &= ~ISP_PIPELINE_STREAM;
615 spin_unlock_irqrestore(&pipe->lock, flags);
616 }
617
618 buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
619 irqlist);
620 buf->state = ISP_BUF_STATE_ACTIVE;
621 return to_isp_buffer(buf);
622 }
623
624 /*
625 * omap3isp_video_resume - Perform resume operation on the buffers
626 * @video: ISP video object
627 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
628 *
 629  * This function is intended to be used in suspend/resume scenarios. It
 630  * requests the video queue layer to discard buffers marked as DONE if it's in
 631  * continuous mode and requests the ISP modules to queue the ACTIVE buffer
 632  * again if there is one.
633 */
634 void omap3isp_video_resume(struct isp_video *video, int continuous)
635 {
636 struct isp_buffer *buf = NULL;
637
638 if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
639 omap3isp_video_queue_discard_done(video->queue);
640
641 if (!list_empty(&video->dmaqueue)) {
642 buf = list_first_entry(&video->dmaqueue,
643 struct isp_buffer, buffer.irqlist);
644 video->ops->queue(video, buf);
645 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
646 } else {
647 if (continuous)
648 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
649 }
650 }
651
652 /* -----------------------------------------------------------------------------
653 * V4L2 ioctls
654 */
655
656 static int
657 isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
658 {
659 struct isp_video *video = video_drvdata(file);
660
661 strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
662 strlcpy(cap->card, video->video.name, sizeof(cap->card));
663 strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
664
665 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
666 cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
667 else
668 cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
669
670 return 0;
671 }
672
673 static int
674 isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
675 {
676 struct isp_video_fh *vfh = to_isp_video_fh(fh);
677 struct isp_video *video = video_drvdata(file);
678
679 if (format->type != video->type)
680 return -EINVAL;
681
682 mutex_lock(&video->mutex);
683 *format = vfh->format;
684 mutex_unlock(&video->mutex);
685
686 return 0;
687 }
688
689 static int
690 isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
691 {
692 struct isp_video_fh *vfh = to_isp_video_fh(fh);
693 struct isp_video *video = video_drvdata(file);
694 struct v4l2_mbus_framefmt fmt;
695
696 if (format->type != video->type)
697 return -EINVAL;
698
699 mutex_lock(&video->mutex);
700
701 /* Fill the bytesperline and sizeimage fields by converting to media bus
702 * format and back to pixel format.
703 */
704 isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
705 isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);
706
707 vfh->format = *format;
708
709 mutex_unlock(&video->mutex);
710 return 0;
711 }
712
713 static int
714 isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
715 {
716 struct isp_video *video = video_drvdata(file);
717 struct v4l2_subdev_format fmt;
718 struct v4l2_subdev *subdev;
719 u32 pad;
720 int ret;
721
722 if (format->type != video->type)
723 return -EINVAL;
724
725 subdev = isp_video_remote_subdev(video, &pad);
726 if (subdev == NULL)
727 return -EINVAL;
728
729 isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);
730
731 fmt.pad = pad;
732 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
733 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
734 if (ret)
735 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
736
737 isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
738 return 0;
739 }
740
741 static int
742 isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
743 {
744 struct isp_video *video = video_drvdata(file);
745 struct v4l2_subdev *subdev;
746 int ret;
747
748 subdev = isp_video_remote_subdev(video, NULL);
749 if (subdev == NULL)
750 return -EINVAL;
751
752 mutex_lock(&video->mutex);
753 ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
754 mutex_unlock(&video->mutex);
755
756 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
757 }
758
759 static int
760 isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
761 {
762 struct isp_video *video = video_drvdata(file);
763 struct v4l2_subdev_format format;
764 struct v4l2_subdev *subdev;
765 u32 pad;
766 int ret;
767
768 subdev = isp_video_remote_subdev(video, &pad);
769 if (subdev == NULL)
770 return -EINVAL;
771
772 /* Try the get crop operation first and fallback to get format if not
773 * implemented.
774 */
775 ret = v4l2_subdev_call(subdev, video, g_crop, crop);
776 if (ret != -ENOIOCTLCMD)
777 return ret;
778
779 format.pad = pad;
780 format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
781 ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
782 if (ret < 0)
783 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
784
785 crop->c.left = 0;
786 crop->c.top = 0;
787 crop->c.width = format.format.width;
788 crop->c.height = format.format.height;
789
790 return 0;
791 }
792
793 static int
794 isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
795 {
796 struct isp_video *video = video_drvdata(file);
797 struct v4l2_subdev *subdev;
798 int ret;
799
800 subdev = isp_video_remote_subdev(video, NULL);
801 if (subdev == NULL)
802 return -EINVAL;
803
804 mutex_lock(&video->mutex);
805 ret = v4l2_subdev_call(subdev, video, s_crop, crop);
806 mutex_unlock(&video->mutex);
807
808 return ret == -ENOIOCTLCMD ? -ENOTTY : ret;
809 }
810
811 static int
812 isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
813 {
814 struct isp_video_fh *vfh = to_isp_video_fh(fh);
815 struct isp_video *video = video_drvdata(file);
816
817 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
818 video->type != a->type)
819 return -EINVAL;
820
821 memset(a, 0, sizeof(*a));
822 a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
823 a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
824 a->parm.output.timeperframe = vfh->timeperframe;
825
826 return 0;
827 }
828
829 static int
830 isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
831 {
832 struct isp_video_fh *vfh = to_isp_video_fh(fh);
833 struct isp_video *video = video_drvdata(file);
834
835 if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
836 video->type != a->type)
837 return -EINVAL;
838
839 if (a->parm.output.timeperframe.denominator == 0)
840 a->parm.output.timeperframe.denominator = 1;
841
842 vfh->timeperframe = a->parm.output.timeperframe;
843
844 return 0;
845 }
846
847 static int
848 isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
849 {
850 struct isp_video_fh *vfh = to_isp_video_fh(fh);
851
852 return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
853 }
854
855 static int
856 isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
857 {
858 struct isp_video_fh *vfh = to_isp_video_fh(fh);
859
860 return omap3isp_video_queue_querybuf(&vfh->queue, b);
861 }
862
863 static int
864 isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
865 {
866 struct isp_video_fh *vfh = to_isp_video_fh(fh);
867
868 return omap3isp_video_queue_qbuf(&vfh->queue, b);
869 }
870
871 static int
872 isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
873 {
874 struct isp_video_fh *vfh = to_isp_video_fh(fh);
875
876 return omap3isp_video_queue_dqbuf(&vfh->queue, b,
877 file->f_flags & O_NONBLOCK);
878 }
879
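/*
 * isp_video_check_external_subdevs - Find and validate the external subdev
 * @video: ISP video instance
 * @pipe: ISP pipeline
 *
 * Locate the entity connected to the sink pad of the first CSI2, CCP2 or CCDC
 * entity that is part of the pipeline. If that entity is a V4L2 subdev, store
 * it as the pipeline's external subdev, fill the external_width and
 * external_rate fields from its format and V4L2_CID_PIXEL_RATE control, and
 * verify that the rate doesn't exceed the CCDC maximum when the CCDC is part
 * of the pipeline.
 */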
880 static int isp_video_check_external_subdevs(struct isp_video *video,
881 struct isp_pipeline *pipe)
882 {
883 struct isp_device *isp = video->isp;
884 struct media_entity *ents[] = {
885 &isp->isp_csi2a.subdev.entity,
886 &isp->isp_csi2c.subdev.entity,
887 &isp->isp_ccp2.subdev.entity,
888 &isp->isp_ccdc.subdev.entity
889 };
890 struct media_pad *source_pad;
891 struct media_entity *source = NULL;
892 struct media_entity *sink;
893 struct v4l2_subdev_format fmt;
894 struct v4l2_ext_controls ctrls;
895 struct v4l2_ext_control ctrl;
896 unsigned int i;
897 int ret = 0;
898
899 for (i = 0; i < ARRAY_SIZE(ents); i++) {
900 /* Is the entity part of the pipeline? */
901 if (!(pipe->entities & (1 << ents[i]->id)))
902 continue;
903
 904 		/* ISP entities always have sink pad == 0. Find the source. */
905 source_pad = media_entity_remote_source(&ents[i]->pads[0]);
906 if (source_pad == NULL)
907 continue;
908
909 source = source_pad->entity;
910 sink = ents[i];
911 break;
912 }
913
914 if (!source) {
915 dev_warn(isp->dev, "can't find source, failing now\n");
916 return ret;
917 }
918
919 if (media_entity_type(source) != MEDIA_ENT_T_V4L2_SUBDEV)
920 return 0;
921
922 pipe->external = media_entity_to_v4l2_subdev(source);
923
924 fmt.pad = source_pad->index;
925 fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
926 ret = v4l2_subdev_call(media_entity_to_v4l2_subdev(sink),
927 pad, get_fmt, NULL, &fmt);
928 if (unlikely(ret < 0)) {
 929 		dev_warn(isp->dev, "get_fmt failed (%d)\n", ret);
930 return ret;
931 }
932
933 pipe->external_width =
934 omap3isp_video_format_info(fmt.format.code)->width;
935
936 memset(&ctrls, 0, sizeof(ctrls));
937 memset(&ctrl, 0, sizeof(ctrl));
938
939 ctrl.id = V4L2_CID_PIXEL_RATE;
940
941 ctrls.count = 1;
942 ctrls.controls = &ctrl;
943
944 ret = v4l2_g_ext_ctrls(pipe->external->ctrl_handler, &ctrls);
945 if (ret < 0) {
946 dev_warn(isp->dev, "no pixel rate control in subdev %s\n",
947 pipe->external->name);
948 return ret;
949 }
950
951 pipe->external_rate = ctrl.value64;
952
953 if (pipe->entities & (1 << isp->isp_ccdc.subdev.entity.id)) {
954 unsigned int rate = UINT_MAX;
955 /*
 956 		 * Check that the external pixel rate doesn't exceed the
 957 		 * maximum pixel rate the CCDC can handle.
958 */
959 omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
960 if (pipe->external_rate > rate)
961 return -ENOSPC;
962 }
963
964 return 0;
965 }
966
967 /*
968 * Stream management
969 *
970 * Every ISP pipeline has a single input and a single output. The input can be
971 * either a sensor or a video node. The output is always a video node.
972 *
 973  * As every pipeline has an output video node, the ISP video object at the
 974  * pipeline output stores the pipeline state. It tracks the streaming state of
975 * both the input and output, as well as the availability of buffers.
976 *
977 * In sensor-to-memory mode, frames are always available at the pipeline input.
978 * Starting the sensor usually requires I2C transfers and must be done in
979 * interruptible context. The pipeline is started and stopped synchronously
980 * to the stream on/off commands. All modules in the pipeline will get their
981 * subdev set stream handler called. The module at the end of the pipeline must
982 * delay starting the hardware until buffers are available at its output.
983 *
984 * In memory-to-memory mode, starting/stopping the stream requires
985 * synchronization between the input and output. ISP modules can't be stopped
986 * in the middle of a frame, and at least some of the modules seem to become
987 * busy as soon as they're started, even if they don't receive a frame start
988 * event. For that reason frames need to be processed in single-shot mode. The
989 * driver needs to wait until a frame is completely processed and written to
990 * memory before restarting the pipeline for the next frame. Pipelined
991 * processing might be possible but requires more testing.
992 *
993 * Stream start must be delayed until buffers are available at both the input
994 * and output. The pipeline must be started in the videobuf queue callback with
995 * the buffers queue spinlock held. The modules subdev set stream operation must
996 * not sleep.
997 */
998 static int
999 isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
1000 {
1001 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1002 struct isp_video *video = video_drvdata(file);
1003 enum isp_pipeline_state state;
1004 struct isp_pipeline *pipe;
1005 unsigned long flags;
1006 int ret;
1007
1008 if (type != video->type)
1009 return -EINVAL;
1010
1011 mutex_lock(&video->stream_lock);
1012
1013 if (video->streaming) {
1014 mutex_unlock(&video->stream_lock);
1015 return -EBUSY;
1016 }
1017
1018 /* Start streaming on the pipeline. No link touching an entity in the
1019 * pipeline can be activated or deactivated once streaming is started.
1020 */
1021 pipe = video->video.entity.pipe
1022 ? to_isp_pipeline(&video->video.entity) : &video->pipe;
1023
1024 pipe->entities = 0;
1025
1026 if (video->isp->pdata->set_constraints)
1027 video->isp->pdata->set_constraints(video->isp, true);
1028 pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);
1029 pipe->max_rate = pipe->l3_ick;
1030
1031 ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
1032 if (ret < 0)
1033 goto err_pipeline_start;
1034
1035 /* Verify that the currently configured format matches the output of
1036 * the connected subdev.
1037 */
1038 ret = isp_video_check_format(video, vfh);
1039 if (ret < 0)
1040 goto err_check_format;
1041
1042 video->bpl_padding = ret;
1043 video->bpl_value = vfh->format.fmt.pix.bytesperline;
1044
1045 ret = isp_video_get_graph_data(video, pipe);
1046 if (ret < 0)
1047 goto err_check_format;
1048
1049 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1050 state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
1051 else
1052 state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
1053
1054 ret = isp_video_check_external_subdevs(video, pipe);
1055 if (ret < 0)
1056 goto err_check_format;
1057
1058 /* Validate the pipeline and update its state. */
1059 ret = isp_video_validate_pipeline(pipe);
1060 if (ret < 0)
1061 goto err_check_format;
1062
1063 pipe->error = false;
1064
1065 spin_lock_irqsave(&pipe->lock, flags);
1066 pipe->state &= ~ISP_PIPELINE_STREAM;
1067 pipe->state |= state;
1068 spin_unlock_irqrestore(&pipe->lock, flags);
1069
1070 /* Set the maximum time per frame as the value requested by userspace.
1071 * This is a soft limit that can be overridden if the hardware doesn't
 1072 	 * support the requested limit.
1073 */
1074 if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
1075 pipe->max_timeperframe = vfh->timeperframe;
1076
1077 video->queue = &vfh->queue;
1078 INIT_LIST_HEAD(&video->dmaqueue);
1079 atomic_set(&pipe->frame_number, -1);
1080
1081 ret = omap3isp_video_queue_streamon(&vfh->queue);
1082 if (ret < 0)
1083 goto err_check_format;
1084
1085 /* In sensor-to-memory mode, the stream can be started synchronously
1086 * to the stream on command. In memory-to-memory mode, it will be
1087 * started when buffers are queued on both the input and output.
1088 */
1089 if (pipe->input == NULL) {
1090 ret = omap3isp_pipeline_set_stream(pipe,
1091 ISP_PIPELINE_STREAM_CONTINUOUS);
1092 if (ret < 0)
1093 goto err_set_stream;
1094 spin_lock_irqsave(&video->queue->irqlock, flags);
1095 if (list_empty(&video->dmaqueue))
1096 video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
1097 spin_unlock_irqrestore(&video->queue->irqlock, flags);
1098 }
1099
1100 video->streaming = 1;
1101
1102 mutex_unlock(&video->stream_lock);
1103 return 0;
1104
1105 err_set_stream:
1106 omap3isp_video_queue_streamoff(&vfh->queue);
1107 err_check_format:
1108 media_entity_pipeline_stop(&video->video.entity);
1109 err_pipeline_start:
1110 if (video->isp->pdata->set_constraints)
1111 video->isp->pdata->set_constraints(video->isp, false);
1112 /* The DMA queue must be emptied here, otherwise CCDC interrupts that
1113 * will get triggered the next time the CCDC is powered up will try to
1114 * access buffers that might have been freed but still present in the
1115 * DMA queue. This can easily get triggered if the above
1116 * omap3isp_pipeline_set_stream() call fails on a system with a
1117 * free-running sensor.
1118 */
1119 INIT_LIST_HEAD(&video->dmaqueue);
1120 video->queue = NULL;
1121
1122 mutex_unlock(&video->stream_lock);
1123 return ret;
1124 }
1125
1126 static int
1127 isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
1128 {
1129 struct isp_video_fh *vfh = to_isp_video_fh(fh);
1130 struct isp_video *video = video_drvdata(file);
1131 struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
1132 enum isp_pipeline_state state;
1133 unsigned int streaming;
1134 unsigned long flags;
1135
1136 if (type != video->type)
1137 return -EINVAL;
1138
1139 mutex_lock(&video->stream_lock);
1140
 1141 	/* Nothing to do if the queue isn't streaming. */
1142 mutex_lock(&vfh->queue.lock);
1143 streaming = vfh->queue.streaming;
1144 mutex_unlock(&vfh->queue.lock);
1145
1146 if (!streaming)
1147 goto done;
1148
1149 /* Update the pipeline state. */
1150 if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1151 state = ISP_PIPELINE_STREAM_OUTPUT
1152 | ISP_PIPELINE_QUEUE_OUTPUT;
1153 else
1154 state = ISP_PIPELINE_STREAM_INPUT
1155 | ISP_PIPELINE_QUEUE_INPUT;
1156
1157 spin_lock_irqsave(&pipe->lock, flags);
1158 pipe->state &= ~state;
1159 spin_unlock_irqrestore(&pipe->lock, flags);
1160
1161 /* Stop the stream. */
1162 omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
1163 omap3isp_video_queue_streamoff(&vfh->queue);
1164 video->queue = NULL;
1165 video->streaming = 0;
1166
1167 if (video->isp->pdata->set_constraints)
1168 video->isp->pdata->set_constraints(video->isp, false);
1169 media_entity_pipeline_stop(&video->video.entity);
1170
1171 done:
1172 mutex_unlock(&video->stream_lock);
1173 return 0;
1174 }
1175
1176 static int
1177 isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
1178 {
1179 if (input->index > 0)
1180 return -EINVAL;
1181
1182 strlcpy(input->name, "camera", sizeof(input->name));
1183 input->type = V4L2_INPUT_TYPE_CAMERA;
1184
1185 return 0;
1186 }
1187
1188 static int
1189 isp_video_g_input(struct file *file, void *fh, unsigned int *input)
1190 {
1191 *input = 0;
1192
1193 return 0;
1194 }
1195
1196 static int
1197 isp_video_s_input(struct file *file, void *fh, unsigned int input)
1198 {
1199 return input == 0 ? 0 : -EINVAL;
1200 }
1201
1202 static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
1203 .vidioc_querycap = isp_video_querycap,
1204 .vidioc_g_fmt_vid_cap = isp_video_get_format,
1205 .vidioc_s_fmt_vid_cap = isp_video_set_format,
1206 .vidioc_try_fmt_vid_cap = isp_video_try_format,
1207 .vidioc_g_fmt_vid_out = isp_video_get_format,
1208 .vidioc_s_fmt_vid_out = isp_video_set_format,
1209 .vidioc_try_fmt_vid_out = isp_video_try_format,
1210 .vidioc_cropcap = isp_video_cropcap,
1211 .vidioc_g_crop = isp_video_get_crop,
1212 .vidioc_s_crop = isp_video_set_crop,
1213 .vidioc_g_parm = isp_video_get_param,
1214 .vidioc_s_parm = isp_video_set_param,
1215 .vidioc_reqbufs = isp_video_reqbufs,
1216 .vidioc_querybuf = isp_video_querybuf,
1217 .vidioc_qbuf = isp_video_qbuf,
1218 .vidioc_dqbuf = isp_video_dqbuf,
1219 .vidioc_streamon = isp_video_streamon,
1220 .vidioc_streamoff = isp_video_streamoff,
1221 .vidioc_enum_input = isp_video_enum_input,
1222 .vidioc_g_input = isp_video_g_input,
1223 .vidioc_s_input = isp_video_s_input,
1224 };
1225
1226 /* -----------------------------------------------------------------------------
1227 * V4L2 file operations
1228 */
1229
1230 static int isp_video_open(struct file *file)
1231 {
1232 struct isp_video *video = video_drvdata(file);
1233 struct isp_video_fh *handle;
1234 int ret = 0;
1235
1236 handle = kzalloc(sizeof(*handle), GFP_KERNEL);
1237 if (handle == NULL)
1238 return -ENOMEM;
1239
1240 v4l2_fh_init(&handle->vfh, &video->video);
1241 v4l2_fh_add(&handle->vfh);
1242
1243 /* If this is the first user, initialise the pipeline. */
1244 if (omap3isp_get(video->isp) == NULL) {
1245 ret = -EBUSY;
1246 goto done;
1247 }
1248
1249 ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
1250 if (ret < 0) {
1251 omap3isp_put(video->isp);
1252 goto done;
1253 }
1254
1255 omap3isp_video_queue_init(&handle->queue, video->type,
1256 &isp_video_queue_ops, video->isp->dev,
1257 sizeof(struct isp_buffer));
1258
1259 memset(&handle->format, 0, sizeof(handle->format));
1260 handle->format.type = video->type;
1261 handle->timeperframe.denominator = 1;
1262
1263 handle->video = video;
1264 file->private_data = &handle->vfh;
1265
1266 done:
1267 if (ret < 0) {
1268 v4l2_fh_del(&handle->vfh);
1269 kfree(handle);
1270 }
1271
1272 return ret;
1273 }
1274
1275 static int isp_video_release(struct file *file)
1276 {
1277 struct isp_video *video = video_drvdata(file);
1278 struct v4l2_fh *vfh = file->private_data;
1279 struct isp_video_fh *handle = to_isp_video_fh(vfh);
1280
1281 /* Disable streaming and free the buffers queue resources. */
1282 isp_video_streamoff(file, vfh, video->type);
1283
1284 mutex_lock(&handle->queue.lock);
1285 omap3isp_video_queue_cleanup(&handle->queue);
1286 mutex_unlock(&handle->queue.lock);
1287
1288 omap3isp_pipeline_pm_use(&video->video.entity, 0);
1289
1290 /* Release the file handle. */
1291 v4l2_fh_del(vfh);
1292 kfree(handle);
1293 file->private_data = NULL;
1294
1295 omap3isp_put(video->isp);
1296
1297 return 0;
1298 }
1299
1300 static unsigned int isp_video_poll(struct file *file, poll_table *wait)
1301 {
1302 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1303 struct isp_video_queue *queue = &vfh->queue;
1304
1305 return omap3isp_video_queue_poll(queue, file, wait);
1306 }
1307
1308 static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
1309 {
1310 struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
1311
1312 return omap3isp_video_queue_mmap(&vfh->queue, vma);
1313 }
1314
1315 static struct v4l2_file_operations isp_video_fops = {
1316 .owner = THIS_MODULE,
1317 .unlocked_ioctl = video_ioctl2,
1318 .open = isp_video_open,
1319 .release = isp_video_release,
1320 .poll = isp_video_poll,
1321 .mmap = isp_video_mmap,
1322 };
1323
1324 /* -----------------------------------------------------------------------------
1325 * ISP video core
1326 */
1327
1328 static const struct isp_video_operations isp_video_dummy_ops = {
1329 };
1330
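/*
 * omap3isp_video_init - Initialize an ISP video node
 * @video: ISP video instance
 * @name: Video node name suffix
 *
 * Initialize the media entity, locks and video_device embedded in the ISP
 * video instance based on the video node type (capture or output). Return 0
 * on success or a negative error code otherwise.
 */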
1331 int omap3isp_video_init(struct isp_video *video, const char *name)
1332 {
1333 const char *direction;
1334 int ret;
1335
1336 switch (video->type) {
1337 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
1338 direction = "output";
1339 video->pad.flags = MEDIA_PAD_FL_SINK;
1340 break;
1341 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1342 direction = "input";
1343 video->pad.flags = MEDIA_PAD_FL_SOURCE;
1344 video->video.vfl_dir = VFL_DIR_TX;
1345 break;
1346
1347 default:
1348 return -EINVAL;
1349 }
1350
1351 ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
1352 if (ret < 0)
1353 return ret;
1354
1355 mutex_init(&video->mutex);
1356 atomic_set(&video->active, 0);
1357
1358 spin_lock_init(&video->pipe.lock);
1359 mutex_init(&video->stream_lock);
1360
1361 /* Initialize the video device. */
1362 if (video->ops == NULL)
1363 video->ops = &isp_video_dummy_ops;
1364
1365 video->video.fops = &isp_video_fops;
1366 snprintf(video->video.name, sizeof(video->video.name),
1367 "OMAP3 ISP %s %s", name, direction);
1368 video->video.vfl_type = VFL_TYPE_GRABBER;
1369 video->video.release = video_device_release_empty;
1370 video->video.ioctl_ops = &isp_video_ioctl_ops;
1371 video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;
1372
1373 video_set_drvdata(&video->video, video);
1374
1375 return 0;
1376 }
1377
1378 void omap3isp_video_cleanup(struct isp_video *video)
1379 {
1380 media_entity_cleanup(&video->video.entity);
1381 mutex_destroy(&video->stream_lock);
1382 mutex_destroy(&video->mutex);
1383 }
1384
1385 int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
1386 {
1387 int ret;
1388
1389 video->video.v4l2_dev = vdev;
1390
1391 ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
1392 if (ret < 0)
1393 printk(KERN_ERR "%s: could not register video device (%d)\n",
1394 __func__, ret);
1395
1396 return ret;
1397 }
1398
1399 void omap3isp_video_unregister(struct isp_video *video)
1400 {
1401 if (video_is_registered(&video->video))
1402 video_unregister_device(&video->video);
1403 }