drivers/media/video/omap3isp/ispvideo.c
/*
 * ispvideo.c
 *
 * TI OMAP3 ISP - Generic video node
 *
 * Copyright (C) 2009-2010 Nokia Corporation
 *
 * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *	     Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/omap-pm.h>

#include "ispvideo.h"
#include "isp.h"


/* -----------------------------------------------------------------------------
 * Helper functions
 */

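/*
 * Table of formats supported on the video nodes. Each entry lists a media bus
 * code, its truncated and uncompressed counterparts, the corresponding V4L2
 * memory pixel format and the number of bits per pixel (see struct
 * isp_format_info in ispvideo.h for the field layout).
 */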
static struct isp_format_info formats[] = {
	{ V4L2_MBUS_FMT_Y8_1X8, V4L2_MBUS_FMT_Y8_1X8,
	  V4L2_MBUS_FMT_Y8_1X8, V4L2_PIX_FMT_GREY, 8, },
	{ V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8, V4L2_MBUS_FMT_SGRBG10_DPCM8_1X8,
	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10DPCM8, 8, },
	{ V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR10_1X10, V4L2_PIX_FMT_SBGGR10, 10, },
	{ V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG10_1X10, V4L2_PIX_FMT_SGBRG10, 10, },
	{ V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG10_1X10, V4L2_PIX_FMT_SGRBG10, 10, },
	{ V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB10_1X10, V4L2_PIX_FMT_SRGGB10, 10, },
	{ V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_MBUS_FMT_SBGGR10_1X10,
	  V4L2_MBUS_FMT_SBGGR12_1X12, V4L2_PIX_FMT_SBGGR12, 12, },
	{ V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_MBUS_FMT_SGBRG10_1X10,
	  V4L2_MBUS_FMT_SGBRG12_1X12, V4L2_PIX_FMT_SGBRG12, 12, },
	{ V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_MBUS_FMT_SGRBG10_1X10,
	  V4L2_MBUS_FMT_SGRBG12_1X12, V4L2_PIX_FMT_SGRBG12, 12, },
	{ V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_MBUS_FMT_SRGGB10_1X10,
	  V4L2_MBUS_FMT_SRGGB12_1X12, V4L2_PIX_FMT_SRGGB12, 12, },
	{ V4L2_MBUS_FMT_UYVY8_1X16, V4L2_MBUS_FMT_UYVY8_1X16,
	  V4L2_MBUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 16, },
	{ V4L2_MBUS_FMT_YUYV8_1X16, V4L2_MBUS_FMT_YUYV8_1X16,
	  V4L2_MBUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 16, },
};

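/*
 * omap3isp_video_format_info - Look up the format description for a bus code
 * @code: media bus format code
 *
 * Return a pointer to the matching entry in the formats table, or NULL if the
 * media bus code isn't supported by the driver.
 */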
const struct isp_format_info *
omap3isp_video_format_info(enum v4l2_mbus_pixelcode code)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == code)
			return &formats[i];
	}

	return NULL;
}

/*
 * isp_video_mbus_to_pix - Convert v4l2_mbus_framefmt to v4l2_pix_format
 * @video: ISP video instance
 * @mbus: v4l2_mbus_framefmt format (input)
 * @pix: v4l2_pix_format format (output)
 *
 * Fill the output pix structure with information from the input mbus format.
 * The bytesperline and sizeimage fields are computed from the requested bytes
 * per line value in the pix format and information from the video instance.
 *
 * Return the number of padding bytes at end of line.
 */
static unsigned int isp_video_mbus_to_pix(const struct isp_video *video,
					  const struct v4l2_mbus_framefmt *mbus,
					  struct v4l2_pix_format *pix)
{
	unsigned int bpl = pix->bytesperline;
	unsigned int min_bpl;
	unsigned int i;

	memset(pix, 0, sizeof(*pix));
	pix->width = mbus->width;
	pix->height = mbus->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].code == mbus->code)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return 0;

	min_bpl = pix->width * ALIGN(formats[i].bpp, 8) / 8;

	/* Clamp the requested bytes per line value. If the maximum bytes per
	 * line value is zero, the module doesn't support user configurable line
	 * sizes. Override the requested value with the minimum in that case.
	 */
	if (video->bpl_max)
		bpl = clamp(bpl, min_bpl, video->bpl_max);
	else
		bpl = min_bpl;

	if (!video->bpl_zero_padding || bpl != min_bpl)
		bpl = ALIGN(bpl, video->bpl_alignment);

	pix->pixelformat = formats[i].pixelformat;
	pix->bytesperline = bpl;
	pix->sizeimage = pix->bytesperline * pix->height;
	pix->colorspace = mbus->colorspace;
	pix->field = mbus->field;

	return bpl - min_bpl;
}

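/*
 * isp_video_pix_to_mbus - Convert v4l2_pix_format to v4l2_mbus_framefmt
 * @pix: v4l2_pix_format format (input)
 * @mbus: v4l2_mbus_framefmt format (output)
 *
 * Fill the output mbus structure with the width, height, colorspace and field
 * from the input pix format. The media bus code is looked up in the formats
 * table from the pixel format.
 */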
static void isp_video_pix_to_mbus(const struct v4l2_pix_format *pix,
				  struct v4l2_mbus_framefmt *mbus)
{
	unsigned int i;

	memset(mbus, 0, sizeof(*mbus));
	mbus->width = pix->width;
	mbus->height = pix->height;

	for (i = 0; i < ARRAY_SIZE(formats); ++i) {
		if (formats[i].pixelformat == pix->pixelformat)
			break;
	}

	if (WARN_ON(i == ARRAY_SIZE(formats)))
		return;

	mbus->code = formats[i].code;
	mbus->colorspace = pix->colorspace;
	mbus->field = pix->field;
}

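/*
 * isp_video_remote_subdev - Return the subdev connected to a video node
 * @video: ISP video instance
 * @pad: optional storage for the index of the remote pad
 *
 * Return the V4L2 subdevice at the other end of the link connected to the
 * video node pad, or NULL if the video node isn't linked to a subdevice.
 */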
static struct v4l2_subdev *
isp_video_remote_subdev(struct isp_video *video, u32 *pad)
{
	struct media_pad *remote;

	remote = media_entity_remote_source(&video->pad);

	if (remote == NULL ||
	    media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
		return NULL;

	if (pad)
		*pad = remote->index;

	return media_entity_to_v4l2_subdev(remote->entity);
}

/* Return a pointer to the ISP video instance at the far end of the pipeline. */
static struct isp_video *
isp_video_far_end(struct isp_video *video)
{
	struct media_entity_graph graph;
	struct media_entity *entity = &video->video.entity;
	struct media_device *mdev = entity->parent;
	struct isp_video *far_end = NULL;

	mutex_lock(&mdev->graph_mutex);
	media_entity_graph_walk_start(&graph, entity);

	while ((entity = media_entity_graph_walk_next(&graph))) {
		if (entity == &video->video.entity)
			continue;

		if (media_entity_type(entity) != MEDIA_ENT_T_DEVNODE)
			continue;

		far_end = to_isp_video(media_entity_to_video_device(entity));
		if (far_end->type != video->type)
			break;

		far_end = NULL;
	}

	mutex_unlock(&mdev->graph_mutex);
	return far_end;
}

/*
 * Validate a pipeline by checking both ends of all links for format
 * discrepancies.
 *
 * Compute the minimum time per frame value as the maximum of time per frame
 * limits reported by every block in the pipeline.
 *
 * Return 0 if all formats match, or -EPIPE if at least one link is found with
 * different formats on its two ends.
 */
static int isp_video_validate_pipeline(struct isp_pipeline *pipe)
{
	struct isp_device *isp = pipe->output->isp;
	struct v4l2_subdev_format fmt_source;
	struct v4l2_subdev_format fmt_sink;
	struct media_pad *pad;
	struct v4l2_subdev *subdev;
	int ret;

	pipe->max_rate = pipe->l3_ick;

	subdev = isp_video_remote_subdev(pipe->output, NULL);
	if (subdev == NULL)
		return -EPIPE;

	while (1) {
		/* Retrieve the sink format */
		pad = &subdev->entity.pads[0];
		if (!(pad->flags & MEDIA_PAD_FL_SINK))
			break;

		fmt_sink.pad = pad->index;
		fmt_sink.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_sink);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Update the maximum frame rate */
		if (subdev == &isp->isp_res.subdev)
			omap3isp_resizer_max_rate(&isp->isp_res,
						  &pipe->max_rate);

		/* Check ccdc maximum data rate when data comes from sensor
		 * TODO: Include ccdc rate in pipe->max_rate and compare the
		 *       total pipe rate with the input data rate from sensor.
		 */
		if (subdev == &isp->isp_ccdc.subdev && pipe->input == NULL) {
			unsigned int rate = UINT_MAX;

			omap3isp_ccdc_max_rate(&isp->isp_ccdc, &rate);
			if (isp->isp_ccdc.vpcfg.pixelclk > rate)
				return -ENOSPC;
		}

		/* Retrieve the source format */
		pad = media_entity_remote_source(pad);
		if (pad == NULL ||
		    media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
			break;

		subdev = media_entity_to_v4l2_subdev(pad->entity);

		fmt_source.pad = pad->index;
		fmt_source.which = V4L2_SUBDEV_FORMAT_ACTIVE;
		ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt_source);
		if (ret < 0 && ret != -ENOIOCTLCMD)
			return -EPIPE;

		/* Check if the two ends match */
		if (fmt_source.format.code != fmt_sink.format.code ||
		    fmt_source.format.width != fmt_sink.format.width ||
		    fmt_source.format.height != fmt_sink.format.height)
			return -EPIPE;
	}

	return 0;
}

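/*
 * __isp_video_get_format - Retrieve the current format from the remote subdev
 * @video: ISP video instance
 * @format: V4L2 format structure to be filled
 *
 * Query the active format on the remote subdev pad connected to the video
 * node and convert it to a V4L2 pixel format stored in @format.
 */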
static int
__isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
{
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret == -ENOIOCTLCMD)
		ret = -EINVAL;

	mutex_unlock(&video->mutex);

	if (ret)
		return ret;

	format->type = video->type;
	return isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
}

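/*
 * isp_video_check_format - Verify the file handle format against the hardware
 * @video: ISP video instance
 * @vfh: ISP video file handle
 *
 * Compare the format stored in the file handle with the format currently
 * configured on the connected subdev. Return 0 if they match, or -EINVAL if
 * the pixel format, size, bytes per line or image size differ.
 */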
static int
isp_video_check_format(struct isp_video *video, struct isp_video_fh *vfh)
{
	struct v4l2_format format;
	int ret;

	memcpy(&format, &vfh->format, sizeof(format));
	ret = __isp_video_get_format(video, &format);
	if (ret < 0)
		return ret;

	if (vfh->format.fmt.pix.pixelformat != format.fmt.pix.pixelformat ||
	    vfh->format.fmt.pix.height != format.fmt.pix.height ||
	    vfh->format.fmt.pix.width != format.fmt.pix.width ||
	    vfh->format.fmt.pix.bytesperline != format.fmt.pix.bytesperline ||
	    vfh->format.fmt.pix.sizeimage != format.fmt.pix.sizeimage)
		return -EINVAL;

	return ret;
}

/* -----------------------------------------------------------------------------
 * IOMMU management
 */

#define IOMMU_FLAG	(IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_8)

/*
 * ispmmu_vmap - Wrapper for virtual memory mapping of a scatter gather list
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @sglist: Pointer to the source scatter gather list to map.
 * @sglen: Number of elements of the scatter gather list.
 *
 * Return the device address mapped by the ISP MMU, or -ENOMEM if we ran out
 * of memory.
 */
static dma_addr_t
ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
{
	struct sg_table *sgt;
	u32 da;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (sgt == NULL)
		return -ENOMEM;

	sgt->sgl = (struct scatterlist *)sglist;
	sgt->nents = sglen;
	sgt->orig_nents = sglen;

	da = iommu_vmap(isp->iommu, 0, sgt, IOMMU_FLAG);
	if (IS_ERR_VALUE(da))
		kfree(sgt);

	return da;
}

/*
 * ispmmu_vunmap - Unmap a device address from the ISP MMU
 * @isp: Device pointer specific to the OMAP3 ISP.
 * @da: Device address generated from an ispmmu_vmap call.
 */
static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
	struct sg_table *sgt;

	sgt = iommu_vunmap(isp->iommu, (u32)da);
	kfree(sgt);
}

/* -----------------------------------------------------------------------------
 * Video queue operations
 */

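/*
 * isp_video_queue_prepare - Limit the number of buffers to be allocated
 * @queue: ISP video buffer queue
 * @nbuffers: requested number of buffers (updated on return)
 * @size: buffer size in bytes (set on return)
 *
 * Set the buffer size from the active format and clamp the number of buffers
 * so that the total amount of capture memory doesn't exceed the
 * video->capture_mem limit.
 */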
static void isp_video_queue_prepare(struct isp_video_queue *queue,
				    unsigned int *nbuffers, unsigned int *size)
{
	struct isp_video_fh *vfh =
		container_of(queue, struct isp_video_fh, queue);
	struct isp_video *video = vfh->video;

	*size = vfh->format.fmt.pix.sizeimage;
	if (*size == 0)
		return;

	*nbuffers = min(*nbuffers, video->capture_mem / PAGE_ALIGN(*size));
}

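/*
 * isp_video_buffer_cleanup - Release the ISP MMU mapping of a buffer
 * @buf: Video buffer
 *
 * Unmap the buffer from the ISP MMU address space if it has been mapped by
 * isp_video_buffer_prepare().
 */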
static void isp_video_buffer_cleanup(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;

	if (buffer->isp_addr) {
		ispmmu_vunmap(video->isp, buffer->isp_addr);
		buffer->isp_addr = 0;
	}
}

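/*
 * isp_video_buffer_prepare - Map a buffer through the ISP MMU
 * @buf: Video buffer
 *
 * Map the buffer scatter-gather list through the ISP MMU and store the
 * resulting device address in the ISP buffer. Fail if the address isn't
 * aligned to a 32-byte boundary.
 */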
static int isp_video_buffer_prepare(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	unsigned long addr;

	addr = ispmmu_vmap(video->isp, buf->sglist, buf->sglen);
	if (IS_ERR_VALUE(addr))
		return -EIO;

	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->isp->dev, "Buffer address must be "
			"aligned to a 32-byte boundary.\n");
		/* Release the mapping that was just created. */
		ispmmu_vunmap(video->isp, addr);
		return -EINVAL;
	}

	buf->vbuf.bytesused = vfh->format.fmt.pix.sizeimage;
	buffer->isp_addr = addr;
	return 0;
}

/*
 * isp_video_buffer_queue - Add buffer to streaming queue
 * @buf: Video buffer
 *
 * In memory-to-memory mode, start streaming on the pipeline if buffers are
 * queued on both the input and the output, if the pipeline isn't already busy.
 * If the pipeline is busy, it will be restarted in the output module interrupt
 * handler.
 */
static void isp_video_buffer_queue(struct isp_video_buffer *buf)
{
	struct isp_video_fh *vfh = isp_video_queue_to_isp_video_fh(buf->queue);
	struct isp_buffer *buffer = to_isp_buffer(buf);
	struct isp_video *video = vfh->video;
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned long flags;
	unsigned int empty;
	unsigned int start;

	empty = list_empty(&video->dmaqueue);
	list_add_tail(&buffer->buffer.irqlist, &video->dmaqueue);

	if (empty) {
		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT;
		else
			state = ISP_PIPELINE_QUEUE_INPUT;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state |= state;
		video->ops->queue(video, buffer);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;

		start = isp_pipeline_ready(pipe);
		if (start)
			pipe->state |= ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);

		if (start)
			omap3isp_pipeline_set_stream(pipe,
						ISP_PIPELINE_STREAM_SINGLESHOT);
	}
}

static const struct isp_video_queue_operations isp_video_queue_ops = {
	.queue_prepare = &isp_video_queue_prepare,
	.buffer_prepare = &isp_video_buffer_prepare,
	.buffer_queue = &isp_video_buffer_queue,
	.buffer_cleanup = &isp_video_buffer_cleanup,
};

/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 * @error: Whether an error occurred during capture
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp,
 * field count and state fields before waking up its completion handler.
 *
 * The buffer state is set to ISP_BUF_STATE_DONE if no error occurred (@error
 * is 0) or ISP_BUF_STATE_ERROR otherwise (@error is non-zero).
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue
 * is empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video,
					      unsigned int error)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	struct isp_video_queue *queue = video->queue;
	enum isp_pipeline_state state;
	struct isp_video_buffer *buf;
	unsigned long flags;
	struct timespec ts;

	spin_lock_irqsave(&queue->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&queue->irqlock, flags);

	ktime_get_ts(&ts);
	buf->vbuf.timestamp.tv_sec = ts.tv_sec;
	buf->vbuf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vbuf.sequence = atomic_inc_return(&pipe->frame_number);
	else
		buf->vbuf.sequence = atomic_read(&pipe->frame_number);

	buf->state = error ? ISP_BUF_STATE_ERROR : ISP_BUF_STATE_DONE;

	wake_up(&buf->wait);

	if (list_empty(&video->dmaqueue)) {
		if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock_irqrestore(&pipe->lock, flags);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_video_buffer,
			       irqlist);
	buf->state = ISP_BUF_STATE_ACTIVE;
	return to_isp_buffer(buf);
}

/*
 * omap3isp_video_resume - Perform resume operation on the buffers
 * @video: ISP video object
 * @continuous: Pipeline is in single shot mode if 0 or continuous mode otherwise
 *
 * This function is intended to be used in suspend/resume scenarios. It
 * requests the video queue layer to discard buffers marked as DONE if it's in
 * continuous mode and requests the ISP modules to queue the ACTIVE buffer
 * again if there is one.
 */
void omap3isp_video_resume(struct isp_video *video, int continuous)
{
	struct isp_buffer *buf = NULL;

	if (continuous && video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		omap3isp_video_queue_discard_done(video->queue);

	if (!list_empty(&video->dmaqueue)) {
		buf = list_first_entry(&video->dmaqueue,
				       struct isp_buffer, buffer.irqlist);
		video->ops->queue(video, buf);
		video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_QUEUED;
	} else {
		if (continuous)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
	}
}

/* -----------------------------------------------------------------------------
 * V4L2 ioctls
 */

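/*
 * isp_video_querycap - Report the driver capabilities
 *
 * Capture video nodes report V4L2_CAP_VIDEO_CAPTURE and output video nodes
 * report V4L2_CAP_VIDEO_OUTPUT. Both report V4L2_CAP_STREAMING; read/write
 * I/O is not advertised.
 */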
static int
isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
{
	struct isp_video *video = video_drvdata(file);

	strlcpy(cap->driver, ISP_VIDEO_DRIVER_NAME, sizeof(cap->driver));
	strlcpy(cap->card, video->video.name, sizeof(cap->card));
	strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
	cap->version = ISP_VIDEO_DRIVER_VERSION;

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	else
		cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;

	return 0;
}

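/*
 * Format handling: the format is stored per file handle. G_FMT returns the
 * stored format, S_FMT adjusts the requested format by converting it to a
 * media bus format and back (to fill bytesperline and sizeimage) before
 * storing it, and TRY_FMT returns the format currently configured on the
 * connected subdev converted to a pixel format.
 */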
static int
isp_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);
	*format = vfh->format;
	mutex_unlock(&video->mutex);

	return 0;
}

static int
isp_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct v4l2_mbus_framefmt fmt;

	if (format->type != video->type)
		return -EINVAL;

	mutex_lock(&video->mutex);

	/* Fill the bytesperline and sizeimage fields by converting to media bus
	 * format and back to pixel format.
	 */
	isp_video_pix_to_mbus(&format->fmt.pix, &fmt);
	isp_video_mbus_to_pix(video, &fmt, &format->fmt.pix);

	vfh->format = *format;

	mutex_unlock(&video->mutex);
	return 0;
}

static int
isp_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format fmt;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	if (format->type != video->type)
		return -EINVAL;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	isp_video_pix_to_mbus(&format->fmt.pix, &fmt.format);

	fmt.pad = pad;
	fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
	if (ret)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	isp_video_mbus_to_pix(video, &fmt.format, &format->fmt.pix);
	return 0;
}

static int
isp_video_cropcap(struct file *file, void *fh, struct v4l2_cropcap *cropcap)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, cropcap, cropcap);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

static int
isp_video_get_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev_format format;
	struct v4l2_subdev *subdev;
	u32 pad;
	int ret;

	subdev = isp_video_remote_subdev(video, &pad);
	if (subdev == NULL)
		return -EINVAL;

	/* Try the get crop operation first and fallback to get format if not
	 * implemented.
	 */
	ret = v4l2_subdev_call(subdev, video, g_crop, crop);
	if (ret != -ENOIOCTLCMD)
		return ret;

	format.pad = pad;
	format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
	ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &format);
	if (ret < 0)
		return ret == -ENOIOCTLCMD ? -EINVAL : ret;

	crop->c.left = 0;
	crop->c.top = 0;
	crop->c.width = format.format.width;
	crop->c.height = format.format.height;

	return 0;
}

static int
isp_video_set_crop(struct file *file, void *fh, struct v4l2_crop *crop)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_subdev *subdev;
	int ret;

	subdev = isp_video_remote_subdev(video, NULL);
	if (subdev == NULL)
		return -EINVAL;

	mutex_lock(&video->mutex);
	ret = v4l2_subdev_call(subdev, video, s_crop, crop);
	mutex_unlock(&video->mutex);

	return ret == -ENOIOCTLCMD ? -EINVAL : ret;
}

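/*
 * Stream parameters: the time-per-frame value is only meaningful for V4L2
 * output video nodes. It is stored per file handle and later used as the
 * maximum time per frame for the pipeline when streaming is started.
 */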
static int
isp_video_get_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	memset(a, 0, sizeof(*a));
	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
	a->parm.output.timeperframe = vfh->timeperframe;

	return 0;
}

static int
isp_video_set_param(struct file *file, void *fh, struct v4l2_streamparm *a)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);

	if (video->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
	    video->type != a->type)
		return -EINVAL;

	if (a->parm.output.timeperframe.denominator == 0)
		a->parm.output.timeperframe.denominator = 1;

	vfh->timeperframe = a->parm.output.timeperframe;

	return 0;
}

static int
isp_video_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *rb)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_reqbufs(&vfh->queue, rb);
}

static int
isp_video_querybuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_querybuf(&vfh->queue, b);
}

static int
isp_video_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_qbuf(&vfh->queue, b);
}

static int
isp_video_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);

	return omap3isp_video_queue_dqbuf(&vfh->queue, b,
					  file->f_flags & O_NONBLOCK);
}

/*
 * Stream management
 *
 * Every ISP pipeline has a single input and a single output. The input can be
 * either a sensor or a video node. The output is always a video node.
 *
 * As every pipeline has an output video node, the ISP video object at the
 * pipeline output stores the pipeline state. It tracks the streaming state of
 * both the input and output, as well as the availability of buffers.
 *
 * In sensor-to-memory mode, frames are always available at the pipeline input.
 * Starting the sensor usually requires I2C transfers and must be done in
 * interruptible context. The pipeline is started and stopped synchronously
 * with the stream on/off commands. All modules in the pipeline will get their
 * subdev set stream handler called. The module at the end of the pipeline must
 * delay starting the hardware until buffers are available at its output.
 *
 * In memory-to-memory mode, starting/stopping the stream requires
 * synchronization between the input and output. ISP modules can't be stopped
 * in the middle of a frame, and at least some of the modules seem to become
 * busy as soon as they're started, even if they don't receive a frame start
 * event. For that reason frames need to be processed in single-shot mode. The
 * driver needs to wait until a frame is completely processed and written to
 * memory before restarting the pipeline for the next frame. Pipelined
 * processing might be possible but requires more testing.
 *
 * Stream start must be delayed until buffers are available at both the input
 * and output. The pipeline must be started in the videobuf queue callback with
 * the buffers queue spinlock held. The modules' subdev set stream operation
 * must not sleep.
 */
static int
isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	enum isp_pipeline_state state;
	struct isp_pipeline *pipe;
	struct isp_video *far_end;
	unsigned long flags;
	int ret;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	if (video->streaming) {
		mutex_unlock(&video->stream_lock);
		return -EBUSY;
	}

	/* Start streaming on the pipeline. No link touching an entity in the
	 * pipeline can be activated or deactivated once streaming is started.
	 */
	pipe = video->video.entity.pipe
	     ? to_isp_pipeline(&video->video.entity) : &video->pipe;
	media_entity_pipeline_start(&video->video.entity, &pipe->pipe);

	/* Verify that the currently configured format matches the output of
	 * the connected subdev.
	 */
	ret = isp_video_check_format(video, vfh);
	if (ret < 0)
		goto error;

	video->bpl_padding = ret;
	video->bpl_value = vfh->format.fmt.pix.bytesperline;

	/* Find the ISP video node connected at the far end of the pipeline and
	 * update the pipeline.
	 */
	far_end = isp_video_far_end(video);

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		state = ISP_PIPELINE_STREAM_OUTPUT | ISP_PIPELINE_IDLE_OUTPUT;
		pipe->input = far_end;
		pipe->output = video;
	} else {
		if (far_end == NULL) {
			ret = -EPIPE;
			goto error;
		}

		state = ISP_PIPELINE_STREAM_INPUT | ISP_PIPELINE_IDLE_INPUT;
		pipe->input = video;
		pipe->output = far_end;
	}

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, true);
	pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]);

	/* Validate the pipeline and update its state. */
	ret = isp_video_validate_pipeline(pipe);
	if (ret < 0)
		goto error;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~ISP_PIPELINE_STREAM;
	pipe->state |= state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Set the maximum time per frame as the value requested by userspace.
	 * This is a soft limit that can be overridden if the hardware doesn't
	 * support the requested limit.
	 */
	if (video->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		pipe->max_timeperframe = vfh->timeperframe;

	video->queue = &vfh->queue;
	INIT_LIST_HEAD(&video->dmaqueue);
	atomic_set(&pipe->frame_number, -1);

	ret = omap3isp_video_queue_streamon(&vfh->queue);
	if (ret < 0)
		goto error;

	/* In sensor-to-memory mode, the stream can be started synchronously
	 * with the stream on command. In memory-to-memory mode, it will be
	 * started when buffers are queued on both the input and output.
	 */
	if (pipe->input == NULL) {
		ret = omap3isp_pipeline_set_stream(pipe,
						   ISP_PIPELINE_STREAM_CONTINUOUS);
		if (ret < 0)
			goto error;
		spin_lock_irqsave(&video->queue->irqlock, flags);
		if (list_empty(&video->dmaqueue))
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&video->queue->irqlock, flags);
	}

error:
	if (ret < 0) {
		omap3isp_video_queue_streamoff(&vfh->queue);
		if (video->isp->pdata->set_constraints)
			video->isp->pdata->set_constraints(video->isp, false);
		media_entity_pipeline_stop(&video->video.entity);
		video->queue = NULL;
	}

	if (!ret)
		video->streaming = 1;

	mutex_unlock(&video->stream_lock);
	return ret;
}

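/*
 * isp_video_streamoff - Stop streaming on a video node
 *
 * Update the pipeline state, stop the hardware pipeline, turn off the buffer
 * queue and clear the streaming flag. Calling this function on a node that
 * isn't streaming is a no-op.
 */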
static int
isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
	struct isp_video_fh *vfh = to_isp_video_fh(fh);
	struct isp_video *video = video_drvdata(file);
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	unsigned int streaming;
	unsigned long flags;

	if (type != video->type)
		return -EINVAL;

	mutex_lock(&video->stream_lock);

	/* Make sure we're actually streaming. */
	mutex_lock(&vfh->queue.lock);
	streaming = vfh->queue.streaming;
	mutex_unlock(&vfh->queue.lock);

	if (!streaming)
		goto done;

	/* Update the pipeline state. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		state = ISP_PIPELINE_STREAM_OUTPUT
		      | ISP_PIPELINE_QUEUE_OUTPUT;
	else
		state = ISP_PIPELINE_STREAM_INPUT
		      | ISP_PIPELINE_QUEUE_INPUT;

	spin_lock_irqsave(&pipe->lock, flags);
	pipe->state &= ~state;
	spin_unlock_irqrestore(&pipe->lock, flags);

	/* Stop the stream. */
	omap3isp_pipeline_set_stream(pipe, ISP_PIPELINE_STREAM_STOPPED);
	omap3isp_video_queue_streamoff(&vfh->queue);
	video->queue = NULL;
	video->streaming = 0;

	if (video->isp->pdata->set_constraints)
		video->isp->pdata->set_constraints(video->isp, false);
	media_entity_pipeline_stop(&video->video.entity);

done:
	mutex_unlock(&video->stream_lock);
	return 0;
}

static int
isp_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}

static int
isp_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}

static int
isp_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}

static const struct v4l2_ioctl_ops isp_video_ioctl_ops = {
	.vidioc_querycap		= isp_video_querycap,
	.vidioc_g_fmt_vid_cap		= isp_video_get_format,
	.vidioc_s_fmt_vid_cap		= isp_video_set_format,
	.vidioc_try_fmt_vid_cap		= isp_video_try_format,
	.vidioc_g_fmt_vid_out		= isp_video_get_format,
	.vidioc_s_fmt_vid_out		= isp_video_set_format,
	.vidioc_try_fmt_vid_out		= isp_video_try_format,
	.vidioc_cropcap			= isp_video_cropcap,
	.vidioc_g_crop			= isp_video_get_crop,
	.vidioc_s_crop			= isp_video_set_crop,
	.vidioc_g_parm			= isp_video_get_param,
	.vidioc_s_parm			= isp_video_set_param,
	.vidioc_reqbufs			= isp_video_reqbufs,
	.vidioc_querybuf		= isp_video_querybuf,
	.vidioc_qbuf			= isp_video_qbuf,
	.vidioc_dqbuf			= isp_video_dqbuf,
	.vidioc_streamon		= isp_video_streamon,
	.vidioc_streamoff		= isp_video_streamoff,
	.vidioc_enum_input		= isp_video_enum_input,
	.vidioc_g_input			= isp_video_g_input,
	.vidioc_s_input			= isp_video_s_input,
};

/* -----------------------------------------------------------------------------
 * V4L2 file operations
 */

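/*
 * isp_video_open - Open the video device node
 *
 * Allocate and initialise a file handle, take a reference to the ISP device,
 * mark the pipeline as in use for power management and initialise the buffer
 * queue. The per-handle format and time-per-frame are reset to defaults.
 */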
static int isp_video_open(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct isp_video_fh *handle;
	int ret = 0;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	v4l2_fh_init(&handle->vfh, &video->video);
	v4l2_fh_add(&handle->vfh);

	/* If this is the first user, initialise the pipeline. */
	if (omap3isp_get(video->isp) == NULL) {
		ret = -EBUSY;
		goto done;
	}

	ret = omap3isp_pipeline_pm_use(&video->video.entity, 1);
	if (ret < 0) {
		omap3isp_put(video->isp);
		goto done;
	}

	omap3isp_video_queue_init(&handle->queue, video->type,
				  &isp_video_queue_ops, video->isp->dev,
				  sizeof(struct isp_buffer));

	memset(&handle->format, 0, sizeof(handle->format));
	handle->format.type = video->type;
	handle->timeperframe.denominator = 1;

	handle->video = video;
	file->private_data = &handle->vfh;

done:
	if (ret < 0) {
		v4l2_fh_del(&handle->vfh);
		kfree(handle);
	}

	return ret;
}

static int isp_video_release(struct file *file)
{
	struct isp_video *video = video_drvdata(file);
	struct v4l2_fh *vfh = file->private_data;
	struct isp_video_fh *handle = to_isp_video_fh(vfh);

	/* Disable streaming and free the buffers queue resources. */
	isp_video_streamoff(file, vfh, video->type);

	mutex_lock(&handle->queue.lock);
	omap3isp_video_queue_cleanup(&handle->queue);
	mutex_unlock(&handle->queue.lock);

	omap3isp_pipeline_pm_use(&video->video.entity, 0);

	/* Release the file handle. */
	v4l2_fh_del(vfh);
	kfree(handle);
	file->private_data = NULL;

	omap3isp_put(video->isp);

	return 0;
}

static unsigned int isp_video_poll(struct file *file, poll_table *wait)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);
	struct isp_video_queue *queue = &vfh->queue;

	return omap3isp_video_queue_poll(queue, file, wait);
}

static int isp_video_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct isp_video_fh *vfh = to_isp_video_fh(file->private_data);

	return omap3isp_video_queue_mmap(&vfh->queue, vma);
}

static struct v4l2_file_operations isp_video_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = isp_video_open,
	.release = isp_video_release,
	.poll = isp_video_poll,
	.mmap = isp_video_mmap,
};

/* -----------------------------------------------------------------------------
 * ISP video core
 */

static const struct isp_video_operations isp_video_dummy_ops = {
};

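/*
 * omap3isp_video_init - Initialise an ISP video node
 * @video: ISP video instance
 * @name: name of the ISP module the node belongs to (used in the device name)
 *
 * Initialise the media entity, locks and video device structure for the video
 * node. Capture nodes get a sink pad and "output" in their name, output nodes
 * get a source pad and "input", reflecting the data flow direction with
 * respect to the ISP.
 */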
int omap3isp_video_init(struct isp_video *video, const char *name)
{
	const char *direction;
	int ret;

	switch (video->type) {
	case V4L2_BUF_TYPE_VIDEO_CAPTURE:
		direction = "output";
		video->pad.flags = MEDIA_PAD_FL_SINK;
		break;
	case V4L2_BUF_TYPE_VIDEO_OUTPUT:
		direction = "input";
		video->pad.flags = MEDIA_PAD_FL_SOURCE;
		break;

	default:
		return -EINVAL;
	}

	ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
	if (ret < 0)
		return ret;

	mutex_init(&video->mutex);
	atomic_set(&video->active, 0);

	spin_lock_init(&video->pipe.lock);
	mutex_init(&video->stream_lock);

	/* Initialize the video device. */
	if (video->ops == NULL)
		video->ops = &isp_video_dummy_ops;

	video->video.fops = &isp_video_fops;
	snprintf(video->video.name, sizeof(video->video.name),
		 "OMAP3 ISP %s %s", name, direction);
	video->video.vfl_type = VFL_TYPE_GRABBER;
	video->video.release = video_device_release_empty;
	video->video.ioctl_ops = &isp_video_ioctl_ops;
	video->pipe.stream_state = ISP_PIPELINE_STREAM_STOPPED;

	video_set_drvdata(&video->video, video);

	return 0;
}

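/*
 * omap3isp_video_register - Register the video device node with V4L2
 * @video: ISP video instance
 * @vdev: V4L2 device the video node belongs to
 *
 * Register the video device node and print an error message if registration
 * fails. omap3isp_video_unregister() undoes the registration and cleans up
 * the media entity.
 */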
int omap3isp_video_register(struct isp_video *video, struct v4l2_device *vdev)
{
	int ret;

	video->video.v4l2_dev = vdev;

	ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		printk(KERN_ERR "%s: could not register video device (%d)\n",
		       __func__, ret);

	return ret;
}

void omap3isp_video_unregister(struct isp_video *video)
{
	if (video_is_registered(&video->video)) {
		media_entity_cleanup(&video->video.entity);
		video_unregister_device(&video->video);
	}
}