2 * Samsung TV Mixer driver
4 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
6 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published
10 * by the Free Software Foundation, either version 2 of the License,
11 * or (at your option) any later version
14 #define pr_fmt(fmt) "s5p-tv (mixer): " fmt
18 #include <media/v4l2-ioctl.h>
19 #include <linux/videodev2.h>
21 #include <linux/module.h>
22 #include <linux/platform_device.h>
23 #include <linux/timer.h>
24 #include <media/videobuf2-dma-contig.h>
/* Fetch the v4l2_subdev stored as driver data of @dev into *@p.
 * Used as a driver_for_each_device() callback. */
static int find_reg_callback(struct device *dev, void *p)
{
	struct v4l2_subdev **sd = p;

	*sd = dev_get_drvdata(dev);
	/* non-zero value stops iteration */
	return 1;
}
35 static struct v4l2_subdev
*find_and_register_subdev(
36 struct mxr_device
*mdev
, char *module_name
)
38 struct device_driver
*drv
;
39 struct v4l2_subdev
*sd
= NULL
;
42 /* TODO: add waiting until probe is finished */
43 drv
= driver_find(module_name
, &platform_bus_type
);
45 mxr_warn(mdev
, "module %s is missing\n", module_name
);
48 /* driver refcnt is increased, it is safe to iterate over devices */
49 ret
= driver_for_each_device(drv
, NULL
, &sd
, find_reg_callback
);
50 /* ret == 0 means that find_reg_callback was never executed */
52 mxr_warn(mdev
, "module %s provides no subdev!\n", module_name
);
55 /* v4l2_device_register_subdev detects if sd is NULL */
56 ret
= v4l2_device_register_subdev(&mdev
->v4l2_dev
, sd
);
58 mxr_warn(mdev
, "failed to register subdev %s\n", sd
->name
);
66 int mxr_acquire_video(struct mxr_device
*mdev
,
67 struct mxr_output_conf
*output_conf
, int output_count
)
69 struct device
*dev
= mdev
->dev
;
70 struct v4l2_device
*v4l2_dev
= &mdev
->v4l2_dev
;
73 struct v4l2_subdev
*sd
;
75 strlcpy(v4l2_dev
->name
, dev_name(mdev
->dev
), sizeof(v4l2_dev
->name
));
76 /* prepare context for V4L2 device */
77 ret
= v4l2_device_register(dev
, v4l2_dev
);
79 mxr_err(mdev
, "could not register v4l2 device.\n");
83 vb2_dma_contig_set_max_seg_size(mdev
->dev
, DMA_BIT_MASK(32));
85 /* registering outputs */
87 for (i
= 0; i
< output_count
; ++i
) {
88 struct mxr_output_conf
*conf
= &output_conf
[i
];
89 struct mxr_output
*out
;
91 sd
= find_and_register_subdev(mdev
, conf
->module_name
);
92 /* trying to register next output */
95 out
= kzalloc(sizeof(*out
), GFP_KERNEL
);
97 mxr_err(mdev
, "no memory for '%s'\n",
100 /* registered subdevs are removed in fail_v4l2_dev */
103 strlcpy(out
->name
, conf
->output_name
, sizeof(out
->name
));
105 out
->cookie
= conf
->cookie
;
106 mdev
->output
[mdev
->output_cnt
++] = out
;
107 mxr_info(mdev
, "added output '%s' from module '%s'\n",
108 conf
->output_name
, conf
->module_name
);
109 /* checking if maximal number of outputs is reached */
110 if (mdev
->output_cnt
>= MXR_MAX_OUTPUTS
)
114 if (mdev
->output_cnt
== 0) {
115 mxr_err(mdev
, "failed to register any output\n");
117 /* skipping fail_output because there is nothing to free */
124 /* kfree is NULL-safe */
125 for (i
= 0; i
< mdev
->output_cnt
; ++i
)
126 kfree(mdev
->output
[i
]);
127 memset(mdev
->output
, 0, sizeof(mdev
->output
));
130 /* NOTE: automatically unregister all subdevs */
131 v4l2_device_unregister(v4l2_dev
);
137 void mxr_release_video(struct mxr_device
*mdev
)
141 /* kfree is NULL-safe */
142 for (i
= 0; i
< mdev
->output_cnt
; ++i
)
143 kfree(mdev
->output
[i
]);
145 vb2_dma_contig_clear_max_seg_size(mdev
->dev
);
146 v4l2_device_unregister(&mdev
->v4l2_dev
);
149 static int mxr_querycap(struct file
*file
, void *priv
,
150 struct v4l2_capability
*cap
)
152 struct mxr_layer
*layer
= video_drvdata(file
);
154 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
156 strlcpy(cap
->driver
, MXR_DRIVER_NAME
, sizeof(cap
->driver
));
157 strlcpy(cap
->card
, layer
->vfd
.name
, sizeof(cap
->card
));
158 sprintf(cap
->bus_info
, "%d", layer
->idx
);
159 cap
->device_caps
= V4L2_CAP_STREAMING
| V4L2_CAP_VIDEO_OUTPUT_MPLANE
;
160 cap
->capabilities
= cap
->device_caps
| V4L2_CAP_DEVICE_CAPS
;
165 static void mxr_geometry_dump(struct mxr_device
*mdev
, struct mxr_geometry
*geo
)
167 mxr_dbg(mdev
, "src.full_size = (%u, %u)\n",
168 geo
->src
.full_width
, geo
->src
.full_height
);
169 mxr_dbg(mdev
, "src.size = (%u, %u)\n",
170 geo
->src
.width
, geo
->src
.height
);
171 mxr_dbg(mdev
, "src.offset = (%u, %u)\n",
172 geo
->src
.x_offset
, geo
->src
.y_offset
);
173 mxr_dbg(mdev
, "dst.full_size = (%u, %u)\n",
174 geo
->dst
.full_width
, geo
->dst
.full_height
);
175 mxr_dbg(mdev
, "dst.size = (%u, %u)\n",
176 geo
->dst
.width
, geo
->dst
.height
);
177 mxr_dbg(mdev
, "dst.offset = (%u, %u)\n",
178 geo
->dst
.x_offset
, geo
->dst
.y_offset
);
179 mxr_dbg(mdev
, "ratio = (%u, %u)\n",
180 geo
->x_ratio
, geo
->y_ratio
);
183 static void mxr_layer_default_geo(struct mxr_layer
*layer
)
185 struct mxr_device
*mdev
= layer
->mdev
;
186 struct v4l2_mbus_framefmt mbus_fmt
;
188 memset(&layer
->geo
, 0, sizeof(layer
->geo
));
190 mxr_get_mbus_fmt(mdev
, &mbus_fmt
);
192 layer
->geo
.dst
.full_width
= mbus_fmt
.width
;
193 layer
->geo
.dst
.full_height
= mbus_fmt
.height
;
194 layer
->geo
.dst
.width
= layer
->geo
.dst
.full_width
;
195 layer
->geo
.dst
.height
= layer
->geo
.dst
.full_height
;
196 layer
->geo
.dst
.field
= mbus_fmt
.field
;
198 layer
->geo
.src
.full_width
= mbus_fmt
.width
;
199 layer
->geo
.src
.full_height
= mbus_fmt
.height
;
200 layer
->geo
.src
.width
= layer
->geo
.src
.full_width
;
201 layer
->geo
.src
.height
= layer
->geo
.src
.full_height
;
203 mxr_geometry_dump(mdev
, &layer
->geo
);
204 layer
->ops
.fix_geometry(layer
, MXR_GEOMETRY_SINK
, 0);
205 mxr_geometry_dump(mdev
, &layer
->geo
);
208 static void mxr_layer_update_output(struct mxr_layer
*layer
)
210 struct mxr_device
*mdev
= layer
->mdev
;
211 struct v4l2_mbus_framefmt mbus_fmt
;
213 mxr_get_mbus_fmt(mdev
, &mbus_fmt
);
214 /* checking if update is needed */
215 if (layer
->geo
.dst
.full_width
== mbus_fmt
.width
&&
216 layer
->geo
.dst
.full_height
== mbus_fmt
.width
)
219 layer
->geo
.dst
.full_width
= mbus_fmt
.width
;
220 layer
->geo
.dst
.full_height
= mbus_fmt
.height
;
221 layer
->geo
.dst
.field
= mbus_fmt
.field
;
222 layer
->ops
.fix_geometry(layer
, MXR_GEOMETRY_SINK
, 0);
224 mxr_geometry_dump(mdev
, &layer
->geo
);
227 static const struct mxr_format
*find_format_by_fourcc(
228 struct mxr_layer
*layer
, unsigned long fourcc
);
229 static const struct mxr_format
*find_format_by_index(
230 struct mxr_layer
*layer
, unsigned long index
);
232 static int mxr_enum_fmt(struct file
*file
, void *priv
,
233 struct v4l2_fmtdesc
*f
)
235 struct mxr_layer
*layer
= video_drvdata(file
);
236 struct mxr_device
*mdev
= layer
->mdev
;
237 const struct mxr_format
*fmt
;
239 mxr_dbg(mdev
, "%s\n", __func__
);
240 fmt
= find_format_by_index(layer
, f
->index
);
244 strlcpy(f
->description
, fmt
->name
, sizeof(f
->description
));
245 f
->pixelformat
= fmt
->fourcc
;
/* Integer division rounded up: number of divisor-sized blocks needed
 * to cover dividend. */
static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
	return (dividend + divisor - 1) / divisor;
}
255 unsigned long mxr_get_plane_size(const struct mxr_block
*blk
,
256 unsigned int width
, unsigned int height
)
258 unsigned int bl_width
= divup(width
, blk
->width
);
259 unsigned int bl_height
= divup(height
, blk
->height
);
261 return bl_width
* bl_height
* blk
->size
;
264 static void mxr_mplane_fill(struct v4l2_plane_pix_format
*planes
,
265 const struct mxr_format
*fmt
, u32 width
, u32 height
)
269 /* checking if nothing to fill */
273 memset(planes
, 0, sizeof(*planes
) * fmt
->num_subframes
);
274 for (i
= 0; i
< fmt
->num_planes
; ++i
) {
275 struct v4l2_plane_pix_format
*plane
= planes
276 + fmt
->plane2subframe
[i
];
277 const struct mxr_block
*blk
= &fmt
->plane
[i
];
278 u32 bl_width
= divup(width
, blk
->width
);
279 u32 bl_height
= divup(height
, blk
->height
);
280 u32 sizeimage
= bl_width
* bl_height
* blk
->size
;
281 u32 bytesperline
= bl_width
* blk
->size
/ blk
->height
;
283 plane
->sizeimage
+= sizeimage
;
284 plane
->bytesperline
= max(plane
->bytesperline
, bytesperline
);
288 static int mxr_g_fmt(struct file
*file
, void *priv
,
289 struct v4l2_format
*f
)
291 struct mxr_layer
*layer
= video_drvdata(file
);
292 struct v4l2_pix_format_mplane
*pix
= &f
->fmt
.pix_mp
;
294 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
296 pix
->width
= layer
->geo
.src
.full_width
;
297 pix
->height
= layer
->geo
.src
.full_height
;
298 pix
->field
= V4L2_FIELD_NONE
;
299 pix
->pixelformat
= layer
->fmt
->fourcc
;
300 pix
->colorspace
= layer
->fmt
->colorspace
;
301 mxr_mplane_fill(pix
->plane_fmt
, layer
->fmt
, pix
->width
, pix
->height
);
306 static int mxr_s_fmt(struct file
*file
, void *priv
,
307 struct v4l2_format
*f
)
309 struct mxr_layer
*layer
= video_drvdata(file
);
310 const struct mxr_format
*fmt
;
311 struct v4l2_pix_format_mplane
*pix
;
312 struct mxr_device
*mdev
= layer
->mdev
;
313 struct mxr_geometry
*geo
= &layer
->geo
;
315 mxr_dbg(mdev
, "%s:%d\n", __func__
, __LINE__
);
317 pix
= &f
->fmt
.pix_mp
;
318 fmt
= find_format_by_fourcc(layer
, pix
->pixelformat
);
320 mxr_warn(mdev
, "not recognized fourcc: %08x\n",
325 /* set source size to highest accepted value */
326 geo
->src
.full_width
= max(geo
->dst
.full_width
, pix
->width
);
327 geo
->src
.full_height
= max(geo
->dst
.full_height
, pix
->height
);
328 layer
->ops
.fix_geometry(layer
, MXR_GEOMETRY_SOURCE
, 0);
329 mxr_geometry_dump(mdev
, &layer
->geo
);
330 /* set cropping to total visible screen */
331 geo
->src
.width
= pix
->width
;
332 geo
->src
.height
= pix
->height
;
333 geo
->src
.x_offset
= 0;
334 geo
->src
.y_offset
= 0;
335 /* assure consistency of geometry */
336 layer
->ops
.fix_geometry(layer
, MXR_GEOMETRY_CROP
, MXR_NO_OFFSET
);
337 mxr_geometry_dump(mdev
, &layer
->geo
);
338 /* set full size to lowest possible value */
339 geo
->src
.full_width
= 0;
340 geo
->src
.full_height
= 0;
341 layer
->ops
.fix_geometry(layer
, MXR_GEOMETRY_SOURCE
, 0);
342 mxr_geometry_dump(mdev
, &layer
->geo
);
344 /* returning results */
345 mxr_g_fmt(file
, priv
, f
);
350 static int mxr_g_selection(struct file
*file
, void *fh
,
351 struct v4l2_selection
*s
)
353 struct mxr_layer
*layer
= video_drvdata(file
);
354 struct mxr_geometry
*geo
= &layer
->geo
;
356 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
358 if (s
->type
!= V4L2_BUF_TYPE_VIDEO_OUTPUT
&&
359 s
->type
!= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
)
363 case V4L2_SEL_TGT_CROP
:
364 s
->r
.left
= geo
->src
.x_offset
;
365 s
->r
.top
= geo
->src
.y_offset
;
366 s
->r
.width
= geo
->src
.width
;
367 s
->r
.height
= geo
->src
.height
;
369 case V4L2_SEL_TGT_CROP_DEFAULT
:
370 case V4L2_SEL_TGT_CROP_BOUNDS
:
373 s
->r
.width
= geo
->src
.full_width
;
374 s
->r
.height
= geo
->src
.full_height
;
376 case V4L2_SEL_TGT_COMPOSE
:
377 case V4L2_SEL_TGT_COMPOSE_PADDED
:
378 s
->r
.left
= geo
->dst
.x_offset
;
379 s
->r
.top
= geo
->dst
.y_offset
;
380 s
->r
.width
= geo
->dst
.width
;
381 s
->r
.height
= geo
->dst
.height
;
383 case V4L2_SEL_TGT_COMPOSE_DEFAULT
:
384 case V4L2_SEL_TGT_COMPOSE_BOUNDS
:
387 s
->r
.width
= geo
->dst
.full_width
;
388 s
->r
.height
= geo
->dst
.full_height
;
397 /* returns 1 if rectangle 'a' is inside 'b' */
398 static int mxr_is_rect_inside(struct v4l2_rect
*a
, struct v4l2_rect
*b
)
400 if (a
->left
< b
->left
)
404 if (a
->left
+ a
->width
> b
->left
+ b
->width
)
406 if (a
->top
+ a
->height
> b
->top
+ b
->height
)
411 static int mxr_s_selection(struct file
*file
, void *fh
,
412 struct v4l2_selection
*s
)
414 struct mxr_layer
*layer
= video_drvdata(file
);
415 struct mxr_geometry
*geo
= &layer
->geo
;
416 struct mxr_crop
*target
= NULL
;
417 enum mxr_geometry_stage stage
;
418 struct mxr_geometry tmp
;
419 struct v4l2_rect res
;
421 memset(&res
, 0, sizeof(res
));
423 mxr_dbg(layer
->mdev
, "%s: rect: %dx%d@%d,%d\n", __func__
,
424 s
->r
.width
, s
->r
.height
, s
->r
.left
, s
->r
.top
);
426 if (s
->type
!= V4L2_BUF_TYPE_VIDEO_OUTPUT
&&
427 s
->type
!= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
)
431 /* ignore read-only targets */
432 case V4L2_SEL_TGT_CROP_DEFAULT
:
433 case V4L2_SEL_TGT_CROP_BOUNDS
:
434 res
.width
= geo
->src
.full_width
;
435 res
.height
= geo
->src
.full_height
;
438 /* ignore read-only targets */
439 case V4L2_SEL_TGT_COMPOSE_DEFAULT
:
440 case V4L2_SEL_TGT_COMPOSE_BOUNDS
:
441 res
.width
= geo
->dst
.full_width
;
442 res
.height
= geo
->dst
.full_height
;
445 case V4L2_SEL_TGT_CROP
:
447 stage
= MXR_GEOMETRY_CROP
;
449 case V4L2_SEL_TGT_COMPOSE
:
450 case V4L2_SEL_TGT_COMPOSE_PADDED
:
452 stage
= MXR_GEOMETRY_COMPOSE
;
457 /* apply change and update geometry if needed */
459 /* backup current geometry if setup fails */
460 memcpy(&tmp
, geo
, sizeof(tmp
));
462 /* apply requested selection */
463 target
->x_offset
= s
->r
.left
;
464 target
->y_offset
= s
->r
.top
;
465 target
->width
= s
->r
.width
;
466 target
->height
= s
->r
.height
;
468 layer
->ops
.fix_geometry(layer
, stage
, s
->flags
);
470 /* retrieve update selection rectangle */
471 res
.left
= target
->x_offset
;
472 res
.top
= target
->y_offset
;
473 res
.width
= target
->width
;
474 res
.height
= target
->height
;
476 mxr_geometry_dump(layer
->mdev
, &layer
->geo
);
479 /* checking if the rectangle satisfies constraints */
480 if ((s
->flags
& V4L2_SEL_FLAG_LE
) && !mxr_is_rect_inside(&res
, &s
->r
))
482 if ((s
->flags
& V4L2_SEL_FLAG_GE
) && !mxr_is_rect_inside(&s
->r
, &res
))
485 /* return result rectangle */
490 /* restore old geometry, which is not touched if target is NULL */
492 memcpy(geo
, &tmp
, sizeof(tmp
));
496 static int mxr_enum_dv_timings(struct file
*file
, void *fh
,
497 struct v4l2_enum_dv_timings
*timings
)
499 struct mxr_layer
*layer
= video_drvdata(file
);
500 struct mxr_device
*mdev
= layer
->mdev
;
505 /* lock protects from changing sd_out */
506 mutex_lock(&mdev
->mutex
);
507 ret
= v4l2_subdev_call(to_outsd(mdev
), pad
, enum_dv_timings
, timings
);
508 mutex_unlock(&mdev
->mutex
);
510 return ret
? -EINVAL
: 0;
513 static int mxr_s_dv_timings(struct file
*file
, void *fh
,
514 struct v4l2_dv_timings
*timings
)
516 struct mxr_layer
*layer
= video_drvdata(file
);
517 struct mxr_device
*mdev
= layer
->mdev
;
520 /* lock protects from changing sd_out */
521 mutex_lock(&mdev
->mutex
);
523 /* timings change cannot be done while there is an entity
524 * dependent on output configuration
526 if (mdev
->n_output
> 0) {
527 mutex_unlock(&mdev
->mutex
);
531 ret
= v4l2_subdev_call(to_outsd(mdev
), video
, s_dv_timings
, timings
);
533 mutex_unlock(&mdev
->mutex
);
535 mxr_layer_update_output(layer
);
537 /* any failure should return EINVAL according to V4L2 doc */
538 return ret
? -EINVAL
: 0;
541 static int mxr_g_dv_timings(struct file
*file
, void *fh
,
542 struct v4l2_dv_timings
*timings
)
544 struct mxr_layer
*layer
= video_drvdata(file
);
545 struct mxr_device
*mdev
= layer
->mdev
;
548 /* lock protects from changing sd_out */
549 mutex_lock(&mdev
->mutex
);
550 ret
= v4l2_subdev_call(to_outsd(mdev
), video
, g_dv_timings
, timings
);
551 mutex_unlock(&mdev
->mutex
);
553 return ret
? -EINVAL
: 0;
556 static int mxr_dv_timings_cap(struct file
*file
, void *fh
,
557 struct v4l2_dv_timings_cap
*cap
)
559 struct mxr_layer
*layer
= video_drvdata(file
);
560 struct mxr_device
*mdev
= layer
->mdev
;
565 /* lock protects from changing sd_out */
566 mutex_lock(&mdev
->mutex
);
567 ret
= v4l2_subdev_call(to_outsd(mdev
), pad
, dv_timings_cap
, cap
);
568 mutex_unlock(&mdev
->mutex
);
570 return ret
? -EINVAL
: 0;
573 static int mxr_s_std(struct file
*file
, void *fh
, v4l2_std_id norm
)
575 struct mxr_layer
*layer
= video_drvdata(file
);
576 struct mxr_device
*mdev
= layer
->mdev
;
579 /* lock protects from changing sd_out */
580 mutex_lock(&mdev
->mutex
);
582 /* standard change cannot be done while there is an entity
583 * dependent on output configuration
585 if (mdev
->n_output
> 0) {
586 mutex_unlock(&mdev
->mutex
);
590 ret
= v4l2_subdev_call(to_outsd(mdev
), video
, s_std_output
, norm
);
592 mutex_unlock(&mdev
->mutex
);
594 mxr_layer_update_output(layer
);
596 return ret
? -EINVAL
: 0;
599 static int mxr_g_std(struct file
*file
, void *fh
, v4l2_std_id
*norm
)
601 struct mxr_layer
*layer
= video_drvdata(file
);
602 struct mxr_device
*mdev
= layer
->mdev
;
605 /* lock protects from changing sd_out */
606 mutex_lock(&mdev
->mutex
);
607 ret
= v4l2_subdev_call(to_outsd(mdev
), video
, g_std_output
, norm
);
608 mutex_unlock(&mdev
->mutex
);
610 return ret
? -EINVAL
: 0;
613 static int mxr_enum_output(struct file
*file
, void *fh
, struct v4l2_output
*a
)
615 struct mxr_layer
*layer
= video_drvdata(file
);
616 struct mxr_device
*mdev
= layer
->mdev
;
617 struct mxr_output
*out
;
618 struct v4l2_subdev
*sd
;
620 if (a
->index
>= mdev
->output_cnt
)
622 out
= mdev
->output
[a
->index
];
625 strlcpy(a
->name
, out
->name
, sizeof(a
->name
));
627 /* try to obtain supported tv norms */
628 v4l2_subdev_call(sd
, video
, g_tvnorms_output
, &a
->std
);
630 if (sd
->ops
->video
&& sd
->ops
->video
->s_dv_timings
)
631 a
->capabilities
|= V4L2_OUT_CAP_DV_TIMINGS
;
632 if (sd
->ops
->video
&& sd
->ops
->video
->s_std_output
)
633 a
->capabilities
|= V4L2_OUT_CAP_STD
;
634 a
->type
= V4L2_OUTPUT_TYPE_ANALOG
;
639 static int mxr_s_output(struct file
*file
, void *fh
, unsigned int i
)
641 struct video_device
*vfd
= video_devdata(file
);
642 struct mxr_layer
*layer
= video_drvdata(file
);
643 struct mxr_device
*mdev
= layer
->mdev
;
645 if (i
>= mdev
->output_cnt
|| mdev
->output
[i
] == NULL
)
648 mutex_lock(&mdev
->mutex
);
649 if (mdev
->n_output
> 0) {
650 mutex_unlock(&mdev
->mutex
);
653 mdev
->current_output
= i
;
655 v4l2_subdev_call(to_outsd(mdev
), video
, g_tvnorms_output
,
657 mutex_unlock(&mdev
->mutex
);
659 /* update layers geometry */
660 mxr_layer_update_output(layer
);
662 mxr_dbg(mdev
, "tvnorms = %08llx\n", vfd
->tvnorms
);
667 static int mxr_g_output(struct file
*file
, void *fh
, unsigned int *p
)
669 struct mxr_layer
*layer
= video_drvdata(file
);
670 struct mxr_device
*mdev
= layer
->mdev
;
672 mutex_lock(&mdev
->mutex
);
673 *p
= mdev
->current_output
;
674 mutex_unlock(&mdev
->mutex
);
679 static int mxr_reqbufs(struct file
*file
, void *priv
,
680 struct v4l2_requestbuffers
*p
)
682 struct mxr_layer
*layer
= video_drvdata(file
);
684 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
685 return vb2_reqbufs(&layer
->vb_queue
, p
);
688 static int mxr_querybuf(struct file
*file
, void *priv
, struct v4l2_buffer
*p
)
690 struct mxr_layer
*layer
= video_drvdata(file
);
692 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
693 return vb2_querybuf(&layer
->vb_queue
, p
);
696 static int mxr_qbuf(struct file
*file
, void *priv
, struct v4l2_buffer
*p
)
698 struct mxr_layer
*layer
= video_drvdata(file
);
700 mxr_dbg(layer
->mdev
, "%s:%d(%d)\n", __func__
, __LINE__
, p
->index
);
701 return vb2_qbuf(&layer
->vb_queue
, p
);
704 static int mxr_dqbuf(struct file
*file
, void *priv
, struct v4l2_buffer
*p
)
706 struct mxr_layer
*layer
= video_drvdata(file
);
708 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
709 return vb2_dqbuf(&layer
->vb_queue
, p
, file
->f_flags
& O_NONBLOCK
);
712 static int mxr_expbuf(struct file
*file
, void *priv
,
713 struct v4l2_exportbuffer
*eb
)
715 struct mxr_layer
*layer
= video_drvdata(file
);
717 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
718 return vb2_expbuf(&layer
->vb_queue
, eb
);
721 static int mxr_streamon(struct file
*file
, void *priv
, enum v4l2_buf_type i
)
723 struct mxr_layer
*layer
= video_drvdata(file
);
725 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
726 return vb2_streamon(&layer
->vb_queue
, i
);
729 static int mxr_streamoff(struct file
*file
, void *priv
, enum v4l2_buf_type i
)
731 struct mxr_layer
*layer
= video_drvdata(file
);
733 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
734 return vb2_streamoff(&layer
->vb_queue
, i
);
737 static const struct v4l2_ioctl_ops mxr_ioctl_ops
= {
738 .vidioc_querycap
= mxr_querycap
,
739 /* format handling */
740 .vidioc_enum_fmt_vid_out_mplane
= mxr_enum_fmt
,
741 .vidioc_s_fmt_vid_out_mplane
= mxr_s_fmt
,
742 .vidioc_g_fmt_vid_out_mplane
= mxr_g_fmt
,
744 .vidioc_reqbufs
= mxr_reqbufs
,
745 .vidioc_querybuf
= mxr_querybuf
,
746 .vidioc_qbuf
= mxr_qbuf
,
747 .vidioc_dqbuf
= mxr_dqbuf
,
748 .vidioc_expbuf
= mxr_expbuf
,
749 /* Streaming control */
750 .vidioc_streamon
= mxr_streamon
,
751 .vidioc_streamoff
= mxr_streamoff
,
752 /* DV Timings functions */
753 .vidioc_enum_dv_timings
= mxr_enum_dv_timings
,
754 .vidioc_s_dv_timings
= mxr_s_dv_timings
,
755 .vidioc_g_dv_timings
= mxr_g_dv_timings
,
756 .vidioc_dv_timings_cap
= mxr_dv_timings_cap
,
757 /* analog TV standard functions */
758 .vidioc_s_std
= mxr_s_std
,
759 .vidioc_g_std
= mxr_g_std
,
760 /* Output handling */
761 .vidioc_enum_output
= mxr_enum_output
,
762 .vidioc_s_output
= mxr_s_output
,
763 .vidioc_g_output
= mxr_g_output
,
764 /* selection ioctls */
765 .vidioc_g_selection
= mxr_g_selection
,
766 .vidioc_s_selection
= mxr_s_selection
,
769 static int mxr_video_open(struct file
*file
)
771 struct mxr_layer
*layer
= video_drvdata(file
);
772 struct mxr_device
*mdev
= layer
->mdev
;
775 mxr_dbg(mdev
, "%s:%d\n", __func__
, __LINE__
);
776 if (mutex_lock_interruptible(&layer
->mutex
))
778 /* assure device probe is finished */
779 wait_for_device_probe();
780 /* creating context for file descriptor */
781 ret
= v4l2_fh_open(file
);
783 mxr_err(mdev
, "v4l2_fh_open failed\n");
787 /* leaving if layer is already initialized */
788 if (!v4l2_fh_is_singular_file(file
))
791 /* FIXME: should power be enabled on open? */
792 ret
= mxr_power_get(mdev
);
794 mxr_err(mdev
, "power on failed\n");
798 ret
= vb2_queue_init(&layer
->vb_queue
);
800 mxr_err(mdev
, "failed to initialize vb2 queue\n");
803 /* set default format, first on the list */
804 layer
->fmt
= layer
->fmt_array
[0];
805 /* setup default geometry */
806 mxr_layer_default_geo(layer
);
807 mutex_unlock(&layer
->mutex
);
815 v4l2_fh_release(file
);
818 mutex_unlock(&layer
->mutex
);
824 mxr_video_poll(struct file
*file
, struct poll_table_struct
*wait
)
826 struct mxr_layer
*layer
= video_drvdata(file
);
829 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
831 mutex_lock(&layer
->mutex
);
832 res
= vb2_poll(&layer
->vb_queue
, file
, wait
);
833 mutex_unlock(&layer
->mutex
);
837 static int mxr_video_mmap(struct file
*file
, struct vm_area_struct
*vma
)
839 struct mxr_layer
*layer
= video_drvdata(file
);
842 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
844 if (mutex_lock_interruptible(&layer
->mutex
))
846 ret
= vb2_mmap(&layer
->vb_queue
, vma
);
847 mutex_unlock(&layer
->mutex
);
851 static int mxr_video_release(struct file
*file
)
853 struct mxr_layer
*layer
= video_drvdata(file
);
855 mxr_dbg(layer
->mdev
, "%s:%d\n", __func__
, __LINE__
);
856 mutex_lock(&layer
->mutex
);
857 if (v4l2_fh_is_singular_file(file
)) {
858 vb2_queue_release(&layer
->vb_queue
);
859 mxr_power_put(layer
->mdev
);
861 v4l2_fh_release(file
);
862 mutex_unlock(&layer
->mutex
);
866 static const struct v4l2_file_operations mxr_fops
= {
867 .owner
= THIS_MODULE
,
868 .open
= mxr_video_open
,
869 .poll
= mxr_video_poll
,
870 .mmap
= mxr_video_mmap
,
871 .release
= mxr_video_release
,
872 .unlocked_ioctl
= video_ioctl2
,
875 static int queue_setup(struct vb2_queue
*vq
,
876 unsigned int *nbuffers
, unsigned int *nplanes
, unsigned int sizes
[],
877 struct device
*alloc_devs
[])
879 struct mxr_layer
*layer
= vb2_get_drv_priv(vq
);
880 const struct mxr_format
*fmt
= layer
->fmt
;
882 struct mxr_device
*mdev
= layer
->mdev
;
883 struct v4l2_plane_pix_format planes
[3];
885 mxr_dbg(mdev
, "%s\n", __func__
);
886 /* checking if format was configured */
889 mxr_dbg(mdev
, "fmt = %s\n", fmt
->name
);
890 mxr_mplane_fill(planes
, fmt
, layer
->geo
.src
.full_width
,
891 layer
->geo
.src
.full_height
);
893 *nplanes
= fmt
->num_subframes
;
894 for (i
= 0; i
< fmt
->num_subframes
; ++i
) {
895 sizes
[i
] = planes
[i
].sizeimage
;
896 mxr_dbg(mdev
, "size[%d] = %08x\n", i
, sizes
[i
]);
905 static void buf_queue(struct vb2_buffer
*vb
)
907 struct vb2_v4l2_buffer
*vbuf
= to_vb2_v4l2_buffer(vb
);
908 struct mxr_buffer
*buffer
= container_of(vbuf
, struct mxr_buffer
, vb
);
909 struct mxr_layer
*layer
= vb2_get_drv_priv(vb
->vb2_queue
);
910 struct mxr_device
*mdev
= layer
->mdev
;
913 spin_lock_irqsave(&layer
->enq_slock
, flags
);
914 list_add_tail(&buffer
->list
, &layer
->enq_list
);
915 spin_unlock_irqrestore(&layer
->enq_slock
, flags
);
917 mxr_dbg(mdev
, "queuing buffer\n");
920 static int start_streaming(struct vb2_queue
*vq
, unsigned int count
)
922 struct mxr_layer
*layer
= vb2_get_drv_priv(vq
);
923 struct mxr_device
*mdev
= layer
->mdev
;
926 mxr_dbg(mdev
, "%s\n", __func__
);
928 /* block any changes in output configuration */
929 mxr_output_get(mdev
);
931 mxr_layer_update_output(layer
);
932 layer
->ops
.format_set(layer
);
933 /* enabling layer in hardware */
934 spin_lock_irqsave(&layer
->enq_slock
, flags
);
935 layer
->state
= MXR_LAYER_STREAMING
;
936 spin_unlock_irqrestore(&layer
->enq_slock
, flags
);
938 layer
->ops
.stream_set(layer
, MXR_ENABLE
);
939 mxr_streamer_get(mdev
);
944 static void mxr_watchdog(unsigned long arg
)
946 struct mxr_layer
*layer
= (struct mxr_layer
*) arg
;
947 struct mxr_device
*mdev
= layer
->mdev
;
950 mxr_err(mdev
, "watchdog fired for layer %s\n", layer
->vfd
.name
);
952 spin_lock_irqsave(&layer
->enq_slock
, flags
);
954 if (layer
->update_buf
== layer
->shadow_buf
)
955 layer
->update_buf
= NULL
;
956 if (layer
->update_buf
) {
957 vb2_buffer_done(&layer
->update_buf
->vb
.vb2_buf
,
958 VB2_BUF_STATE_ERROR
);
959 layer
->update_buf
= NULL
;
961 if (layer
->shadow_buf
) {
962 vb2_buffer_done(&layer
->shadow_buf
->vb
.vb2_buf
,
963 VB2_BUF_STATE_ERROR
);
964 layer
->shadow_buf
= NULL
;
966 spin_unlock_irqrestore(&layer
->enq_slock
, flags
);
969 static void stop_streaming(struct vb2_queue
*vq
)
971 struct mxr_layer
*layer
= vb2_get_drv_priv(vq
);
972 struct mxr_device
*mdev
= layer
->mdev
;
974 struct timer_list watchdog
;
975 struct mxr_buffer
*buf
, *buf_tmp
;
977 mxr_dbg(mdev
, "%s\n", __func__
);
979 spin_lock_irqsave(&layer
->enq_slock
, flags
);
982 layer
->state
= MXR_LAYER_STREAMING_FINISH
;
984 /* set all buffer to be done */
985 list_for_each_entry_safe(buf
, buf_tmp
, &layer
->enq_list
, list
) {
986 list_del(&buf
->list
);
987 vb2_buffer_done(&buf
->vb
.vb2_buf
, VB2_BUF_STATE_ERROR
);
990 spin_unlock_irqrestore(&layer
->enq_slock
, flags
);
992 /* give 1 seconds to complete to complete last buffers */
993 setup_timer_on_stack(&watchdog
, mxr_watchdog
,
994 (unsigned long)layer
);
995 mod_timer(&watchdog
, jiffies
+ msecs_to_jiffies(1000));
997 /* wait until all buffers are goes to done state */
998 vb2_wait_for_all_buffers(vq
);
1000 /* stop timer if all synchronization is done */
1001 del_timer_sync(&watchdog
);
1002 destroy_timer_on_stack(&watchdog
);
1004 /* stopping hardware */
1005 spin_lock_irqsave(&layer
->enq_slock
, flags
);
1006 layer
->state
= MXR_LAYER_IDLE
;
1007 spin_unlock_irqrestore(&layer
->enq_slock
, flags
);
1009 /* disabling layer in hardware */
1010 layer
->ops
.stream_set(layer
, MXR_DISABLE
);
1011 /* remove one streamer */
1012 mxr_streamer_put(mdev
);
1013 /* allow changes in output configuration */
1014 mxr_output_put(mdev
);
1017 static struct vb2_ops mxr_video_qops
= {
1018 .queue_setup
= queue_setup
,
1019 .buf_queue
= buf_queue
,
1020 .wait_prepare
= vb2_ops_wait_prepare
,
1021 .wait_finish
= vb2_ops_wait_finish
,
1022 .start_streaming
= start_streaming
,
1023 .stop_streaming
= stop_streaming
,
1026 /* FIXME: try to put this functions to mxr_base_layer_create */
1027 int mxr_base_layer_register(struct mxr_layer
*layer
)
1029 struct mxr_device
*mdev
= layer
->mdev
;
1032 ret
= video_register_device(&layer
->vfd
, VFL_TYPE_GRABBER
, -1);
1034 mxr_err(mdev
, "failed to register video device\n");
1036 mxr_info(mdev
, "registered layer %s as /dev/video%d\n",
1037 layer
->vfd
.name
, layer
->vfd
.num
);
1041 void mxr_base_layer_unregister(struct mxr_layer
*layer
)
1043 video_unregister_device(&layer
->vfd
);
1046 void mxr_layer_release(struct mxr_layer
*layer
)
1048 if (layer
->ops
.release
)
1049 layer
->ops
.release(layer
);
/* Free the layer allocated by mxr_base_layer_create(). */
void mxr_base_layer_release(struct mxr_layer *layer)
{
	kfree(layer);
}
/* video_device .release hook: the layer owns the embedded vfd, so
 * nothing to free here — just log. */
static void mxr_vfd_release(struct video_device *vdev)
{
	pr_info("video device release\n");
}
1062 struct mxr_layer
*mxr_base_layer_create(struct mxr_device
*mdev
,
1063 int idx
, char *name
, const struct mxr_layer_ops
*ops
)
1065 struct mxr_layer
*layer
;
1067 layer
= kzalloc(sizeof(*layer
), GFP_KERNEL
);
1068 if (layer
== NULL
) {
1069 mxr_err(mdev
, "not enough memory for layer.\n");
1077 spin_lock_init(&layer
->enq_slock
);
1078 INIT_LIST_HEAD(&layer
->enq_list
);
1079 mutex_init(&layer
->mutex
);
1081 layer
->vfd
= (struct video_device
) {
1083 .release
= mxr_vfd_release
,
1085 .vfl_dir
= VFL_DIR_TX
,
1086 .ioctl_ops
= &mxr_ioctl_ops
,
1088 strlcpy(layer
->vfd
.name
, name
, sizeof(layer
->vfd
.name
));
1090 video_set_drvdata(&layer
->vfd
, layer
);
1091 layer
->vfd
.lock
= &layer
->mutex
;
1092 layer
->vfd
.v4l2_dev
= &mdev
->v4l2_dev
;
1094 layer
->vb_queue
= (struct vb2_queue
) {
1095 .type
= V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE
,
1096 .io_modes
= VB2_MMAP
| VB2_USERPTR
| VB2_DMABUF
,
1098 .buf_struct_size
= sizeof(struct mxr_buffer
),
1099 .ops
= &mxr_video_qops
,
1100 .min_buffers_needed
= 1,
1101 .mem_ops
= &vb2_dma_contig_memops
,
1102 .lock
= &layer
->mutex
,
1112 static const struct mxr_format
*find_format_by_fourcc(
1113 struct mxr_layer
*layer
, unsigned long fourcc
)
1117 for (i
= 0; i
< layer
->fmt_array_size
; ++i
)
1118 if (layer
->fmt_array
[i
]->fourcc
== fourcc
)
1119 return layer
->fmt_array
[i
];
1123 static const struct mxr_format
*find_format_by_index(
1124 struct mxr_layer
*layer
, unsigned long index
)
1126 if (index
>= layer
->fmt_array_size
)
1128 return layer
->fmt_array
[index
];