drivers/media/platform/s5p-tv/mixer_video.c
/*
 * Samsung TV Mixer driver
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *
 * Tomasz Stanislawski, <t.stanislaws@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#define pr_fmt(fmt) "s5p-tv (mixer): " fmt

#include "mixer.h"

#include <media/v4l2-ioctl.h>
#include <linux/videodev2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/timer.h>
#include <media/videobuf2-dma-contig.h>

static int find_reg_callback(struct device *dev, void *p)
{
        struct v4l2_subdev **sd = p;

        *sd = dev_get_drvdata(dev);
        /* non-zero value stops iteration */
        return 1;
}

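/* find the subdev provided by the driver named @module_name and
 * attach it to the mixer's v4l2_device; returns NULL on failure
 */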
static struct v4l2_subdev *find_and_register_subdev(
        struct mxr_device *mdev, char *module_name)
{
        struct device_driver *drv;
        struct v4l2_subdev *sd = NULL;
        int ret;

        /* TODO: add waiting until probe is finished */
        drv = driver_find(module_name, &platform_bus_type);
        if (!drv) {
                mxr_warn(mdev, "module %s is missing\n", module_name);
                return NULL;
        }
        /* driver refcnt is increased, it is safe to iterate over devices */
        ret = driver_for_each_device(drv, NULL, &sd, find_reg_callback);
        /* ret == 0 means that find_reg_callback was never executed */
        if (sd == NULL) {
                mxr_warn(mdev, "module %s provides no subdev!\n", module_name);
                goto done;
        }
        /* v4l2_device_register_subdev detects if sd is NULL */
        ret = v4l2_device_register_subdev(&mdev->v4l2_dev, sd);
        if (ret) {
                mxr_warn(mdev, "failed to register subdev %s\n", sd->name);
                sd = NULL;
        }

done:
        return sd;
}

int mxr_acquire_video(struct mxr_device *mdev,
        struct mxr_output_conf *output_conf, int output_count)
{
        struct device *dev = mdev->dev;
        struct v4l2_device *v4l2_dev = &mdev->v4l2_dev;
        int i;
        int ret = 0;
        struct v4l2_subdev *sd;

        strlcpy(v4l2_dev->name, dev_name(mdev->dev), sizeof(v4l2_dev->name));
        /* prepare context for V4L2 device */
        ret = v4l2_device_register(dev, v4l2_dev);
        if (ret) {
                mxr_err(mdev, "could not register v4l2 device.\n");
                goto fail;
        }

        vb2_dma_contig_set_max_seg_size(mdev->dev, DMA_BIT_MASK(32));

        /* registering outputs */
        mdev->output_cnt = 0;
        for (i = 0; i < output_count; ++i) {
                struct mxr_output_conf *conf = &output_conf[i];
                struct mxr_output *out;

                sd = find_and_register_subdev(mdev, conf->module_name);
                /* trying to register next output */
                if (sd == NULL)
                        continue;
                out = kzalloc(sizeof(*out), GFP_KERNEL);
                if (out == NULL) {
                        mxr_err(mdev, "no memory for '%s'\n",
                                conf->output_name);
                        ret = -ENOMEM;
                        /* registered subdevs are removed in fail_v4l2_dev */
                        goto fail_output;
                }
                strlcpy(out->name, conf->output_name, sizeof(out->name));
                out->sd = sd;
                out->cookie = conf->cookie;
                mdev->output[mdev->output_cnt++] = out;
                mxr_info(mdev, "added output '%s' from module '%s'\n",
                        conf->output_name, conf->module_name);
                /* checking if maximal number of outputs is reached */
                if (mdev->output_cnt >= MXR_MAX_OUTPUTS)
                        break;
        }

        if (mdev->output_cnt == 0) {
                mxr_err(mdev, "failed to register any output\n");
                ret = -ENODEV;
                /* skipping fail_output because there is nothing to free */
                goto fail_v4l2_dev;
        }

        return 0;

fail_output:
        /* kfree is NULL-safe */
        for (i = 0; i < mdev->output_cnt; ++i)
                kfree(mdev->output[i]);
        memset(mdev->output, 0, sizeof(mdev->output));

fail_v4l2_dev:
        /* NOTE: automatically unregister all subdevs */
        v4l2_device_unregister(v4l2_dev);

fail:
        return ret;
}

void mxr_release_video(struct mxr_device *mdev)
{
        int i;

        /* kfree is NULL-safe */
        for (i = 0; i < mdev->output_cnt; ++i)
                kfree(mdev->output[i]);

        vb2_dma_contig_clear_max_seg_size(mdev->dev);
        v4l2_device_unregister(&mdev->v4l2_dev);
}

static int mxr_querycap(struct file *file, void *priv,
        struct v4l2_capability *cap)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

        strlcpy(cap->driver, MXR_DRIVER_NAME, sizeof(cap->driver));
        strlcpy(cap->card, layer->vfd.name, sizeof(cap->card));
        sprintf(cap->bus_info, "%d", layer->idx);
        cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

        return 0;
}

static void mxr_geometry_dump(struct mxr_device *mdev, struct mxr_geometry *geo)
{
        mxr_dbg(mdev, "src.full_size = (%u, %u)\n",
                geo->src.full_width, geo->src.full_height);
        mxr_dbg(mdev, "src.size = (%u, %u)\n",
                geo->src.width, geo->src.height);
        mxr_dbg(mdev, "src.offset = (%u, %u)\n",
                geo->src.x_offset, geo->src.y_offset);
        mxr_dbg(mdev, "dst.full_size = (%u, %u)\n",
                geo->dst.full_width, geo->dst.full_height);
        mxr_dbg(mdev, "dst.size = (%u, %u)\n",
                geo->dst.width, geo->dst.height);
        mxr_dbg(mdev, "dst.offset = (%u, %u)\n",
                geo->dst.x_offset, geo->dst.y_offset);
        mxr_dbg(mdev, "ratio = (%u, %u)\n",
                geo->x_ratio, geo->y_ratio);
}

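/* reset layer geometry to the resolution of the current output */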
static void mxr_layer_default_geo(struct mxr_layer *layer)
{
        struct mxr_device *mdev = layer->mdev;
        struct v4l2_mbus_framefmt mbus_fmt;

        memset(&layer->geo, 0, sizeof(layer->geo));

        mxr_get_mbus_fmt(mdev, &mbus_fmt);

        layer->geo.dst.full_width = mbus_fmt.width;
        layer->geo.dst.full_height = mbus_fmt.height;
        layer->geo.dst.width = layer->geo.dst.full_width;
        layer->geo.dst.height = layer->geo.dst.full_height;
        layer->geo.dst.field = mbus_fmt.field;

        layer->geo.src.full_width = mbus_fmt.width;
        layer->geo.src.full_height = mbus_fmt.height;
        layer->geo.src.width = layer->geo.src.full_width;
        layer->geo.src.height = layer->geo.src.full_height;

        mxr_geometry_dump(mdev, &layer->geo);
        layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
        mxr_geometry_dump(mdev, &layer->geo);
}

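/* refresh the destination geometry when the output resolution changes */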
static void mxr_layer_update_output(struct mxr_layer *layer)
{
        struct mxr_device *mdev = layer->mdev;
        struct v4l2_mbus_framefmt mbus_fmt;

        mxr_get_mbus_fmt(mdev, &mbus_fmt);
        /* checking if update is needed */
        if (layer->geo.dst.full_width == mbus_fmt.width &&
                layer->geo.dst.full_height == mbus_fmt.height)
                return;

        layer->geo.dst.full_width = mbus_fmt.width;
        layer->geo.dst.full_height = mbus_fmt.height;
        layer->geo.dst.field = mbus_fmt.field;
        layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);

        mxr_geometry_dump(mdev, &layer->geo);
}

static const struct mxr_format *find_format_by_fourcc(
        struct mxr_layer *layer, unsigned long fourcc);
static const struct mxr_format *find_format_by_index(
        struct mxr_layer *layer, unsigned long index);

static int mxr_enum_fmt(struct file *file, void *priv,
        struct v4l2_fmtdesc *f)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        const struct mxr_format *fmt;

        mxr_dbg(mdev, "%s\n", __func__);
        fmt = find_format_by_index(layer, f->index);
        if (fmt == NULL)
                return -EINVAL;

        strlcpy(f->description, fmt->name, sizeof(f->description));
        f->pixelformat = fmt->fourcc;

        return 0;
}

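/* integer division with rounding up */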
static unsigned int divup(unsigned int dividend, unsigned int divisor)
{
        return (dividend + divisor - 1) / divisor;
}

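/* size in bytes of one plane, rounded up to whole blocks */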
unsigned long mxr_get_plane_size(const struct mxr_block *blk,
        unsigned int width, unsigned int height)
{
        unsigned int bl_width = divup(width, blk->width);
        unsigned int bl_height = divup(height, blk->height);

        return bl_width * bl_height * blk->size;
}

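/* compute sizeimage and bytesperline for every subframe of @fmt
 * at the given resolution
 */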
static void mxr_mplane_fill(struct v4l2_plane_pix_format *planes,
        const struct mxr_format *fmt, u32 width, u32 height)
{
        int i;

        /* checking if nothing to fill */
        if (!planes)
                return;

        memset(planes, 0, sizeof(*planes) * fmt->num_subframes);
        for (i = 0; i < fmt->num_planes; ++i) {
                struct v4l2_plane_pix_format *plane = planes
                        + fmt->plane2subframe[i];
                const struct mxr_block *blk = &fmt->plane[i];
                u32 bl_width = divup(width, blk->width);
                u32 bl_height = divup(height, blk->height);
                u32 sizeimage = bl_width * bl_height * blk->size;
                u32 bytesperline = bl_width * blk->size / blk->height;

                plane->sizeimage += sizeimage;
                plane->bytesperline = max(plane->bytesperline, bytesperline);
        }
}

static int mxr_g_fmt(struct file *file, void *priv,
        struct v4l2_format *f)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

        pix->width = layer->geo.src.full_width;
        pix->height = layer->geo.src.full_height;
        pix->field = V4L2_FIELD_NONE;
        pix->pixelformat = layer->fmt->fourcc;
        pix->colorspace = layer->fmt->colorspace;
        mxr_mplane_fill(pix->plane_fmt, layer->fmt, pix->width, pix->height);

        return 0;
}

static int mxr_s_fmt(struct file *file, void *priv,
        struct v4l2_format *f)
{
        struct mxr_layer *layer = video_drvdata(file);
        const struct mxr_format *fmt;
        struct v4l2_pix_format_mplane *pix;
        struct mxr_device *mdev = layer->mdev;
        struct mxr_geometry *geo = &layer->geo;

        mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);

        pix = &f->fmt.pix_mp;
        fmt = find_format_by_fourcc(layer, pix->pixelformat);
        if (fmt == NULL) {
                mxr_warn(mdev, "not recognized fourcc: %08x\n",
                        pix->pixelformat);
                return -EINVAL;
        }
        layer->fmt = fmt;
        /* set source size to highest accepted value */
        geo->src.full_width = max(geo->dst.full_width, pix->width);
        geo->src.full_height = max(geo->dst.full_height, pix->height);
        layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
        mxr_geometry_dump(mdev, &layer->geo);
        /* set cropping to total visible screen */
        geo->src.width = pix->width;
        geo->src.height = pix->height;
        geo->src.x_offset = 0;
        geo->src.y_offset = 0;
        /* assure consistency of geometry */
        layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
        mxr_geometry_dump(mdev, &layer->geo);
        /* set full size to lowest possible value */
        geo->src.full_width = 0;
        geo->src.full_height = 0;
        layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
        mxr_geometry_dump(mdev, &layer->geo);

        /* returning results */
        mxr_g_fmt(file, priv, f);

        return 0;
}

static int mxr_g_selection(struct file *file, void *fh,
        struct v4l2_selection *s)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_geometry *geo = &layer->geo;

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

        if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
                s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                return -EINVAL;

        switch (s->target) {
        case V4L2_SEL_TGT_CROP:
                s->r.left = geo->src.x_offset;
                s->r.top = geo->src.y_offset;
                s->r.width = geo->src.width;
                s->r.height = geo->src.height;
                break;
        case V4L2_SEL_TGT_CROP_DEFAULT:
        case V4L2_SEL_TGT_CROP_BOUNDS:
                s->r.left = 0;
                s->r.top = 0;
                s->r.width = geo->src.full_width;
                s->r.height = geo->src.full_height;
                break;
        case V4L2_SEL_TGT_COMPOSE:
        case V4L2_SEL_TGT_COMPOSE_PADDED:
                s->r.left = geo->dst.x_offset;
                s->r.top = geo->dst.y_offset;
                s->r.width = geo->dst.width;
                s->r.height = geo->dst.height;
                break;
        case V4L2_SEL_TGT_COMPOSE_DEFAULT:
        case V4L2_SEL_TGT_COMPOSE_BOUNDS:
                s->r.left = 0;
                s->r.top = 0;
                s->r.width = geo->dst.full_width;
                s->r.height = geo->dst.full_height;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/* returns 1 if rectangle 'a' is inside 'b' */
static int mxr_is_rect_inside(struct v4l2_rect *a, struct v4l2_rect *b)
{
        if (a->left < b->left)
                return 0;
        if (a->top < b->top)
                return 0;
        if (a->left + a->width > b->left + b->width)
                return 0;
        if (a->top + a->height > b->top + b->height)
                return 0;
        return 1;
}

static int mxr_s_selection(struct file *file, void *fh,
        struct v4l2_selection *s)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_geometry *geo = &layer->geo;
        struct mxr_crop *target = NULL;
        enum mxr_geometry_stage stage;
        struct mxr_geometry tmp;
        struct v4l2_rect res;

        memset(&res, 0, sizeof(res));

        mxr_dbg(layer->mdev, "%s: rect: %dx%d@%d,%d\n", __func__,
                s->r.width, s->r.height, s->r.left, s->r.top);

        if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT &&
                s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                return -EINVAL;

        switch (s->target) {
        /* ignore read-only targets */
        case V4L2_SEL_TGT_CROP_DEFAULT:
        case V4L2_SEL_TGT_CROP_BOUNDS:
                res.width = geo->src.full_width;
                res.height = geo->src.full_height;
                break;

        /* ignore read-only targets */
        case V4L2_SEL_TGT_COMPOSE_DEFAULT:
        case V4L2_SEL_TGT_COMPOSE_BOUNDS:
                res.width = geo->dst.full_width;
                res.height = geo->dst.full_height;
                break;

        case V4L2_SEL_TGT_CROP:
                target = &geo->src;
                stage = MXR_GEOMETRY_CROP;
                break;
        case V4L2_SEL_TGT_COMPOSE:
        case V4L2_SEL_TGT_COMPOSE_PADDED:
                target = &geo->dst;
                stage = MXR_GEOMETRY_COMPOSE;
                break;
        default:
                return -EINVAL;
        }
        /* apply change and update geometry if needed */
        if (target) {
                /* back up current geometry in case setup fails */
                memcpy(&tmp, geo, sizeof(tmp));

                /* apply requested selection */
                target->x_offset = s->r.left;
                target->y_offset = s->r.top;
                target->width = s->r.width;
                target->height = s->r.height;

                layer->ops.fix_geometry(layer, stage, s->flags);

                /* retrieve updated selection rectangle */
                res.left = target->x_offset;
                res.top = target->y_offset;
                res.width = target->width;
                res.height = target->height;

                mxr_geometry_dump(layer->mdev, &layer->geo);
        }

        /* checking if the rectangle satisfies constraints */
        if ((s->flags & V4L2_SEL_FLAG_LE) && !mxr_is_rect_inside(&res, &s->r))
                goto fail;
        if ((s->flags & V4L2_SEL_FLAG_GE) && !mxr_is_rect_inside(&s->r, &res))
                goto fail;

        /* return result rectangle */
        s->r = res;

        return 0;
fail:
        /* restore old geometry, which is not touched if target is NULL */
        if (target)
                memcpy(geo, &tmp, sizeof(tmp));
        return -ERANGE;
}

static int mxr_enum_dv_timings(struct file *file, void *fh,
        struct v4l2_enum_dv_timings *timings)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret;

        timings->pad = 0;

        /* lock protects from changing sd_out */
        mutex_lock(&mdev->mutex);
        ret = v4l2_subdev_call(to_outsd(mdev), pad, enum_dv_timings, timings);
        mutex_unlock(&mdev->mutex);

        return ret ? -EINVAL : 0;
}

static int mxr_s_dv_timings(struct file *file, void *fh,
        struct v4l2_dv_timings *timings)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret;

        /* lock protects from changing sd_out */
        mutex_lock(&mdev->mutex);

        /* timings change cannot be done while there is an entity
         * dependent on output configuration
         */
        if (mdev->n_output > 0) {
                mutex_unlock(&mdev->mutex);
                return -EBUSY;
        }

        ret = v4l2_subdev_call(to_outsd(mdev), video, s_dv_timings, timings);

        mutex_unlock(&mdev->mutex);

        mxr_layer_update_output(layer);

        /* any failure should return EINVAL according to V4L2 doc */
        return ret ? -EINVAL : 0;
}

static int mxr_g_dv_timings(struct file *file, void *fh,
        struct v4l2_dv_timings *timings)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret;

        /* lock protects from changing sd_out */
        mutex_lock(&mdev->mutex);
        ret = v4l2_subdev_call(to_outsd(mdev), video, g_dv_timings, timings);
        mutex_unlock(&mdev->mutex);

        return ret ? -EINVAL : 0;
}

static int mxr_dv_timings_cap(struct file *file, void *fh,
        struct v4l2_dv_timings_cap *cap)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret;

        cap->pad = 0;

        /* lock protects from changing sd_out */
        mutex_lock(&mdev->mutex);
        ret = v4l2_subdev_call(to_outsd(mdev), pad, dv_timings_cap, cap);
        mutex_unlock(&mdev->mutex);

        return ret ? -EINVAL : 0;
}

static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret;

        /* lock protects from changing sd_out */
        mutex_lock(&mdev->mutex);

        /* standard change cannot be done while there is an entity
         * dependent on output configuration
         */
        if (mdev->n_output > 0) {
                mutex_unlock(&mdev->mutex);
                return -EBUSY;
        }

        ret = v4l2_subdev_call(to_outsd(mdev), video, s_std_output, norm);

        mutex_unlock(&mdev->mutex);

        mxr_layer_update_output(layer);

        return ret ? -EINVAL : 0;
}

static int mxr_g_std(struct file *file, void *fh, v4l2_std_id *norm)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret;

        /* lock protects from changing sd_out */
        mutex_lock(&mdev->mutex);
        ret = v4l2_subdev_call(to_outsd(mdev), video, g_std_output, norm);
        mutex_unlock(&mdev->mutex);

        return ret ? -EINVAL : 0;
}

static int mxr_enum_output(struct file *file, void *fh, struct v4l2_output *a)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        struct mxr_output *out;
        struct v4l2_subdev *sd;

        if (a->index >= mdev->output_cnt)
                return -EINVAL;
        out = mdev->output[a->index];
        BUG_ON(out == NULL);
        sd = out->sd;
        strlcpy(a->name, out->name, sizeof(a->name));

        /* try to obtain supported tv norms */
        v4l2_subdev_call(sd, video, g_tvnorms_output, &a->std);
        a->capabilities = 0;
        if (sd->ops->video && sd->ops->video->s_dv_timings)
                a->capabilities |= V4L2_OUT_CAP_DV_TIMINGS;
        if (sd->ops->video && sd->ops->video->s_std_output)
                a->capabilities |= V4L2_OUT_CAP_STD;
        a->type = V4L2_OUTPUT_TYPE_ANALOG;

        return 0;
}

static int mxr_s_output(struct file *file, void *fh, unsigned int i)
{
        struct video_device *vfd = video_devdata(file);
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;

        if (i >= mdev->output_cnt || mdev->output[i] == NULL)
                return -EINVAL;

        mutex_lock(&mdev->mutex);
        if (mdev->n_output > 0) {
                mutex_unlock(&mdev->mutex);
                return -EBUSY;
        }
        mdev->current_output = i;
        vfd->tvnorms = 0;
        v4l2_subdev_call(to_outsd(mdev), video, g_tvnorms_output,
                &vfd->tvnorms);
        mutex_unlock(&mdev->mutex);

        /* update the layer's geometry */
        mxr_layer_update_output(layer);

        mxr_dbg(mdev, "tvnorms = %08llx\n", vfd->tvnorms);

        return 0;
}

static int mxr_g_output(struct file *file, void *fh, unsigned int *p)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;

        mutex_lock(&mdev->mutex);
        *p = mdev->current_output;
        mutex_unlock(&mdev->mutex);

        return 0;
}

static int mxr_reqbufs(struct file *file, void *priv,
        struct v4l2_requestbuffers *p)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        return vb2_reqbufs(&layer->vb_queue, p);
}

static int mxr_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        return vb2_querybuf(&layer->vb_queue, p);
}

static int mxr_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d(%d)\n", __func__, __LINE__, p->index);
        return vb2_qbuf(&layer->vb_queue, p);
}

static int mxr_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        return vb2_dqbuf(&layer->vb_queue, p, file->f_flags & O_NONBLOCK);
}

static int mxr_expbuf(struct file *file, void *priv,
        struct v4l2_exportbuffer *eb)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        return vb2_expbuf(&layer->vb_queue, eb);
}

static int mxr_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        return vb2_streamon(&layer->vb_queue, i);
}

static int mxr_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        return vb2_streamoff(&layer->vb_queue, i);
}

static const struct v4l2_ioctl_ops mxr_ioctl_ops = {
        .vidioc_querycap = mxr_querycap,
        /* format handling */
        .vidioc_enum_fmt_vid_out_mplane = mxr_enum_fmt,
        .vidioc_s_fmt_vid_out_mplane = mxr_s_fmt,
        .vidioc_g_fmt_vid_out_mplane = mxr_g_fmt,
        /* buffer control */
        .vidioc_reqbufs = mxr_reqbufs,
        .vidioc_querybuf = mxr_querybuf,
        .vidioc_qbuf = mxr_qbuf,
        .vidioc_dqbuf = mxr_dqbuf,
        .vidioc_expbuf = mxr_expbuf,
        /* Streaming control */
        .vidioc_streamon = mxr_streamon,
        .vidioc_streamoff = mxr_streamoff,
        /* DV Timings functions */
        .vidioc_enum_dv_timings = mxr_enum_dv_timings,
        .vidioc_s_dv_timings = mxr_s_dv_timings,
        .vidioc_g_dv_timings = mxr_g_dv_timings,
        .vidioc_dv_timings_cap = mxr_dv_timings_cap,
        /* analog TV standard functions */
        .vidioc_s_std = mxr_s_std,
        .vidioc_g_std = mxr_g_std,
        /* Output handling */
        .vidioc_enum_output = mxr_enum_output,
        .vidioc_s_output = mxr_s_output,
        .vidioc_g_output = mxr_g_output,
        /* selection ioctls */
        .vidioc_g_selection = mxr_g_selection,
        .vidioc_s_selection = mxr_s_selection,
};

static int mxr_video_open(struct file *file)
{
        struct mxr_layer *layer = video_drvdata(file);
        struct mxr_device *mdev = layer->mdev;
        int ret = 0;

        mxr_dbg(mdev, "%s:%d\n", __func__, __LINE__);
        if (mutex_lock_interruptible(&layer->mutex))
                return -ERESTARTSYS;
        /* assure device probe is finished */
        wait_for_device_probe();
        /* creating context for file descriptor */
        ret = v4l2_fh_open(file);
        if (ret) {
                mxr_err(mdev, "v4l2_fh_open failed\n");
                goto unlock;
        }

        /* leaving if layer is already initialized */
        if (!v4l2_fh_is_singular_file(file))
                goto unlock;

        /* FIXME: should power be enabled on open? */
        ret = mxr_power_get(mdev);
        if (ret) {
                mxr_err(mdev, "power on failed\n");
                goto fail_fh_open;
        }

        ret = vb2_queue_init(&layer->vb_queue);
        if (ret != 0) {
                mxr_err(mdev, "failed to initialize vb2 queue\n");
                goto fail_power;
        }
        /* set default format, first on the list */
        layer->fmt = layer->fmt_array[0];
        /* setup default geometry */
        mxr_layer_default_geo(layer);
        mutex_unlock(&layer->mutex);

        return 0;

fail_power:
        mxr_power_put(mdev);

fail_fh_open:
        v4l2_fh_release(file);

unlock:
        mutex_unlock(&layer->mutex);

        return ret;
}

static unsigned int
mxr_video_poll(struct file *file, struct poll_table_struct *wait)
{
        struct mxr_layer *layer = video_drvdata(file);
        unsigned int res;

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

        mutex_lock(&layer->mutex);
        res = vb2_poll(&layer->vb_queue, file, wait);
        mutex_unlock(&layer->mutex);
        return res;
}

static int mxr_video_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct mxr_layer *layer = video_drvdata(file);
        int ret;

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);

        if (mutex_lock_interruptible(&layer->mutex))
                return -ERESTARTSYS;
        ret = vb2_mmap(&layer->vb_queue, vma);
        mutex_unlock(&layer->mutex);
        return ret;
}

static int mxr_video_release(struct file *file)
{
        struct mxr_layer *layer = video_drvdata(file);

        mxr_dbg(layer->mdev, "%s:%d\n", __func__, __LINE__);
        mutex_lock(&layer->mutex);
        if (v4l2_fh_is_singular_file(file)) {
                vb2_queue_release(&layer->vb_queue);
                mxr_power_put(layer->mdev);
        }
        v4l2_fh_release(file);
        mutex_unlock(&layer->mutex);
        return 0;
}

static const struct v4l2_file_operations mxr_fops = {
        .owner = THIS_MODULE,
        .open = mxr_video_open,
        .poll = mxr_video_poll,
        .mmap = mxr_video_mmap,
        .release = mxr_video_release,
        .unlocked_ioctl = video_ioctl2,
};

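/* vb2 queue_setup callback: report the plane count and plane sizes
 * for the currently configured format
 */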
static int queue_setup(struct vb2_queue *vq,
        unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[],
        struct device *alloc_devs[])
{
        struct mxr_layer *layer = vb2_get_drv_priv(vq);
        const struct mxr_format *fmt = layer->fmt;
        int i;
        struct mxr_device *mdev = layer->mdev;
        struct v4l2_plane_pix_format planes[3];

        mxr_dbg(mdev, "%s\n", __func__);
        /* checking if format was configured */
        if (fmt == NULL)
                return -EINVAL;
        mxr_dbg(mdev, "fmt = %s\n", fmt->name);
        mxr_mplane_fill(planes, fmt, layer->geo.src.full_width,
                layer->geo.src.full_height);

        *nplanes = fmt->num_subframes;
        for (i = 0; i < fmt->num_subframes; ++i) {
                sizes[i] = planes[i].sizeimage;
                mxr_dbg(mdev, "size[%d] = %08x\n", i, sizes[i]);
        }

        if (*nbuffers == 0)
                *nbuffers = 1;

        return 0;
}

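/* vb2 buf_queue callback: append the buffer to the layer's enqueue list */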
static void buf_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct mxr_buffer *buffer = container_of(vbuf, struct mxr_buffer, vb);
        struct mxr_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
        struct mxr_device *mdev = layer->mdev;
        unsigned long flags;

        spin_lock_irqsave(&layer->enq_slock, flags);
        list_add_tail(&buffer->list, &layer->enq_list);
        spin_unlock_irqrestore(&layer->enq_slock, flags);

        mxr_dbg(mdev, "queuing buffer\n");
}

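/* vb2 start_streaming callback: freeze the output configuration,
 * program the layer format and enable it in hardware
 */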
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
        struct mxr_layer *layer = vb2_get_drv_priv(vq);
        struct mxr_device *mdev = layer->mdev;
        unsigned long flags;

        mxr_dbg(mdev, "%s\n", __func__);

        /* block any changes in output configuration */
        mxr_output_get(mdev);

        mxr_layer_update_output(layer);
        layer->ops.format_set(layer);
        /* enabling layer in hardware */
        spin_lock_irqsave(&layer->enq_slock, flags);
        layer->state = MXR_LAYER_STREAMING;
        spin_unlock_irqrestore(&layer->enq_slock, flags);

        layer->ops.stream_set(layer, MXR_ENABLE);
        mxr_streamer_get(mdev);

        return 0;
}

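/* watchdog timer: if the hardware fails to return pending buffers in time,
 * complete them with an error status
 */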
static void mxr_watchdog(unsigned long arg)
{
        struct mxr_layer *layer = (struct mxr_layer *) arg;
        struct mxr_device *mdev = layer->mdev;
        unsigned long flags;

        mxr_err(mdev, "watchdog fired for layer %s\n", layer->vfd.name);

        spin_lock_irqsave(&layer->enq_slock, flags);

        if (layer->update_buf == layer->shadow_buf)
                layer->update_buf = NULL;
        if (layer->update_buf) {
                vb2_buffer_done(&layer->update_buf->vb.vb2_buf,
                        VB2_BUF_STATE_ERROR);
                layer->update_buf = NULL;
        }
        if (layer->shadow_buf) {
                vb2_buffer_done(&layer->shadow_buf->vb.vb2_buf,
                        VB2_BUF_STATE_ERROR);
                layer->shadow_buf = NULL;
        }
        spin_unlock_irqrestore(&layer->enq_slock, flags);
}

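/* vb2 stop_streaming callback: return all queued buffers, wait for the
 * hardware to release the remaining ones and disable the layer
 */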
static void stop_streaming(struct vb2_queue *vq)
{
        struct mxr_layer *layer = vb2_get_drv_priv(vq);
        struct mxr_device *mdev = layer->mdev;
        unsigned long flags;
        struct timer_list watchdog;
        struct mxr_buffer *buf, *buf_tmp;

        mxr_dbg(mdev, "%s\n", __func__);

        spin_lock_irqsave(&layer->enq_slock, flags);

        /* reset list */
        layer->state = MXR_LAYER_STREAMING_FINISH;

        /* set all buffers to be done */
        list_for_each_entry_safe(buf, buf_tmp, &layer->enq_list, list) {
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&layer->enq_slock, flags);

        /* give 1 second to complete the last buffers */
        setup_timer_on_stack(&watchdog, mxr_watchdog,
                (unsigned long)layer);
        mod_timer(&watchdog, jiffies + msecs_to_jiffies(1000));

        /* wait until all buffers go to the done state */
        vb2_wait_for_all_buffers(vq);

        /* stop timer if all synchronization is done */
        del_timer_sync(&watchdog);
        destroy_timer_on_stack(&watchdog);

        /* stopping hardware */
        spin_lock_irqsave(&layer->enq_slock, flags);
        layer->state = MXR_LAYER_IDLE;
        spin_unlock_irqrestore(&layer->enq_slock, flags);

        /* disabling layer in hardware */
        layer->ops.stream_set(layer, MXR_DISABLE);
        /* remove one streamer */
        mxr_streamer_put(mdev);
        /* allow changes in output configuration */
        mxr_output_put(mdev);
}

static struct vb2_ops mxr_video_qops = {
        .queue_setup = queue_setup,
        .buf_queue = buf_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
        .start_streaming = start_streaming,
        .stop_streaming = stop_streaming,
};

/* FIXME: try to move these functions into mxr_base_layer_create */
int mxr_base_layer_register(struct mxr_layer *layer)
{
        struct mxr_device *mdev = layer->mdev;
        int ret;

        ret = video_register_device(&layer->vfd, VFL_TYPE_GRABBER, -1);
        if (ret)
                mxr_err(mdev, "failed to register video device\n");
        else
                mxr_info(mdev, "registered layer %s as /dev/video%d\n",
                        layer->vfd.name, layer->vfd.num);
        return ret;
}

void mxr_base_layer_unregister(struct mxr_layer *layer)
{
        video_unregister_device(&layer->vfd);
}

void mxr_layer_release(struct mxr_layer *layer)
{
        if (layer->ops.release)
                layer->ops.release(layer);
}

void mxr_base_layer_release(struct mxr_layer *layer)
{
        kfree(layer);
}

static void mxr_vfd_release(struct video_device *vdev)
{
        pr_info("video device release\n");
}

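/* allocate and set up a generic mixer layer together with its
 * video device and vb2 queue
 */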
struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
        int idx, char *name, const struct mxr_layer_ops *ops)
{
        struct mxr_layer *layer;

        layer = kzalloc(sizeof(*layer), GFP_KERNEL);
        if (layer == NULL) {
                mxr_err(mdev, "not enough memory for layer.\n");
                goto fail;
        }

        layer->mdev = mdev;
        layer->idx = idx;
        layer->ops = *ops;

        spin_lock_init(&layer->enq_slock);
        INIT_LIST_HEAD(&layer->enq_list);
        mutex_init(&layer->mutex);

        layer->vfd = (struct video_device) {
                .minor = -1,
                .release = mxr_vfd_release,
                .fops = &mxr_fops,
                .vfl_dir = VFL_DIR_TX,
                .ioctl_ops = &mxr_ioctl_ops,
        };
        strlcpy(layer->vfd.name, name, sizeof(layer->vfd.name));

        video_set_drvdata(&layer->vfd, layer);
        layer->vfd.lock = &layer->mutex;
        layer->vfd.v4l2_dev = &mdev->v4l2_dev;

        layer->vb_queue = (struct vb2_queue) {
                .type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE,
                .io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF,
                .drv_priv = layer,
                .buf_struct_size = sizeof(struct mxr_buffer),
                .ops = &mxr_video_qops,
                .min_buffers_needed = 1,
                .mem_ops = &vb2_dma_contig_memops,
                .lock = &layer->mutex,
                .dev = mdev->dev,
        };

        return layer;

fail:
        return NULL;
}

static const struct mxr_format *find_format_by_fourcc(
        struct mxr_layer *layer, unsigned long fourcc)
{
        int i;

        for (i = 0; i < layer->fmt_array_size; ++i)
                if (layer->fmt_array[i]->fourcc == fourcc)
                        return layer->fmt_array[i];
        return NULL;
}

static const struct mxr_format *find_format_by_index(
        struct mxr_layer *layer, unsigned long index)
{
        if (index >= layer->fmt_array_size)
                return NULL;
        return layer->fmt_array[index];
}