/*
 * isphist.c
 *
 * TI OMAP3 ISP - Histogram module
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc.
 *
 * Contacts: David Cohen <dacohen@gmail.com>
 *           Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *           Sakari Ailus <sakari.ailus@iki.fi>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/device.h>

#include "isp.h"
#include "ispreg.h"
#include "isphist.h"

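/*
 * Histogram data is read out either through a DMA channel, requested at init
 * time when HIST_CONFIG_DMA is set, or through PIO when no channel could be
 * obtained. HIST_USING_DMA() reports whether a channel is available.
 */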
#define OMAP24XX_DMA_NO_DEVICE	0

#define HIST_CONFIG_DMA		1

#define HIST_USING_DMA(hist)	((hist)->dma_ch >= 0)

/*
 * hist_reset_mem - Clear the histogram memory before starting the stats
 * engine.
 */
static void hist_reset_mem(struct ispstat *hist)
{
        struct isp_device *isp = hist->isp;
        struct omap3isp_hist_config *conf = hist->priv;
        unsigned int i;

        isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

        /*
         * While this bit is set, the histogram internal buffer is cleared at
         * the same time it is read. The bit must be cleared afterwards.
         */
        isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

        /*
         * Clear 4 words per iteration as an optimization; it avoids 3/4 of
         * the loop jumps. OMAP3ISP_HIST_MEM_SIZE is known to be divisible
         * by 4.
         */
        for (i = OMAP3ISP_HIST_MEM_SIZE / 4; i > 0; i--) {
                isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
                isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
                isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
                isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
        }
        isp_reg_clr(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

        hist->wait_acc_frames = conf->num_acc_frames;
}

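/*
 * hist_dma_config - Prepare the DMA transfer parameters used to copy the
 * histogram memory into the active statistics buffer.
 */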
static void hist_dma_config(struct ispstat *hist)
{
        struct isp_device *isp = hist->isp;

        hist->dma_config.data_type = OMAP_DMA_DATA_TYPE_S32;
        hist->dma_config.sync_mode = OMAP_DMA_SYNC_ELEMENT;
        hist->dma_config.frame_count = 1;
        hist->dma_config.src_amode = OMAP_DMA_AMODE_CONSTANT;
        hist->dma_config.src_start = isp->mmio_base_phys[OMAP3_ISP_IOMEM_HIST]
                                   + ISPHIST_DATA;
        hist->dma_config.dst_amode = OMAP_DMA_AMODE_POST_INC;
        hist->dma_config.src_or_dst_synch = OMAP_DMA_SRC_SYNC;
}

/*
 * hist_setup_regs - Helper function to update the histogram registers.
 */
static void hist_setup_regs(struct ispstat *hist, void *priv)
{
        struct isp_device *isp = hist->isp;
        struct omap3isp_hist_config *conf = priv;
        int c;
        u32 cnt;
        u32 wb_gain;
        u32 reg_hor[OMAP3ISP_HIST_MAX_REGIONS];
        u32 reg_ver[OMAP3ISP_HIST_MAX_REGIONS];

        if (!hist->update || hist->state == ISPSTAT_DISABLED ||
            hist->state == ISPSTAT_DISABLING)
                return;

        cnt = conf->cfa << ISPHIST_CNT_CFA_SHIFT;

        wb_gain = conf->wg[0] << ISPHIST_WB_GAIN_WG00_SHIFT;
        wb_gain |= conf->wg[1] << ISPHIST_WB_GAIN_WG01_SHIFT;
        wb_gain |= conf->wg[2] << ISPHIST_WB_GAIN_WG02_SHIFT;
        if (conf->cfa == OMAP3ISP_HIST_CFA_BAYER)
                wb_gain |= conf->wg[3] << ISPHIST_WB_GAIN_WG03_SHIFT;

        /* Regions size and position */
        for (c = 0; c < OMAP3ISP_HIST_MAX_REGIONS; c++) {
                if (c < conf->num_regions) {
                        reg_hor[c] = (conf->region[c].h_start <<
                                      ISPHIST_REG_START_SHIFT) |
                                     (conf->region[c].h_end <<
                                      ISPHIST_REG_END_SHIFT);
                        reg_ver[c] = (conf->region[c].v_start <<
                                      ISPHIST_REG_START_SHIFT) |
                                     (conf->region[c].v_end <<
                                      ISPHIST_REG_END_SHIFT);
                } else {
                        reg_hor[c] = 0;
                        reg_ver[c] = 0;
                }
        }

        cnt |= conf->hist_bins << ISPHIST_CNT_BINS_SHIFT;
        switch (conf->hist_bins) {
        case OMAP3ISP_HIST_BINS_256:
                cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 8) <<
                        ISPHIST_CNT_SHIFT_SHIFT;
                break;
        case OMAP3ISP_HIST_BINS_128:
                cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 7) <<
                        ISPHIST_CNT_SHIFT_SHIFT;
                break;
        case OMAP3ISP_HIST_BINS_64:
                cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 6) <<
                        ISPHIST_CNT_SHIFT_SHIFT;
                break;
        default: /* OMAP3ISP_HIST_BINS_32 */
                cnt |= (ISPHIST_IN_BIT_WIDTH_CCDC - 5) <<
                        ISPHIST_CNT_SHIFT_SHIFT;
                break;
        }

        hist_reset_mem(hist);

        isp_reg_writel(isp, cnt, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT);
        isp_reg_writel(isp, wb_gain, OMAP3_ISP_IOMEM_HIST, ISPHIST_WB_GAIN);
        isp_reg_writel(isp, reg_hor[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_HORZ);
        isp_reg_writel(isp, reg_ver[0], OMAP3_ISP_IOMEM_HIST, ISPHIST_R0_VERT);
        isp_reg_writel(isp, reg_hor[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_HORZ);
        isp_reg_writel(isp, reg_ver[1], OMAP3_ISP_IOMEM_HIST, ISPHIST_R1_VERT);
        isp_reg_writel(isp, reg_hor[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_HORZ);
        isp_reg_writel(isp, reg_ver[2], OMAP3_ISP_IOMEM_HIST, ISPHIST_R2_VERT);
        isp_reg_writel(isp, reg_hor[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_HORZ);
        isp_reg_writel(isp, reg_ver[3], OMAP3_ISP_IOMEM_HIST, ISPHIST_R3_VERT);

        hist->update = 0;
        hist->config_counter += hist->inc_config;
        hist->inc_config = 0;
        hist->buf_size = conf->buf_size;
}

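/*
 * hist_enable - Enable/disable the histogram module and its subclock.
 */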
static void hist_enable(struct ispstat *hist, int enable)
{
        if (enable) {
                isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
                            ISPHIST_PCR_ENABLE);
                omap3isp_subclk_enable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
        } else {
                isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR,
                            ISPHIST_PCR_ENABLE);
                omap3isp_subclk_disable(hist->isp, OMAP3_ISP_SUBCLK_HIST);
        }
}

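/*
 * hist_busy - Report whether the histogram module is busy (PCR BUSY bit set).
 */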
static int hist_busy(struct ispstat *hist)
{
        return isp_reg_readl(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_PCR)
                                                & ISPHIST_PCR_BUSY;
}

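/*
 * hist_dma_cb - DMA completion callback, invoked when the histogram memory
 * has been transferred to the statistics buffer (or on DMA error).
 */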
static void hist_dma_cb(int lch, u16 ch_status, void *data)
{
        struct ispstat *hist = data;

        if (ch_status & ~OMAP_DMA_BLOCK_IRQ) {
                dev_dbg(hist->isp->dev, "hist: DMA error. status = 0x%04x\n",
                        ch_status);
                omap_stop_dma(lch);
                hist_reset_mem(hist);
                atomic_set(&hist->buf_err, 1);
        }
        isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
                    ISPHIST_CNT_CLEAR);

        omap3isp_stat_dma_isr(hist);
        if (hist->state != ISPSTAT_DISABLED)
                omap3isp_hist_dma_done(hist->isp);
}

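/*
 * hist_buf_dma - Start a DMA transfer of the histogram memory into the active
 * buffer. Returns STAT_BUF_WAITING_DMA on success or STAT_NO_BUF on error.
 */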
static int hist_buf_dma(struct ispstat *hist)
{
        dma_addr_t dma_addr = hist->active_buf->dma_addr;

        if (unlikely(!dma_addr)) {
                dev_dbg(hist->isp->dev, "hist: invalid DMA buffer address\n");
                hist_reset_mem(hist);
                return STAT_NO_BUF;
        }

        isp_reg_writel(hist->isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);
        isp_reg_set(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
                    ISPHIST_CNT_CLEAR);
        omap3isp_flush(hist->isp);
        hist->dma_config.dst_start = dma_addr;
        hist->dma_config.elem_count = hist->buf_size / sizeof(u32);
        omap_set_dma_params(hist->dma_ch, &hist->dma_config);

        omap_start_dma(hist->dma_ch);

        return STAT_BUF_WAITING_DMA;
}

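/*
 * hist_buf_pio - Read the histogram memory into the active buffer using PIO.
 * Returns STAT_BUF_DONE on success or STAT_NO_BUF on error.
 */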
static int hist_buf_pio(struct ispstat *hist)
{
        struct isp_device *isp = hist->isp;
        u32 *buf = hist->active_buf->virt_addr;
        unsigned int i;

        if (!buf) {
                dev_dbg(isp->dev, "hist: invalid PIO buffer address\n");
                hist_reset_mem(hist);
                return STAT_NO_BUF;
        }

        isp_reg_writel(isp, 0, OMAP3_ISP_IOMEM_HIST, ISPHIST_ADDR);

        /*
         * While this bit is set, the histogram internal buffer is cleared at
         * the same time it is read. The bit must be cleared just after all
         * data has been acquired.
         */
        isp_reg_set(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT, ISPHIST_CNT_CLEAR);

        /*
         * Read four 32-bit words per iteration as an optimization; it avoids
         * 3/4 of the loop jumps. buf_size is known to be divisible by 16.
         */
        for (i = hist->buf_size / 16; i > 0; i--) {
                *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
                *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
                *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
                *buf++ = isp_reg_readl(isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_DATA);
        }
        isp_reg_clr(hist->isp, OMAP3_ISP_IOMEM_HIST, ISPHIST_CNT,
                    ISPHIST_CNT_CLEAR);

        return STAT_BUF_DONE;
}

/*
 * hist_buf_process - Callback from ISP driver for HIST interrupt.
 */
static int hist_buf_process(struct ispstat *hist)
{
        struct omap3isp_hist_config *user_cfg = hist->priv;
        int ret;

        if (atomic_read(&hist->buf_err) || hist->state != ISPSTAT_ENABLED) {
                hist_reset_mem(hist);
                return STAT_NO_BUF;
        }

        if (--(hist->wait_acc_frames))
                return STAT_NO_BUF;

        if (HIST_USING_DMA(hist))
                ret = hist_buf_dma(hist);
        else
                ret = hist_buf_pio(hist);

        hist->wait_acc_frames = user_cfg->num_acc_frames;

        return ret;
}

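/*
 * hist_get_buf_size - Compute the buffer size required by a configuration:
 * the per-region memory size for the selected bin count times the number of
 * regions.
 */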
static u32 hist_get_buf_size(struct omap3isp_hist_config *conf)
{
        return OMAP3ISP_HIST_MEM_SIZE_BINS(conf->hist_bins) * conf->num_regions;
}

/*
 * hist_validate_params - Helper function to check the user-supplied params.
 * @user_cfg: Pointer to user configuration structure.
 *
 * Returns 0 if the configuration is valid.
 */
static int hist_validate_params(struct ispstat *hist, void *new_conf)
{
        struct omap3isp_hist_config *user_cfg = new_conf;
        int c;
        u32 buf_size;

        if (user_cfg->cfa > OMAP3ISP_HIST_CFA_FOVEONX3)
                return -EINVAL;

        /* Regions size and position */

        if ((user_cfg->num_regions < OMAP3ISP_HIST_MIN_REGIONS) ||
            (user_cfg->num_regions > OMAP3ISP_HIST_MAX_REGIONS))
                return -EINVAL;

        /* Regions */
        for (c = 0; c < user_cfg->num_regions; c++) {
                if (user_cfg->region[c].h_start & ~ISPHIST_REG_START_END_MASK)
                        return -EINVAL;
                if (user_cfg->region[c].h_end & ~ISPHIST_REG_START_END_MASK)
                        return -EINVAL;
                if (user_cfg->region[c].v_start & ~ISPHIST_REG_START_END_MASK)
                        return -EINVAL;
                if (user_cfg->region[c].v_end & ~ISPHIST_REG_START_END_MASK)
                        return -EINVAL;
                if (user_cfg->region[c].h_start > user_cfg->region[c].h_end)
                        return -EINVAL;
                if (user_cfg->region[c].v_start > user_cfg->region[c].v_end)
                        return -EINVAL;
        }

        switch (user_cfg->num_regions) {
        case 1:
                if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_256)
                        return -EINVAL;
                break;
        case 2:
                if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_128)
                        return -EINVAL;
                break;
        default: /* 3 or 4 */
                if (user_cfg->hist_bins > OMAP3ISP_HIST_BINS_64)
                        return -EINVAL;
                break;
        }

        buf_size = hist_get_buf_size(user_cfg);
        if (buf_size > user_cfg->buf_size)
                /* User's buf_size request wasn't large enough */
                user_cfg->buf_size = buf_size;
        else if (user_cfg->buf_size > OMAP3ISP_HIST_MAX_BUF_SIZE)
                user_cfg->buf_size = OMAP3ISP_HIST_MAX_BUF_SIZE;

        return 0;
}

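/*
 * hist_comp_params - Compare a new configuration with the current one.
 * Returns 1 if they differ (and the hardware must be reprogrammed),
 * 0 otherwise.
 */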
static int hist_comp_params(struct ispstat *hist,
                            struct omap3isp_hist_config *user_cfg)
{
        struct omap3isp_hist_config *cur_cfg = hist->priv;
        int c;

        if (cur_cfg->cfa != user_cfg->cfa)
                return 1;

        if (cur_cfg->num_acc_frames != user_cfg->num_acc_frames)
                return 1;

        if (cur_cfg->hist_bins != user_cfg->hist_bins)
                return 1;

        for (c = 0; c < OMAP3ISP_HIST_MAX_WG; c++) {
                if (c == 3 && user_cfg->cfa == OMAP3ISP_HIST_CFA_FOVEONX3)
                        break;
                else if (cur_cfg->wg[c] != user_cfg->wg[c])
                        return 1;
        }

        if (cur_cfg->num_regions != user_cfg->num_regions)
                return 1;

        /* Regions */
        for (c = 0; c < user_cfg->num_regions; c++) {
                if (cur_cfg->region[c].h_start != user_cfg->region[c].h_start)
                        return 1;
                if (cur_cfg->region[c].h_end != user_cfg->region[c].h_end)
                        return 1;
                if (cur_cfg->region[c].v_start != user_cfg->region[c].v_start)
                        return 1;
                if (cur_cfg->region[c].v_end != user_cfg->region[c].v_end)
                        return 1;
        }

        return 0;
}

/*
 * hist_set_params - Helper function to check and store the user-supplied
 * params.
 * @new_conf: Pointer to user configuration structure.
 */
static void hist_set_params(struct ispstat *hist, void *new_conf)
{
        struct omap3isp_hist_config *user_cfg = new_conf;
        struct omap3isp_hist_config *cur_cfg = hist->priv;

        if (!hist->configured || hist_comp_params(hist, user_cfg)) {
                if (user_cfg->num_acc_frames == 0)
                        user_cfg->num_acc_frames = 1;
                memcpy(cur_cfg, user_cfg, sizeof(*user_cfg));
                hist->inc_config++;
                hist->update = 1;
                /*
                 * The user might have asked for a bigger buffer than
                 * necessary for this configuration. In order to return the
                 * right amount of data during buffer requests, calculate the
                 * size here instead of sticking with user_cfg->buf_size.
                 */
                cur_cfg->buf_size = hist_get_buf_size(cur_cfg);
        }
}

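/*
 * hist_ioctl - Handle the histogram private V4L2 subdev ioctls (configure,
 * request statistics, enable/disable).
 */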
static long hist_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
        struct ispstat *stat = v4l2_get_subdevdata(sd);

        switch (cmd) {
        case VIDIOC_OMAP3ISP_HIST_CFG:
                return omap3isp_stat_config(stat, arg);
        case VIDIOC_OMAP3ISP_STAT_REQ:
                return omap3isp_stat_request_statistics(stat, arg);
        case VIDIOC_OMAP3ISP_STAT_EN: {
                int *en = arg;
                return omap3isp_stat_enable(stat, !!*en);
        }
        }

        return -ENOIOCTLCMD;
}

static const struct ispstat_ops hist_ops = {
        .validate_params = hist_validate_params,
        .set_params = hist_set_params,
        .setup_regs = hist_setup_regs,
        .enable = hist_enable,
        .busy = hist_busy,
        .buf_process = hist_buf_process,
};

static const struct v4l2_subdev_core_ops hist_subdev_core_ops = {
        .ioctl = hist_ioctl,
        .subscribe_event = omap3isp_stat_subscribe_event,
        .unsubscribe_event = omap3isp_stat_unsubscribe_event,
};

static const struct v4l2_subdev_video_ops hist_subdev_video_ops = {
        .s_stream = omap3isp_stat_s_stream,
};

static const struct v4l2_subdev_ops hist_subdev_ops = {
        .core = &hist_subdev_core_ops,
        .video = &hist_subdev_video_ops,
};

/*
 * omap3isp_hist_init - Module Initialization.
 */
int omap3isp_hist_init(struct isp_device *isp)
{
        struct ispstat *hist = &isp->isp_hist;
        struct omap3isp_hist_config *hist_cfg;
        int ret = -1;

        hist_cfg = kzalloc(sizeof(*hist_cfg), GFP_KERNEL);
        if (hist_cfg == NULL)
                return -ENOMEM;

        memset(hist, 0, sizeof(*hist));
        hist->isp = isp;

        if (HIST_CONFIG_DMA)
                ret = omap_request_dma(OMAP24XX_DMA_NO_DEVICE, "DMA_ISP_HIST",
                                       hist_dma_cb, hist, &hist->dma_ch);
        if (ret) {
                if (HIST_CONFIG_DMA)
                        dev_warn(isp->dev, "hist: DMA request channel failed. "
                                           "Using PIO only.\n");
                hist->dma_ch = -1;
        } else {
                dev_dbg(isp->dev, "hist: DMA channel = %d\n", hist->dma_ch);
                hist_dma_config(hist);
                omap_enable_dma_irq(hist->dma_ch, OMAP_DMA_BLOCK_IRQ);
        }

        hist->ops = &hist_ops;
        hist->priv = hist_cfg;
        hist->event_type = V4L2_EVENT_OMAP3ISP_HIST;

        ret = omap3isp_stat_init(hist, "histogram", &hist_subdev_ops);
        if (ret) {
                kfree(hist_cfg);
                if (HIST_USING_DMA(hist))
                        omap_free_dma(hist->dma_ch);
        }

        return ret;
}

/*
 * omap3isp_hist_cleanup - Module cleanup.
 */
void omap3isp_hist_cleanup(struct isp_device *isp)
{
        if (HIST_USING_DMA(&isp->isp_hist))
                omap_free_dma(isp->isp_hist.dma_ch);
        kfree(isp->isp_hist.priv);
        omap3isp_stat_cleanup(&isp->isp_hist);
}