[media] cx25840: fix probing of cx2583x chips
[deliverable/linux.git] / drivers / media / video / ivtv / ivtv-irq.c
CommitLineData
1a0adaf3
HV
1/* interrupt handling
2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
1a0adaf3
HV
22#include "ivtv-queue.h"
23#include "ivtv-udma.h"
24#include "ivtv-irq.h"
1a0adaf3
HV
25#include "ivtv-mailbox.h"
26#include "ivtv-vbi.h"
1e13f9e3 27#include "ivtv-yuv.h"
09250193 28#include <media/v4l2-event.h>
1a0adaf3
HV
29
30#define DMA_MAGIC_COOKIE 0x000001fe
31
1a0adaf3
HV
/* Forward declaration: decoder DMA kick-off, defined later in this file. */
static void ivtv_dma_dec_start(struct ivtv_stream *s);

/* Maps the capture-type index the firmware reports in mailbox data[0]
   (see ivtv_irq_enc_start_cap) to the corresponding encoder stream. */
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};
40
dc02d50a
HV
41
/* Perform the deferred PIO transfer for the current PIO stream: copy each
 * pending segment from card memory into the buffers queued on q_dma, then
 * raise the ENC_PIO_COMPLETE interrupt by writing it to register 0x44 so
 * the completion path (ivtv_irq_enc_pio_complete) runs.
 * Runs from the IRQ worker thread, not in hard-IRQ context.
 */
static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	/* Bail out if the PIO stream went away or no longer uses PIO; still
	   trigger the completion interrupt so state is cleaned up. */
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		/* size field is masked to 18 bits; upper bits carry flags */
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}
73
/* Deferred-work entry point (kthread worker): dispatch each pending work
 * item flagged by the interrupt handler. PIO copies, VBI processing and
 * YUV register updates are all too slow to do in hard-IRQ context.
 */
void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);
}
87
1a0adaf3
HV
88/* Determine the required DMA size, setup enough buffers in the predma queue and
89 actually copy the data from the card to the buffers in case a PIO transfer is
90 required for this stream.
91 */
/* Determine the required DMA size, setup enough buffers in the predma queue and
   actually copy the data from the card to the buffers in case a PIO transfer is
   required for this stream.

   @s:    stream to append a transfer for
   @data: mailbox arguments from the firmware (offsets/sizes/PTS); note that
          for the VBI stream types the data[] contents are not consulted —
          offsets and sizes come from itv->vbi instead.
   Returns 0 on success, -1 on any sanity-check or resource failure.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;	/* append after already-pending SG entries */
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
	case IVTV_ENC_STREAM_TYPE_MPG:
		offset = data[1];
		size = data[2];
		s->pending_pts = 0;
		break;

	case IVTV_ENC_STREAM_TYPE_YUV:
		offset = data[1];
		size = data[2];
		UVoffset = data[3];
		UVsize = data[4];
		s->pending_pts = ((u64) data[5] << 32) | data[6];
		break;

	case IVTV_ENC_STREAM_TYPE_PCM:
		/* skip the 12-byte header; PTS lives just before the payload */
		offset = data[1] + 12;
		size = data[2] - 12;
		s->pending_pts = read_dec(offset - 8) |
			((u64)(read_dec(offset - 12)) << 32);
		if (itv->has_cx23415)
			offset += IVTV_DECODER_OFFSET;
		break;

	case IVTV_ENC_STREAM_TYPE_VBI:
		size = itv->vbi.enc_size * itv->vbi.fpi;
		offset = read_enc(itv->vbi.enc_start - 4) + 12;
		if (offset == 12) {
			IVTV_DEBUG_INFO("VBI offset == 0\n");
			return -1;
		}
		s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
		break;

	case IVTV_DEC_STREAM_TYPE_VBI:
		size = read_dec(itv->vbi.dec_start + 4) + 8;
		offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
		s->pending_pts = 0;
		offset += IVTV_DECODER_OFFSET;
		break;
	default:
		/* shouldn't happen */
		return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		/* Save the word we overwrite so dma_post() can restore it;
		   the cookie marks where the transferred data really starts. */
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	/* rc > 0 means buffers were stolen from q_full; warn only on the first
	   occurrence while an application is actively reading. */
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* skip buffers that were already on q_predma before this call */
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}
226
227static void dma_post(struct ivtv_stream *s)
228{
229 struct ivtv *itv = s->itv;
230 struct ivtv_buffer *buf = NULL;
231 struct list_head *p;
232 u32 offset;
b0510f8d 233 __le32 *u32buf;
1a0adaf3
HV
234 int x = 0;
235
bd58df6d 236 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
1a0adaf3
HV
237 s->name, s->dma_offset);
238 list_for_each(p, &s->q_dma.list) {
239 buf = list_entry(p, struct ivtv_buffer, list);
b0510f8d 240 u32buf = (__le32 *)buf->buf;
1a0adaf3
HV
241
242 /* Sync Buffer */
243 ivtv_buf_sync_for_cpu(s, buf);
244
51a99c04 245 if (x == 0 && ivtv_use_dma(s)) {
1a0adaf3
HV
246 offset = s->dma_last_offset;
247 if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
248 {
249 for (offset = 0; offset < 64; offset++) {
250 if (u32buf[offset] == DMA_MAGIC_COOKIE) {
251 break;
252 }
253 }
254 offset *= 4;
255 if (offset == 256) {
256 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
257 offset = s->dma_last_offset;
258 }
259 if (s->dma_last_offset != offset)
260 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
261 s->dma_last_offset = offset;
262 }
263 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
264 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
265 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
266 }
267 else {
268 write_enc_sync(0, s->dma_offset);
269 }
270 if (offset) {
271 buf->bytesused -= offset;
272 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
273 }
274 *u32buf = cpu_to_le32(s->dma_backup);
275 }
276 x++;
277 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
278 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
279 s->type == IVTV_ENC_STREAM_TYPE_VBI)
f4071b85 280 buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
1a0adaf3
HV
281 }
282 if (buf)
283 buf->bytesused += s->dma_last_offset;
284 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
805a4392 285 list_for_each_entry(buf, &s->q_dma.list, list) {
dc02d50a
HV
286 /* Parse and Groom VBI Data */
287 s->q_dma.bytesused -= buf->bytesused;
288 ivtv_process_vbi_data(itv, buf, 0, s->type);
289 s->q_dma.bytesused += buf->bytesused;
290 }
1a0adaf3
HV
291 if (s->id == -1) {
292 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
293 return;
294 }
295 }
296 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
297 if (s->id != -1)
298 wake_up(&s->waitq);
299}
300
/* Build the scatter-gather list for a host->decoder transfer starting at card
 * address @offset and start (or queue) the decoder DMA. For YUV streams the
 * list is split so that the Y plane lands at @offset and the UV plane at
 * offset + IVTV_YUV_BUFFER_UV_OFFSET, with an optional leading blanking
 * block. @lock selects whether dma_reg_lock is taken here (caller may
 * already hold it).
 */
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	/* Y plane size: 720 pixels wide, height rounded up to a multiple of 32 */
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* YUV UV Offset from Y Buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			/* This buffer straddles the Y/UV boundary: first SG
			   entry finishes the Y plane ... */
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			/* ... and, if bytes remain, a second entry starts UV */
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
					buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
					buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		/* encoder DMA in flight: defer until it completes */
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}
373
37093b1e
HV
/* Program and start one encoder DMA segment (sg_processing[sg_processed]).
 * Bit 31 of the size word tells the hardware this is the last/only element,
 * and a 300 ms watchdog timer is armed to catch a stalled transfer.
 */
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	/* bit 1 of DMAXFER kicks the encoder DMA engine */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
389
/* Program and start one decoder DMA segment; mirror of
 * ivtv_dma_enc_start_xfer() but targets DECDMAADDR and DMAXFER bit 0.
 * Also arms the 300 ms DMA watchdog timer.
 */
static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	/* bit 0 of DMAXFER kicks the decoder DMA engine */
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}
405
1a0adaf3
HV
406/* start the encoder DMA */
/* start the encoder DMA */
/* Move the stream's predma buffers to q_dma, snapshot sg_pending into
 * sg_processing, and either kick off the first DMA segment or, for PIO
 * streams, schedule the PIO work handler. For MPEG, pending VBI segments
 * may be appended so both transfer in one DMA run (see comment below).
 * Caller is expected to hold dma_reg_lock (lock is not taken here).
 */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

	/* extend the last segment by 256 bytes so the magic-cookie area is
	   transferred as well (DMA only) */
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of a
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	/* latch the pending state into the in-flight ("processing") state */
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		/* defer the actual copy to the IRQ worker thread */
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}
467
/* Start the decoder DMA: move predma buffers to q_dma, latch sg_pending
 * into sg_processing and launch the first transfer segment. Caller holds
 * dma_reg_lock (taken by ivtv_dma_stream_dec_prepare when lock != 0).
 */
static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}
486
/* Handle the DMA_READ interrupt: a host->decoder transfer segment finished.
 * On error retry the whole transfer (up to 3 times), otherwise start the
 * next segment or, when all segments are done, notify the firmware, recycle
 * the buffers and clear the DMA-in-progress state. Also finishes UDMA.
 */
static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	/* transfer completed: disarm the stall watchdog */
	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		/* bits 2 and 4 of DMASTATUS indicate a DMA error */
		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason must kick the firmware, like PIO mode,
		   I think this tells the firmware we are done and the size
		   of the xfer so it can calculate what we need next.
		   I think we can do this part ourselves but would have to
		   fully calculate xfer info ourselves and not use interrupts
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
551
/* Handle the ENC_DMA_COMPLETE interrupt: one encoder DMA segment finished.
 * On a firmware-reported error retry the whole transfer (max 3 attempts);
 * otherwise start the next segment, or run dma_post() on the stream (and on
 * the VBI stream if its data was piggy-backed onto this MPEG transfer).
 */
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	/* transfer completed: disarm the stall watchdog */
	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	/* bits 3 and 4 of the status word flag a DMA error */
	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
				s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}
600
dc02d50a
HV
601static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
602{
603 struct ivtv_stream *s;
604
605 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
606 itv->cur_pio_stream = -1;
607 return;
608 }
609 s = &itv->streams[itv->cur_pio_stream];
bd58df6d 610 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
dc02d50a
HV
611 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
612 itv->cur_pio_stream = -1;
613 dma_post(s);
614 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
615 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
616 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
617 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
618 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
619 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
620 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
621 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
dc02d50a 622 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
dc02d50a 623 dma_post(s);
dc02d50a
HV
624 }
625 wake_up(&itv->dma_waitq);
626}
627
1a0adaf3
HV
/* Handle the DMA_ERR interrupt (including the watchdog-timer path): clear
 * the hardware error status and restart the failed transfer — decoder or
 * encoder stream DMA, or a user-space UDMA — or give up and wake waiters.
 */
static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];

	del_timer(&itv->dma_timer);
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
			read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
	/* writing back the low status bits clears the error condition */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		/* retry */
		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
			ivtv_dma_dec_start(s);
		else
			ivtv_dma_enc_start(s);
		return;
	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
657
/* Handle the ENC_START_CAP interrupt: the firmware has captured data and
 * tells us (via the mailbox) which stream it belongs to and where it lives
 * in card memory. Queue a transfer for the mapped stream.
 */
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	/* data[0] indexes ivtv_stream_map; only 0..2 (MPG/YUV/PCM) expected here */
	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
			data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}
677
/* Handle the ENC_VBI_CAP interrupt: queue a transfer for captured VBI data.
 * NOTE(review): data[] is passed to stream_enc_dma_append() uninitialized;
 * that function does not read data[] for the VBI stream types (offsets come
 * from itv->vbi), so this is harmless — but confirm before relying on it.
 */
static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}
689
/* Handle the DEC_VBI_RE_INSERT interrupt: the decoder has VBI data to hand
 * back; queue a PIO transfer for the decoder VBI stream if it is claimed.
 * NOTE(review): as in ivtv_irq_enc_vbi_cap(), data[] is uninitialized but
 * unused by stream_enc_dma_append() for VBI stream types.
 */
static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		/* decoder VBI reinsert always goes through PIO */
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}
701
/* Handle the DEC_DATA_REQ interrupt: the decoder asks the host for more
 * MPG or YUV data. Record the requested offset/size; if enough data is
 * queued on q_full, move it to q_predma and start the host->decoder DMA,
 * otherwise flag the stream as needing data.
 */
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		/* full YUV frame: 1080 bytes/line x height rounded up to 32 */
		itv->dma_data_req_size =
			1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		/* cap a single MPG request at 64 KB */
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}
737
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	/* Flip the YUV display registers to the next frame when either the
	   field matches the frame's sync field and the field changed, or the
	   field changed and the frame is not interlaced. */
	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
		(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		/* honor the frame's display delay for interlaced content */
		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				/* program Y and UV base addresses for both
				   display register banks (0x82c-0x838) */
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	/* Only act once per field change. */
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		/* queue a V4L2 VSYNC event for the output stream's device */
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				/* forced update: use the most recently DMA'd frame */
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}
831
2f3a9893 832#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)
1a0adaf3
HV
833
/* Top-half interrupt handler for the CX2341x based ivtv cards.
 *
 * Reads the IRQ status register, acks the interrupts we own (those not
 * masked in itv->irqmask), dispatches each asserted source to its service
 * routine, then schedules any pending per-stream DMA/PIO transfers and
 * queues the deferred-work kthread if a service routine requested it.
 * All register and DMA-state manipulation happens under dma_reg_lock.
 *
 * dev_id is the struct ivtv * passed to request_irq().
 * Returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;	/* status bits that are both asserted and unmasked */
	u32 stat;	/* raw IRQ status register contents */
	int i;
	u8 vsync_force = 0;	/* set when a self-clearing vsync was missed */

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			/* compare the field bit of the decoder line/field
			   register against the last field we serviced */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	/* Dispatch each asserted source to its service routine.  The order
	   below matters: completions are handled before new work is started. */
	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		/* encoder end-of-stream: flag it and wake anyone waiting
		   for the stream to finish */
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* If a DMA-related interrupt fired and no DMA transfer is currently
	   in flight, start the next pending stream transfer.  irq_rr_idx
	   rotates the starting stream so no stream can starve the others. */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		/* no stream transfer was started: user DMA may run now */
		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	/* Same round-robin scheduling for PIO-mode streams (encoder-side
	   and VBI only; decoder streams never use PIO here). */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	/* A service routine above asked for deferred work: hand it to the
	   per-device kthread worker outside interrupt context. */
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
980
/* DMA watchdog timeout callback (timer function, hence the unsigned long
 * argument carrying the struct ivtv pointer).
 *
 * If a transfer marked by IVTV_F_I_DMA is still outstanding when the timer
 * fires, log the stuck DMA status, ack/stop the engine by writing back the
 * low two status bits, clear the in-flight flags and wake any waiters so
 * the driver can recover instead of hanging forever.
 */
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	/* nothing in flight: the transfer completed before the timer fired */
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	/* write back only the read/write busy bits to ack the engine */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;	/* no stream owns the DMA engine now */
	wake_up(&itv->dma_waitq);
}
This page took 0.478904 seconds and 5 git commands to generate.