Merge branch 'akpm/master'
[deliverable/linux.git] / drivers / media / pci / ivtv / ivtv-irq.c
CommitLineData
1a0adaf3
HV
1/* interrupt handling
2 Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
3 Copyright (C) 2004 Chris Kennedy <c@groovy.org>
4 Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#include "ivtv-driver.h"
1a0adaf3
HV
22#include "ivtv-queue.h"
23#include "ivtv-udma.h"
24#include "ivtv-irq.h"
1a0adaf3
HV
25#include "ivtv-mailbox.h"
26#include "ivtv-vbi.h"
1e13f9e3 27#include "ivtv-yuv.h"
09250193 28#include <media/v4l2-event.h>
1a0adaf3
HV
29
30#define DMA_MAGIC_COOKIE 0x000001fe
31
1a0adaf3
HV
32static void ivtv_dma_dec_start(struct ivtv_stream *s);
33
34static const int ivtv_stream_map[] = {
35 IVTV_ENC_STREAM_TYPE_MPG,
36 IVTV_ENC_STREAM_TYPE_YUV,
37 IVTV_ENC_STREAM_TYPE_PCM,
38 IVTV_ENC_STREAM_TYPE_VBI,
39};
40
4313902e
AW
41static void ivtv_pcm_work_handler(struct ivtv *itv)
42{
43 struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
44 struct ivtv_buffer *buf;
45
46 /* Pass the PCM data to ivtv-alsa */
47
48 while (1) {
49 /*
50 * Users should not be using both the ALSA and V4L2 PCM audio
51 * capture interfaces at the same time. If the user is doing
52 * this, there maybe a buffer in q_io to grab, use, and put
53 * back in rotation.
54 */
55 buf = ivtv_dequeue(s, &s->q_io);
56 if (buf == NULL)
57 buf = ivtv_dequeue(s, &s->q_full);
58 if (buf == NULL)
59 break;
60
61 if (buf->readpos < buf->bytesused)
62 itv->pcm_announce_callback(itv->alsa,
63 (u8 *)(buf->buf + buf->readpos),
64 (size_t)(buf->bytesused - buf->readpos));
65
66 ivtv_enqueue(s, buf, &s->q_free);
67 }
68}
dc02d50a
HV
69
70static void ivtv_pio_work_handler(struct ivtv *itv)
1a0adaf3 71{
dc02d50a
HV
72 struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
73 struct ivtv_buffer *buf;
dc02d50a
HV
74 int i = 0;
75
bd58df6d 76 IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
dc02d50a 77 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
635d62f0 78 s->vdev.v4l2_dev == NULL || !ivtv_use_pio(s)) {
dc02d50a
HV
79 itv->cur_pio_stream = -1;
80 /* trigger PIO complete user interrupt */
81 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
82 return;
83 }
bd58df6d 84 IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
805a4392 85 list_for_each_entry(buf, &s->q_dma.list, list) {
37093b1e 86 u32 size = s->sg_processing[i].size & 0x3ffff;
1a0adaf3 87
dc02d50a
HV
88 /* Copy the data from the card to the buffer */
89 if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
37093b1e 90 memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
dc02d50a
HV
91 }
92 else {
37093b1e 93 memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
dc02d50a 94 }
dc02d50a 95 i++;
37093b1e
HV
96 if (i == s->sg_processing_size)
97 break;
dc02d50a
HV
98 }
99 write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
1a0adaf3
HV
100}
101
7bc46560 102void ivtv_irq_work_handler(struct kthread_work *work)
1e13f9e3 103{
7bc46560 104 struct ivtv *itv = container_of(work, struct ivtv, irq_work);
1e13f9e3 105
dc02d50a
HV
106 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
107 ivtv_pio_work_handler(itv);
108
1e13f9e3 109 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
dc02d50a 110 ivtv_vbi_work_handler(itv);
1e13f9e3
HV
111
112 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
113 ivtv_yuv_work_handler(itv);
4313902e
AW
114
115 if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
116 ivtv_pcm_work_handler(itv);
1e13f9e3
HV
117}
118
1a0adaf3
HV
119/* Determine the required DMA size, setup enough buffers in the predma queue and
120 actually copy the data from the card to the buffers in case a PIO transfer is
121 required for this stream.
122 */
123static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
124{
125 struct ivtv *itv = s->itv;
126 struct ivtv_buffer *buf;
1a0adaf3
HV
127 u32 bytes_needed = 0;
128 u32 offset, size;
129 u32 UVoffset = 0, UVsize = 0;
130 int skip_bufs = s->q_predma.buffers;
37093b1e 131 int idx = s->sg_pending_size;
1a0adaf3
HV
132 int rc;
133
134 /* sanity checks */
635d62f0 135 if (s->vdev.v4l2_dev == NULL) {
1a0adaf3
HV
136 IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
137 return -1;
138 }
139 if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
140 IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
141 return -1;
142 }
143
144 /* determine offset, size and PTS for the various streams */
145 switch (s->type) {
146 case IVTV_ENC_STREAM_TYPE_MPG:
147 offset = data[1];
148 size = data[2];
37093b1e 149 s->pending_pts = 0;
1a0adaf3
HV
150 break;
151
152 case IVTV_ENC_STREAM_TYPE_YUV:
153 offset = data[1];
154 size = data[2];
155 UVoffset = data[3];
156 UVsize = data[4];
37093b1e 157 s->pending_pts = ((u64) data[5] << 32) | data[6];
1a0adaf3
HV
158 break;
159
160 case IVTV_ENC_STREAM_TYPE_PCM:
161 offset = data[1] + 12;
162 size = data[2] - 12;
37093b1e 163 s->pending_pts = read_dec(offset - 8) |
1a0adaf3
HV
164 ((u64)(read_dec(offset - 12)) << 32);
165 if (itv->has_cx23415)
166 offset += IVTV_DECODER_OFFSET;
167 break;
168
169 case IVTV_ENC_STREAM_TYPE_VBI:
170 size = itv->vbi.enc_size * itv->vbi.fpi;
171 offset = read_enc(itv->vbi.enc_start - 4) + 12;
172 if (offset == 12) {
173 IVTV_DEBUG_INFO("VBI offset == 0\n");
174 return -1;
175 }
37093b1e 176 s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
1a0adaf3
HV
177 break;
178
179 case IVTV_DEC_STREAM_TYPE_VBI:
180 size = read_dec(itv->vbi.dec_start + 4) + 8;
181 offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
37093b1e 182 s->pending_pts = 0;
1a0adaf3
HV
183 offset += IVTV_DECODER_OFFSET;
184 break;
185 default:
186 /* shouldn't happen */
187 return -1;
188 }
189
190 /* if this is the start of the DMA then fill in the magic cookie */
51a99c04 191 if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
1a0adaf3
HV
192 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
193 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
37093b1e 194 s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
3efb8ab6 195 write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
1a0adaf3
HV
196 }
197 else {
37093b1e 198 s->pending_backup = read_enc(offset);
3efb8ab6 199 write_enc_sync(DMA_MAGIC_COOKIE, offset);
1a0adaf3 200 }
37093b1e 201 s->pending_offset = offset;
1a0adaf3
HV
202 }
203
204 bytes_needed = size;
205 if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
206 /* The size for the Y samples needs to be rounded upwards to a
207 multiple of the buf_size. The UV samples then start in the
208 next buffer. */
209 bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
210 bytes_needed += UVsize;
211 }
212
bd58df6d 213 IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
1a0adaf3
HV
214 ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);
215
216 rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
217 if (rc < 0) { /* Insufficient buffers */
218 IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
219 bytes_needed, s->name);
220 return -1;
221 }
ec105a42 222 if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
1a0adaf3
HV
223 IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
224 IVTV_WARN("Cause: the application is not reading fast enough.\n");
225 }
226 s->buffers_stolen = rc;
227
37093b1e 228 /* got the buffers, now fill in sg_pending */
1a0adaf3
HV
229 buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
230 memset(buf->buf, 0, 128);
805a4392 231 list_for_each_entry(buf, &s->q_predma.list, list) {
1a0adaf3
HV
232 if (skip_bufs-- > 0)
233 continue;
37093b1e
HV
234 s->sg_pending[idx].dst = buf->dma_handle;
235 s->sg_pending[idx].src = offset;
236 s->sg_pending[idx].size = s->buf_size;
14d5deba 237 buf->bytesused = min(size, s->buf_size);
f4071b85 238 buf->dma_xfer_cnt = s->dma_xfer_cnt;
1a0adaf3 239
1a0adaf3
HV
240 s->q_predma.bytesused += buf->bytesused;
241 size -= buf->bytesused;
242 offset += s->buf_size;
243
244 /* Sync SG buffers */
245 ivtv_buf_sync_for_device(s, buf);
246
247 if (size == 0) { /* YUV */
248 /* process the UV section */
249 offset = UVoffset;
250 size = UVsize;
251 }
252 idx++;
253 }
37093b1e 254 s->sg_pending_size = idx;
1a0adaf3
HV
255 return 0;
256}
257
258static void dma_post(struct ivtv_stream *s)
259{
260 struct ivtv *itv = s->itv;
261 struct ivtv_buffer *buf = NULL;
262 struct list_head *p;
263 u32 offset;
b0510f8d 264 __le32 *u32buf;
1a0adaf3
HV
265 int x = 0;
266
bd58df6d 267 IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
1a0adaf3
HV
268 s->name, s->dma_offset);
269 list_for_each(p, &s->q_dma.list) {
270 buf = list_entry(p, struct ivtv_buffer, list);
b0510f8d 271 u32buf = (__le32 *)buf->buf;
1a0adaf3
HV
272
273 /* Sync Buffer */
274 ivtv_buf_sync_for_cpu(s, buf);
275
51a99c04 276 if (x == 0 && ivtv_use_dma(s)) {
1a0adaf3 277 offset = s->dma_last_offset;
3efb8ab6 278 if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
1a0adaf3 279 {
3efb8ab6
HV
280 for (offset = 0; offset < 64; offset++)
281 if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
1a0adaf3 282 break;
1a0adaf3
HV
283 offset *= 4;
284 if (offset == 256) {
285 IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
286 offset = s->dma_last_offset;
287 }
288 if (s->dma_last_offset != offset)
289 IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
290 s->dma_last_offset = offset;
291 }
292 if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
293 s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
294 write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
295 }
296 else {
297 write_enc_sync(0, s->dma_offset);
298 }
299 if (offset) {
300 buf->bytesused -= offset;
301 memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
302 }
303 *u32buf = cpu_to_le32(s->dma_backup);
304 }
305 x++;
306 /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
307 if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
308 s->type == IVTV_ENC_STREAM_TYPE_VBI)
f4071b85 309 buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
1a0adaf3
HV
310 }
311 if (buf)
312 buf->bytesused += s->dma_last_offset;
313 if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
805a4392 314 list_for_each_entry(buf, &s->q_dma.list, list) {
dc02d50a
HV
315 /* Parse and Groom VBI Data */
316 s->q_dma.bytesused -= buf->bytesused;
317 ivtv_process_vbi_data(itv, buf, 0, s->type);
318 s->q_dma.bytesused += buf->bytesused;
319 }
61bb725e 320 if (s->fh == NULL) {
1a0adaf3
HV
321 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
322 return;
323 }
324 }
4313902e 325
1a0adaf3 326 ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
4313902e
AW
327
328 if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
329 itv->pcm_announce_callback != NULL) {
330 /*
331 * Set up the work handler to pass the data to ivtv-alsa.
332 *
333 * We just use q_full and let the work handler race with users
334 * making ivtv-fileops.c calls on the PCM device node.
335 *
336 * Users should not be using both the ALSA and V4L2 PCM audio
337 * capture interfaces at the same time. If the user does this,
338 * fragments of data will just go out each interface as they
339 * race for PCM data.
340 */
341 set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
342 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
343 }
344
61bb725e 345 if (s->fh)
1a0adaf3
HV
346 wake_up(&s->waitq);
347}
348
349void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
350{
351 struct ivtv *itv = s->itv;
77aded6b
IA
352 struct yuv_playback_info *yi = &itv->yuv_info;
353 u8 frame = yi->draw_frame;
354 struct yuv_frame_info *f = &yi->new_frame_info[frame];
1a0adaf3 355 struct ivtv_buffer *buf;
77aded6b 356 u32 y_size = 720 * ((f->src_h + 31) & ~31);
1a0adaf3
HV
357 u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
358 int y_done = 0;
359 int bytes_written = 0;
1a0adaf3
HV
360 int idx = 0;
361
bd58df6d 362 IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
77aded6b
IA
363
364 /* Insert buffer block for YUV if needed */
365 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
366 if (yi->blanking_dmaptr) {
367 s->sg_pending[idx].src = yi->blanking_dmaptr;
368 s->sg_pending[idx].dst = offset;
369 s->sg_pending[idx].size = 720 * 16;
370 }
371 offset += 720 * 16;
372 idx++;
373 }
374
805a4392 375 list_for_each_entry(buf, &s->q_predma.list, list) {
1a0adaf3 376 /* YUV UV Offset from Y Buffer */
c240ad00
IA
377 if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
378 (bytes_written + buf->bytesused) >= y_size) {
379 s->sg_pending[idx].src = buf->dma_handle;
380 s->sg_pending[idx].dst = offset;
381 s->sg_pending[idx].size = y_size - bytes_written;
1a0adaf3 382 offset = uv_offset;
c240ad00
IA
383 if (s->sg_pending[idx].size != buf->bytesused) {
384 idx++;
385 s->sg_pending[idx].src =
386 buf->dma_handle + s->sg_pending[idx - 1].size;
387 s->sg_pending[idx].dst = offset;
388 s->sg_pending[idx].size =
389 buf->bytesused - s->sg_pending[idx - 1].size;
390 offset += s->sg_pending[idx].size;
391 }
1a0adaf3 392 y_done = 1;
c240ad00
IA
393 } else {
394 s->sg_pending[idx].src = buf->dma_handle;
395 s->sg_pending[idx].dst = offset;
396 s->sg_pending[idx].size = buf->bytesused;
397 offset += buf->bytesused;
1a0adaf3 398 }
1a0adaf3
HV
399 bytes_written += buf->bytesused;
400
401 /* Sync SG buffers */
402 ivtv_buf_sync_for_device(s, buf);
403 idx++;
404 }
37093b1e 405 s->sg_pending_size = idx;
1a0adaf3
HV
406
407 /* Sync Hardware SG List of buffers */
408 ivtv_stream_sync_for_device(s);
d832672f
HV
409 if (lock) {
410 unsigned long flags = 0;
411
1a0adaf3 412 spin_lock_irqsave(&itv->dma_reg_lock, flags);
d832672f
HV
413 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
414 ivtv_dma_dec_start(s);
415 else
416 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
1a0adaf3 417 spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
d832672f
HV
418 } else {
419 if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
420 ivtv_dma_dec_start(s);
421 else
422 set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
423 }
1a0adaf3
HV
424}
425
37093b1e
HV
426static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
427{
428 struct ivtv *itv = s->itv;
429
430 s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
431 s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
432 s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
433 s->sg_processed++;
434 /* Sync Hardware SG List of buffers */
435 ivtv_stream_sync_for_device(s);
436 write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
437 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
2968e313 438 itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
9b2e5c6b 439 add_timer(&itv->dma_timer);
37093b1e
HV
440}
441
442static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
443{
444 struct ivtv *itv = s->itv;
445
446 s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
447 s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
448 s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
449 s->sg_processed++;
450 /* Sync Hardware SG List of buffers */
451 ivtv_stream_sync_for_device(s);
452 write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
453 write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
2968e313 454 itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
9b2e5c6b 455 add_timer(&itv->dma_timer);
37093b1e
HV
456}
457
1a0adaf3
HV
458/* start the encoder DMA */
459static void ivtv_dma_enc_start(struct ivtv_stream *s)
460{
461 struct ivtv *itv = s->itv;
462 struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
463 int i;
464
bd58df6d 465 IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
dc02d50a 466
1a0adaf3
HV
467 if (s->q_predma.bytesused)
468 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
dc02d50a
HV
469
470 if (ivtv_use_dma(s))
37093b1e 471 s->sg_pending[s->sg_pending_size - 1].size += 256;
1a0adaf3
HV
472
473 /* If this is an MPEG stream, and VBI data is also pending, then append the
474 VBI DMA to the MPEG DMA and transfer both sets of data at once.
475
476 VBI DMA is a second class citizen compared to MPEG and mixing them together
477 will confuse the firmware (the end of a VBI DMA is seen as the end of a
478 MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
479 sure we only use the MPEG DMA to transfer the VBI DMA if both are in
480 use. This way no conflicts occur. */
481 clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
37093b1e
HV
482 if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
483 s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
1a0adaf3 484 ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
dc02d50a 485 if (ivtv_use_dma(s_vbi))
37093b1e
HV
486 s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
487 for (i = 0; i < s_vbi->sg_pending_size; i++) {
488 s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
1a0adaf3 489 }
37093b1e
HV
490 s_vbi->dma_offset = s_vbi->pending_offset;
491 s_vbi->sg_pending_size = 0;
f4071b85 492 s_vbi->dma_xfer_cnt++;
1a0adaf3 493 set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
6b1e5676 494 IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
1a0adaf3
HV
495 }
496
f4071b85 497 s->dma_xfer_cnt++;
b0510f8d 498 memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
37093b1e
HV
499 s->sg_processing_size = s->sg_pending_size;
500 s->sg_pending_size = 0;
501 s->sg_processed = 0;
502 s->dma_offset = s->pending_offset;
503 s->dma_backup = s->pending_backup;
504 s->dma_pts = s->pending_pts;
dd1e729d 505
dc02d50a 506 if (ivtv_use_pio(s)) {
dc02d50a
HV
507 set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
508 set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
509 set_bit(IVTV_F_I_PIO, &itv->i_flags);
510 itv->cur_pio_stream = s->type;
511 }
512 else {
37093b1e
HV
513 itv->dma_retries = 0;
514 ivtv_dma_enc_start_xfer(s);
dc02d50a
HV
515 set_bit(IVTV_F_I_DMA, &itv->i_flags);
516 itv->cur_dma_stream = s->type;
dc02d50a 517 }
1a0adaf3
HV
518}
519
520static void ivtv_dma_dec_start(struct ivtv_stream *s)
521{
522 struct ivtv *itv = s->itv;
523
524 if (s->q_predma.bytesused)
525 ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
37093b1e 526 s->dma_xfer_cnt++;
b0510f8d 527 memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
37093b1e
HV
528 s->sg_processing_size = s->sg_pending_size;
529 s->sg_pending_size = 0;
530 s->sg_processed = 0;
531
bd58df6d 532 IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
37093b1e
HV
533 itv->dma_retries = 0;
534 ivtv_dma_dec_start_xfer(s);
1a0adaf3
HV
535 set_bit(IVTV_F_I_DMA, &itv->i_flags);
536 itv->cur_dma_stream = s->type;
1a0adaf3
HV
537}
538
539static void ivtv_irq_dma_read(struct ivtv *itv)
540{
541 struct ivtv_stream *s = NULL;
542 struct ivtv_buffer *buf;
37093b1e 543 int hw_stream_type = 0;
1a0adaf3 544
bd58df6d 545 IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
9b2e5c6b
HV
546
547 del_timer(&itv->dma_timer);
548
549 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
37093b1e 550 return;
37093b1e 551
1a0adaf3 552 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
37093b1e
HV
553 s = &itv->streams[itv->cur_dma_stream];
554 ivtv_stream_sync_for_cpu(s);
555
556 if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
557 IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
558 read_reg(IVTV_REG_DMASTATUS),
559 s->sg_processed, s->sg_processing_size, itv->dma_retries);
560 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
561 if (itv->dma_retries == 3) {
e17a06ba 562 /* Too many retries, give up on this frame */
37093b1e 563 itv->dma_retries = 0;
e17a06ba 564 s->sg_processed = s->sg_processing_size;
37093b1e
HV
565 }
566 else {
567 /* Retry, starting with the first xfer segment.
568 Just retrying the current segment is not sufficient. */
569 s->sg_processed = 0;
570 itv->dma_retries++;
571 }
1a0adaf3 572 }
37093b1e
HV
573 if (s->sg_processed < s->sg_processing_size) {
574 /* DMA next buffer */
575 ivtv_dma_dec_start_xfer(s);
576 return;
1a0adaf3 577 }
37093b1e
HV
578 if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
579 hw_stream_type = 2;
bd58df6d 580 IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);
1a0adaf3 581
1a0adaf3
HV
582 /* For some reason must kick the firmware, like PIO mode,
583 I think this tells the firmware we are done and the size
584 of the xfer so it can calculate what we need next.
585 I think we can do this part ourselves but would have to
586 fully calculate xfer info ourselves and not use interrupts
587 */
588 ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
589 hw_stream_type);
590
591 /* Free last DMA call */
592 while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
593 ivtv_buf_sync_for_cpu(s, buf);
594 ivtv_enqueue(s, buf, &s->q_free);
595 }
596 wake_up(&s->waitq);
597 }
598 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
599 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
600 itv->cur_dma_stream = -1;
601 wake_up(&itv->dma_waitq);
602}
603
604static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
605{
606 u32 data[CX2341X_MBOX_MAX_DATA];
607 struct ivtv_stream *s;
608
587808d5 609 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
37093b1e 610 IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
9b2e5c6b
HV
611
612 del_timer(&itv->dma_timer);
613
614 if (itv->cur_dma_stream < 0)
1a0adaf3 615 return;
9b2e5c6b 616
37093b1e
HV
617 s = &itv->streams[itv->cur_dma_stream];
618 ivtv_stream_sync_for_cpu(s);
619
1a0adaf3 620 if (data[0] & 0x18) {
37093b1e
HV
621 IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
622 s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
1a0adaf3 623 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
37093b1e 624 if (itv->dma_retries == 3) {
e17a06ba 625 /* Too many retries, give up on this frame */
37093b1e 626 itv->dma_retries = 0;
e17a06ba 627 s->sg_processed = s->sg_processing_size;
37093b1e
HV
628 }
629 else {
630 /* Retry, starting with the first xfer segment.
631 Just retrying the current segment is not sufficient. */
632 s->sg_processed = 0;
633 itv->dma_retries++;
634 }
1a0adaf3 635 }
37093b1e
HV
636 if (s->sg_processed < s->sg_processing_size) {
637 /* DMA next buffer */
638 ivtv_dma_enc_start_xfer(s);
639 return;
640 }
1a0adaf3
HV
641 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
642 itv->cur_dma_stream = -1;
643 dma_post(s);
1a0adaf3 644 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
1a0adaf3 645 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
1a0adaf3 646 dma_post(s);
1a0adaf3 647 }
37093b1e
HV
648 s->sg_processing_size = 0;
649 s->sg_processed = 0;
1a0adaf3
HV
650 wake_up(&itv->dma_waitq);
651}
652
dc02d50a
HV
653static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
654{
655 struct ivtv_stream *s;
656
657 if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
658 itv->cur_pio_stream = -1;
659 return;
660 }
661 s = &itv->streams[itv->cur_pio_stream];
bd58df6d 662 IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
dc02d50a
HV
663 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
664 itv->cur_pio_stream = -1;
665 dma_post(s);
666 if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
667 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
668 else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
669 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
670 else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
671 ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
672 clear_bit(IVTV_F_I_PIO, &itv->i_flags);
673 if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
dc02d50a 674 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
dc02d50a 675 dma_post(s);
dc02d50a
HV
676 }
677 wake_up(&itv->dma_waitq);
678}
679
1a0adaf3
HV
680static void ivtv_irq_dma_err(struct ivtv *itv)
681{
682 u32 data[CX2341X_MBOX_MAX_DATA];
d213ad08 683 u32 status;
1a0adaf3
HV
684
685 del_timer(&itv->dma_timer);
d213ad08 686
587808d5 687 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
d213ad08 688 status = read_reg(IVTV_REG_DMASTATUS);
1a0adaf3 689 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
d213ad08
M
690 status, itv->cur_dma_stream);
691 /*
692 * We do *not* write back to the IVTV_REG_DMASTATUS register to
693 * clear the error status, if either the encoder write (0x02) or
694 * decoder read (0x01) bus master DMA operation do not indicate
695 * completed. We can race with the DMA engine, which may have
696 * transitioned to completed status *after* we read the register.
697 * Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the
698 * DMA engine has completed, will cause the DMA engine to stop working.
699 */
700 status &= 0x3;
701 if (status == 0x3)
702 write_reg(status, IVTV_REG_DMASTATUS);
703
1a0adaf3
HV
704 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
705 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
706 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
707
d213ad08
M
708 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
709 /* retry */
710 /*
711 * FIXME - handle cases of DMA error similar to
712 * encoder below, except conditioned on status & 0x1
713 */
1a0adaf3 714 ivtv_dma_dec_start(s);
d213ad08
M
715 return;
716 } else {
717 if ((status & 0x2) == 0) {
718 /*
719 * CX2341x Bus Master DMA write is ongoing.
720 * Reset the timer and let it complete.
721 */
722 itv->dma_timer.expires =
723 jiffies + msecs_to_jiffies(600);
724 add_timer(&itv->dma_timer);
725 return;
726 }
727
728 if (itv->dma_retries < 3) {
729 /*
730 * CX2341x Bus Master DMA write has ended.
731 * Retry the write, starting with the first
732 * xfer segment. Just retrying the current
733 * segment is not sufficient.
734 */
735 s->sg_processed = 0;
736 itv->dma_retries++;
737 ivtv_dma_enc_start_xfer(s);
738 return;
739 }
740 /* Too many retries, give up on this one */
741 }
742
1a0adaf3 743 }
37093b1e
HV
744 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
745 ivtv_udma_start(itv);
746 return;
747 }
1a0adaf3
HV
748 clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
749 clear_bit(IVTV_F_I_DMA, &itv->i_flags);
750 itv->cur_dma_stream = -1;
751 wake_up(&itv->dma_waitq);
752}
753
754static void ivtv_irq_enc_start_cap(struct ivtv *itv)
755{
756 u32 data[CX2341X_MBOX_MAX_DATA];
757 struct ivtv_stream *s;
758
759 /* Get DMA destination and size arguments from card */
587808d5 760 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
bd58df6d 761 IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);
1a0adaf3
HV
762
763 if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
764 IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
765 data[0], data[1], data[2]);
766 return;
767 }
1a0adaf3
HV
768 s = &itv->streams[ivtv_stream_map[data[0]]];
769 if (!stream_enc_dma_append(s, data)) {
dc02d50a 770 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
1a0adaf3
HV
771 }
772}
773
774static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
775{
1a0adaf3
HV
776 u32 data[CX2341X_MBOX_MAX_DATA];
777 struct ivtv_stream *s;
778
bd58df6d 779 IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
1a0adaf3
HV
780 s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
781
d526afe0 782 if (!stream_enc_dma_append(s, data))
dc02d50a 783 set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
1a0adaf3
HV
784}
785
dc02d50a 786static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
1a0adaf3
HV
787{
788 u32 data[CX2341X_MBOX_MAX_DATA];
789 struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
790
bd58df6d 791 IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
1a0adaf3
HV
792 if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
793 !stream_enc_dma_append(s, data)) {
dc02d50a 794 set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
1a0adaf3
HV
795 }
796}
797
798static void ivtv_irq_dec_data_req(struct ivtv *itv)
799{
800 u32 data[CX2341X_MBOX_MAX_DATA];
801 struct ivtv_stream *s;
802
803 /* YUV or MPG */
1a0adaf3
HV
804
805 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
587808d5 806 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
77aded6b
IA
807 itv->dma_data_req_size =
808 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
809 itv->dma_data_req_offset = data[1];
810 if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
811 ivtv_yuv_frame_complete(itv);
1a0adaf3
HV
812 s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
813 }
814 else {
587808d5 815 ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
14d5deba 816 itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
1a0adaf3
HV
817 itv->dma_data_req_offset = data[1];
818 s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
819 }
bd58df6d 820 IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
1a0adaf3
HV
821 itv->dma_data_req_offset, itv->dma_data_req_size);
822 if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
823 set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
824 }
825 else {
77aded6b
IA
826 if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
827 ivtv_yuv_setup_stream_frame(itv);
1a0adaf3
HV
828 clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
829 ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
830 ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
831 }
832}
833
/* Handle the decoder vertical-sync interrupt: detect which field is being
 * displayed, rotate the YUV playback DMA buffers, queue V4L2 VSYNC events,
 * wake waiters and schedule deferred VBI/YUV work.
 */
static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	/* Bit 0 of the line/field register identifies the current field. */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	/* Debug print deliberately compiled out (if (0)); kept for easy
	 * re-enabling without flooding the IRQ log. */
	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	/* Advance the YUV DMA frame either when the field matches the frame's
	 * sync field and the field actually changed, or on every field change
	 * for non-interlaced frames. */
	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		/* For interlaced frames with a delay, wait until at least one
		 * field has elapsed before programming the next frame. */
		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			/* Only advance while there is a filled frame ahead of us
			 * (next_dma_frame catching up to next_fill_frame means
			 * the ring is empty). */
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				/* Program the luma (0x82c/0x834) and chroma
				 * (0x830/0x838) display addresses for both fields.
				 * NOTE(review): register offsets taken as-is from
				 * the original code; presumably decoder display
				 * base registers -- confirm against hardware docs. */
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				/* -1 so the post-increment below yields 0 fields
				 * lapsed for the freshly programmed frame. */
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	/* Field changed since the last vsync we handled: this is a "real"
	 * per-field vsync, so deliver events and wakeups exactly once. */
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		/* Legacy (pre-v4l2-event) vsync notification path. */
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		/* Queue the V4L2 VSYNC event only if the device is registered. */
		if (s && s->vdev.v4l2_dev)
			v4l2_event_queue(&s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		/* Schedule the VBI work handler once per frame (on the second
		 * field) when passthrough is active or WSS/VPS/CC data needs
		 * to be refreshed. */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			/* Forced update with no pending frame info: fall back to
			 * the most recently programmed DMA frame (the u8 cast
			 * keeps the -1 wraparound non-negative before the
			 * modulo). */
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			/* Only hand off frames with a non-zero source width. */
			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}
927
/* Mask of all interrupt sources that may require DMA/PIO dispatch below. */
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

/* Top-half interrupt handler: acknowledge and dispatch all pending
 * interrupt sources, start queued DMA/PIO transfers round-robin across
 * streams, and kick the irq worker for any deferred work.
 * Returns IRQ_HANDLED if the interrupt was ours, IRQ_NONE otherwise.
 */
irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;	/* pending, unmasked interrupt bits */
	u32 stat;
	int i;
	u8 vsync_force = 0;	/* set when we detect a missed vsync */

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		/* Encoder end-of-stream: flag it and wake anyone waiting. */
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

	/* If no DMA is in progress, start the next pending stream DMA.
	 * irq_rr_idx rotates the starting stream so no single stream can
	 * starve the others. */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			/* Decoder streams use the decoder DMA engine,
			 * everything else the encoder one; only one transfer
			 * is started per interrupt. */
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		/* No stream DMA was pending: a user DMA may be waiting. */
		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	/* Same round-robin dispatch for PIO-pending streams. */
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	/* Hand any work flagged above to the irq worker thread. */
	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		kthread_queue_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}
1076
/* Recover from a DMA transfer that never signalled completion.
 * arg is the struct ivtv pointer cast to unsigned long.
 * NOTE(review): presumably installed as a timer callback armed when a
 * DMA is started -- confirm against the caller.
 */
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	/* If the DMA flag is already clear, the transfer completed
	 * normally and there is nothing to recover. */
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	/* Write back the two low status bits to acknowledge them, then
	 * mark no DMA in progress and wake anyone waiting on it. */
	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
This page took 0.829163 seconds and 5 git commands to generate.