V4L/DVB (6048): ivtv: fix stop stream locking
drivers/media/video/ivtv/ivtv-irq.c

/* interrupt handling
   Copyright (C) 2003-2004 Kevin Thayer <nufan_wfk at yahoo.com>
   Copyright (C) 2004 Chris Kennedy <c@groovy.org>
   Copyright (C) 2005-2007 Hans Verkuil <hverkuil@xs4all.nl>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ivtv-driver.h"
#include "ivtv-firmware.h"
#include "ivtv-fileops.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-ioctl.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"

#define DMA_MAGIC_COOKIE 0x000001fe

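/* A note on DMA_MAGIC_COOKIE (a summary of the mechanism as it appears in
   this file): before a transfer is scheduled, stream_enc_dma_append() saves
   the first word of the data area on the card and overwrites it with this
   cookie. When the transfer finishes, dma_post() checks that the cookie
   arrived at the expected offset of the first buffer (scanning the first
   256 bytes if not) and then restores the saved word, so the cookie serves
   both as a completion check and as a marker for the true start of data. */
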
static void ivtv_dma_dec_start(struct ivtv_stream *s);

static const int ivtv_stream_map[] = {
        IVTV_ENC_STREAM_TYPE_MPG,
        IVTV_ENC_STREAM_TYPE_YUV,
        IVTV_ENC_STREAM_TYPE_PCM,
        IVTV_ENC_STREAM_TYPE_VBI,
};

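/* Copy the data for the current PIO stream from the card into its buffers.
   This runs from the IRQ work queue rather than from the interrupt handler
   itself, presumably because memcpy_fromio() over the PCI bus is far too
   slow to run with interrupts disabled. */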
static void ivtv_pio_work_handler(struct ivtv *itv)
{
        struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
        struct ivtv_buffer *buf;
        struct list_head *p;
        int i = 0;

        IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
            s->v4l2dev == NULL || !ivtv_use_pio(s)) {
                itv->cur_pio_stream = -1;
                /* trigger PIO complete user interrupt */
                write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
                return;
        }
        IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
        buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
        list_for_each(p, &s->q_dma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                u32 size = s->sg_processing[i].size & 0x3ffff;

                /* Copy the data from the card to the buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                        memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
                }
                else {
                        memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
                }
                i++;
                if (i == s->sg_processing_size)
                        break;
        }
        write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

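/* Bottom half of the interrupt handler: ivtv_irq_handler() sets the
   IVTV_F_I_WORK_HANDLER_* bits and queues this function on the
   irq_work_queues workqueue, where the slow PIO/VBI/YUV work can run in
   process context. */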
void ivtv_irq_work_handler(struct work_struct *work)
{
        struct ivtv *itv = container_of(work, struct ivtv, irq_work_queue);

        DEFINE_WAIT(wait);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
                ivtv_pio_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
                ivtv_vbi_work_handler(itv);

        if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
                ivtv_yuv_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 bytes_needed = 0;
        u32 offset, size;
        u32 UVoffset = 0, UVsize = 0;
        int skip_bufs = s->q_predma.buffers;
        int idx = s->sg_pending_size;
        int rc;

        /* sanity checks */
        if (s->v4l2dev == NULL) {
                IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
                return -1;
        }
        if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
                return -1;
        }

        /* determine offset, size and PTS for the various streams */
        switch (s->type) {
        case IVTV_ENC_STREAM_TYPE_MPG:
                offset = data[1];
                size = data[2];
                s->pending_pts = 0;
                break;

        case IVTV_ENC_STREAM_TYPE_YUV:
                offset = data[1];
                size = data[2];
                UVoffset = data[3];
                UVsize = data[4];
                s->pending_pts = ((u64) data[5] << 32) | data[6];
                break;

        case IVTV_ENC_STREAM_TYPE_PCM:
                offset = data[1] + 12;
                size = data[2] - 12;
                s->pending_pts = read_dec(offset - 8) |
                        ((u64)(read_dec(offset - 12)) << 32);
                if (itv->has_cx23415)
                        offset += IVTV_DECODER_OFFSET;
                break;

        case IVTV_ENC_STREAM_TYPE_VBI:
                size = itv->vbi.enc_size * itv->vbi.fpi;
                offset = read_enc(itv->vbi.enc_start - 4) + 12;
                if (offset == 12) {
                        IVTV_DEBUG_INFO("VBI offset == 0\n");
                        return -1;
                }
                s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
                break;

        case IVTV_DEC_STREAM_TYPE_VBI:
                size = read_dec(itv->vbi.dec_start + 4) + 8;
                offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
                s->pending_pts = 0;
                offset += IVTV_DECODER_OFFSET;
                break;
        default:
                /* shouldn't happen */
                return -1;
        }

        /* if this is the start of the DMA then fill in the magic cookie */
        if (s->sg_pending_size == 0) {
                if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                        s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
                        write_dec_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset - IVTV_DECODER_OFFSET);
                }
                else {
                        s->pending_backup = read_enc(offset);
                        write_enc_sync(cpu_to_le32(DMA_MAGIC_COOKIE), offset);
                }
                s->pending_offset = offset;
        }

        bytes_needed = size;
        if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
                /* The size for the Y samples needs to be rounded upwards to a
                   multiple of the buf_size. The UV samples then start in the
                   next buffer. */
                bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
                bytes_needed += UVsize;
        }

        IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
                ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

        rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
        if (rc < 0) { /* Insufficient buffers */
                IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
                        bytes_needed, s->name);
                return -1;
        }
        if (rc && !s->buffers_stolen && (s->s_flags & IVTV_F_S_APPL_IO)) {
                IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
                IVTV_WARN("Cause: the application is not reading fast enough.\n");
        }
        s->buffers_stolen = rc;

        /* got the buffers, now fill in sg_pending */
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        memset(buf->buf, 0, 128);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                if (skip_bufs-- > 0)
                        continue;
                s->sg_pending[idx].dst = buf->dma_handle;
                s->sg_pending[idx].src = offset;
                s->sg_pending[idx].size = s->buf_size;
                buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
                buf->dma_xfer_cnt = s->dma_xfer_cnt;

                s->q_predma.bytesused += buf->bytesused;
                size -= buf->bytesused;
                offset += s->buf_size;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);

                if (size == 0) { /* YUV */
                        /* process the UV section */
                        offset = UVoffset;
                        size = UVsize;
                }
                idx++;
        }
        s->sg_pending_size = idx;
        return 0;
}

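/* Finish a completed transfer: locate the true start of the data via the
   magic cookie, restore the word the cookie overwrote, groom VBI data where
   applicable, then move the buffers to q_full and wake up any reader. */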
static void dma_post(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf = NULL;
        struct list_head *p;
        u32 offset;
        u32 *u32buf;
        int x = 0;

        IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
                s->name, s->dma_offset);
        list_for_each(p, &s->q_dma.list) {
                buf = list_entry(p, struct ivtv_buffer, list);
                u32buf = (u32 *)buf->buf;

                /* Sync Buffer */
                ivtv_buf_sync_for_cpu(s, buf);

                if (x == 0) {
                        offset = s->dma_last_offset;
                        if (u32buf[offset / 4] != DMA_MAGIC_COOKIE)
                        {
                                for (offset = 0; offset < 64; offset++) {
                                        if (u32buf[offset] == DMA_MAGIC_COOKIE) {
                                                break;
                                        }
                                }
                                offset *= 4;
                                if (offset == 256) {
                                        IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
                                        offset = s->dma_last_offset;
                                }
                                if (s->dma_last_offset != offset)
                                        IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
                                s->dma_last_offset = offset;
                        }
                        if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
                            s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
                                write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
                        }
                        else {
                                write_enc_sync(0, s->dma_offset);
                        }
                        if (offset) {
                                buf->bytesused -= offset;
                                memcpy(buf->buf, buf->buf + offset, buf->bytesused + offset);
                        }
                        *u32buf = cpu_to_le32(s->dma_backup);
                }
                x++;
                /* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
                if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
                    s->type == IVTV_ENC_STREAM_TYPE_VBI)
                        buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
        }
        if (buf)
                buf->bytesused += s->dma_last_offset;
        if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
                list_for_each(p, &s->q_dma.list) {
                        buf = list_entry(p, struct ivtv_buffer, list);

                        /* Parse and Groom VBI Data */
                        s->q_dma.bytesused -= buf->bytesused;
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
                if (s->id == -1) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
        if (s->id != -1)
                wake_up(&s->waitq);
}

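/* Build the scatter-gather list for a host-to-decoder transfer and either
   start it immediately or, if a DMA is already in flight, flag it as
   pending. When 'lock' is non-zero the dma_reg_lock is taken here; callers
   that already hold that lock (e.g. the IRQ handler) pass lock == 0. */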
void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
        struct ivtv *itv = s->itv;
        struct ivtv_buffer *buf;
        struct list_head *p;
        u32 y_size = itv->params.height * itv->params.width;
        u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
        int y_done = 0;
        int bytes_written = 0;
        unsigned long flags = 0;
        int idx = 0;

        IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);
        buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
        list_for_each(p, &s->q_predma.list) {
                struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);

                /* YUV UV Offset from Y Buffer */
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done && bytes_written >= y_size) {
                        offset = uv_offset;
                        y_done = 1;
                }
                s->sg_pending[idx].src = buf->dma_handle;
                s->sg_pending[idx].dst = offset;
                s->sg_pending[idx].size = buf->bytesused;

                offset += buf->bytesused;
                bytes_written += buf->bytesused;

                /* Sync SG buffers */
                ivtv_buf_sync_for_device(s, buf);
                idx++;
        }
        s->sg_pending_size = idx;

        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        if (lock)
                spin_lock_irqsave(&itv->dma_reg_lock, flags);
        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                ivtv_dma_dec_start(s);
        }
        else {
                set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
        if (lock)
                spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

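/* Feed the next scatter-gather element of a transfer to the card. Elements
   are handed over one at a time; setting bit 31 of the size appears to mark
   the element as the last one in the hardware list, so the card interrupts
   after each element and the IRQ handler then starts the next one (or
   finishes the transfer). */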
static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
        s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
        s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
        s->sg_processed++;
        /* Sync Hardware SG List of buffers */
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
}

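/* A note on the sg_pending vs. sg_processing arrays used below: new transfer
   requests accumulate in sg_pending (and pending_offset/backup/pts) while a
   previous DMA may still be in flight. When a transfer is actually started,
   the pending state is copied wholesale into sg_processing (and
   dma_offset/backup/pts), freeing the pending slots for the next request. */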
/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
        int i;

        IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

        if (ivtv_use_dma(s))
                s->sg_pending[s->sg_pending_size - 1].size += 256;

        /* If this is an MPEG stream, and VBI data is also pending, then append the
           VBI DMA to the MPEG DMA and transfer both sets of data at once.

           VBI DMA is a second class citizen compared to MPEG and mixing them together
           will confuse the firmware (the end of a VBI DMA is seen as the end of a
           MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
           sure we only use the MPEG DMA to transfer the VBI DMA if both are in
           use. This way no conflicts occur. */
        clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
            s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
                ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
                if (ivtv_use_dma(s_vbi))
                        s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
                for (i = 0; i < s_vbi->sg_pending_size; i++) {
                        s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
                }
                s_vbi->dma_offset = s_vbi->pending_offset;
                s_vbi->sg_pending_size = 0;
                s_vbi->dma_xfer_cnt++;
                set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
                IVTV_DEBUG_HI_DMA("include DMA for %s\n", s->name);
        }

        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;
        s->dma_offset = s->pending_offset;
        s->dma_backup = s->pending_backup;
        s->dma_pts = s->pending_pts;

        if (ivtv_use_pio(s)) {
                set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                set_bit(IVTV_F_I_PIO, &itv->i_flags);
                itv->cur_pio_stream = s->type;
        }
        else {
                itv->dma_retries = 0;
                ivtv_dma_enc_start_xfer(s);
                set_bit(IVTV_F_I_DMA, &itv->i_flags);
                itv->cur_dma_stream = s->type;
                itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
                add_timer(&itv->dma_timer);
        }
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
        struct ivtv *itv = s->itv;

        if (s->q_predma.bytesused)
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
        s->dma_xfer_cnt++;
        memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_element) * s->sg_pending_size);
        s->sg_processing_size = s->sg_pending_size;
        s->sg_pending_size = 0;
        s->sg_processed = 0;

        IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
        itv->dma_retries = 0;
        ivtv_dma_dec_start_xfer(s);
        set_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = s->type;
        itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
        add_timer(&itv->dma_timer);
}

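/* Handle IVTV_IRQ_DMA_READ: a host-to-decoder transfer (or one element of
   it) has completed. On error the whole scatter-gather list is retried up
   to three times; otherwise the next element is started, or the firmware is
   kicked and the buffers recycled once the list is exhausted. */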
static void ivtv_irq_dma_read(struct ivtv *itv)
{
        struct ivtv_stream *s = NULL;
        struct ivtv_buffer *buf;
        int hw_stream_type = 0;

        IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }

        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                s = &itv->streams[itv->cur_dma_stream];
                ivtv_stream_sync_for_cpu(s);

                if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
                        IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
                                        read_reg(IVTV_REG_DMASTATUS),
                                        s->sg_processed, s->sg_processing_size, itv->dma_retries);
                        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                        if (itv->dma_retries == 3) {
                                itv->dma_retries = 0;
                        }
                        else {
                                /* Retry, starting with the first xfer segment.
                                   Just retrying the current segment is not sufficient. */
                                s->sg_processed = 0;
                                itv->dma_retries++;
                        }
                }
                if (s->sg_processed < s->sg_processing_size) {
                        /* DMA next buffer */
                        ivtv_dma_dec_start_xfer(s);
                        return;
                }
                if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
                        hw_stream_type = 2;
                IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

                /* For some reason must kick the firmware, like PIO mode,
                   I think this tells the firmware we are done and the size
                   of the xfer so it can calculate what we need next.
                   I think we can do this part ourselves but would have to
                   fully calculate xfer info ourselves and not use interrupts
                 */
                ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
                                hw_stream_type);

                /* Free last DMA call */
                while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
                        ivtv_buf_sync_for_cpu(s, buf);
                        ivtv_enqueue(s, buf, &s->q_free);
                }
                wake_up(&s->waitq);
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

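/* Handle IVTV_IRQ_ENC_DMA_COMPLETE: check the status word from the mailbox,
   retry the whole scatter-gather list (up to three times) on error, start
   the next element if any remain, and otherwise finish the transfer with
   dma_post(), including any VBI data that was piggybacked onto an MPEG
   transfer. */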
static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);
        if (itv->cur_dma_stream < 0) {
                del_timer(&itv->dma_timer);
                return;
        }
        s = &itv->streams[itv->cur_dma_stream];
        ivtv_stream_sync_for_cpu(s);

        if (data[0] & 0x18) {
                IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
                        s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
                write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
                if (itv->dma_retries == 3) {
                        itv->dma_retries = 0;
                }
                else {
                        /* Retry, starting with the first xfer segment.
                           Just retrying the current segment is not sufficient. */
                        s->sg_processed = 0;
                        itv->dma_retries++;
                }
        }
        if (s->sg_processed < s->sg_processing_size) {
                /* DMA next buffer */
                ivtv_dma_enc_start_xfer(s);
                return;
        }
        del_timer(&itv->dma_timer);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        dma_post(s);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        s->sg_processing_size = 0;
        s->sg_processed = 0;
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
        struct ivtv_stream *s;

        if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
                itv->cur_pio_stream = -1;
                return;
        }
        s = &itv->streams[itv->cur_pio_stream];
        IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
        s->sg_pending_size = 0;
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        itv->cur_pio_stream = -1;
        dma_post(s);
        if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
        else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
        else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
                ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
        clear_bit(IVTV_F_I_PIO, &itv->i_flags);
        if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
                s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
                dma_post(s);
        }
        wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];

        del_timer(&itv->dma_timer);
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, data);
        IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
                        read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);
        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
            itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
                struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

                /* retry */
                if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                        ivtv_dma_dec_start(s);
                else
                        ivtv_dma_enc_start(s);
                return;
        }
        if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
                ivtv_udma_start(itv);
                return;
        }
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}

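/* The encoder has data ready for transfer: the mailbox holds the stream
   index (mapped through ivtv_stream_map[]) plus the offset and size of the
   data on the card. Append it to the stream's pending scatter-gather list;
   the transfer itself is started later from the round-robin code at the
   end of ivtv_irq_handler(). */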
static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* Get DMA destination and size arguments from card */
        ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, data);
        IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

        if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
                IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
                        data[0], data[1], data[2]);
                return;
        }
        s = &itv->streams[ivtv_stream_map[data[0]]];
        if (!stream_enc_dma_append(s, data)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
        struct ivtv_stream *s_mpg = &itv->streams[IVTV_ENC_STREAM_TYPE_MPG];
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

        /* If more than two VBI buffers are pending, then
           clear the old ones and start with this new one.
           This can happen during transition stages when MPEG capturing is
           started, but the first interrupts haven't arrived yet. During
           that period VBI requests can accumulate without being able to
           DMA the data. Since at most four VBI DMA buffers are available,
           we just drop the old requests when there are already three
           requests queued. */
        if (s->sg_pending_size > 2) {
                struct list_head *p;
                list_for_each(p, &s->q_predma.list) {
                        struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
                        ivtv_buf_sync_for_cpu(s, buf);
                }
                ivtv_queue_move(s, &s->q_predma, NULL, &s->q_free, 0);
                s->sg_pending_size = 0;
        }
        /* if we can append the data, and the MPEG stream isn't capturing,
           then start a DMA request for just the VBI data. */
        if (!stream_enc_dma_append(s, data) &&
            !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
                set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
        }
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

        IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
        if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
            !stream_enc_dma_append(s, data)) {
                set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
        }
}

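/* The decoder requests more MPEG or YUV data. Work out how much it wants
   and at which card offset, and if enough data is queued start the
   host-to-decoder transfer; dec_prepare is called with lock == 0 because
   the IRQ handler already holds dma_reg_lock. */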
static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
        u32 data[CX2341X_MBOX_MAX_DATA];
        struct ivtv_stream *s;

        /* YUV or MPG */
        ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, data);

        if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
                itv->dma_data_req_size = itv->params.width * itv->params.height * 3 / 2;
                itv->dma_data_req_offset = data[1] ? data[1] : yuv_offset[0];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
        }
        else {
                itv->dma_data_req_size = data[2] >= 0x10000 ? 0x10000 : data[2];
                itv->dma_data_req_offset = data[1];
                s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
        }
        IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
                itv->dma_data_req_offset, itv->dma_data_req_size);
        if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
                set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
        }
        else {
                clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
                ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
                ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
        }
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
        /* The vsync interrupt is unusual in that it won't clear until
         * the end of the first line for the current field, at which
         * point it clears itself. This can result in repeated vsync
         * interrupts, or a missed vsync. Read some of the registers
         * to determine the line being displayed and ensure we handle
         * one vsync per frame.
         */
        unsigned int frame = read_reg(0x28c0) & 1;
        int last_dma_frame = atomic_read(&itv->yuv_info.next_dma_frame);

        if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

        if (((frame ^ itv->yuv_info.sync_field[last_dma_frame]) == 0 &&
             ((itv->lastVsyncFrame & 1) ^ itv->yuv_info.sync_field[last_dma_frame])) ||
            (frame != (itv->lastVsyncFrame & 1) && !itv->yuv_info.frame_interlaced)) {
                int next_dma_frame = last_dma_frame;

                if (!(itv->yuv_info.frame_interlaced && itv->yuv_info.field_delay[next_dma_frame] && itv->yuv_info.fields_lapsed < 1)) {
                        if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&itv->yuv_info.next_fill_frame)) {
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
                                write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
                                write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
                                next_dma_frame = (next_dma_frame + 1) & 0x3;
                                atomic_set(&itv->yuv_info.next_dma_frame, next_dma_frame);
                                itv->yuv_info.fields_lapsed = -1;
                        }
                }
        }
        if (frame != (itv->lastVsyncFrame & 1)) {
                struct ivtv_stream *s = ivtv_get_output_stream(itv);

                itv->lastVsyncFrame += 1;
                if (frame == 0) {
                        clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
                        clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                else {
                        set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
                }
                if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
                        set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
                        wake_up(&itv->event_waitq);
                }
                wake_up(&itv->vsync_waitq);
                if (s)
                        wake_up(&s->waitq);

                /* Send VBI to saa7127 */
                if (frame) {
                        set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
                        set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                }

                /* Check if we need to update the yuv registers */
                if ((itv->yuv_info.yuv_forced_update || itv->yuv_info.new_frame_info[last_dma_frame].update) && last_dma_frame != -1) {
                        if (!itv->yuv_info.new_frame_info[last_dma_frame].update)
                                last_dma_frame = (last_dma_frame - 1) & 3;

                        if (itv->yuv_info.new_frame_info[last_dma_frame].src_w) {
                                itv->yuv_info.update_frame = last_dma_frame;
                                itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
                                itv->yuv_info.yuv_forced_update = 0;
                                set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
                                set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
                        }
                }

                itv->yuv_info.fields_lapsed++;
        }
}

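/* The interrupt sources after which a deferred DMA or PIO transfer may need
   to be started; see the round-robin scheduling near the end of
   ivtv_irq_handler(). */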
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
        struct ivtv *itv = (struct ivtv *)dev_id;
        u32 combo;
        u32 stat;
        int i;
        u8 vsync_force = 0;

        spin_lock(&itv->dma_reg_lock);
        /* get contents of irq status register */
        stat = read_reg(IVTV_REG_IRQSTATUS);

        combo = ~itv->irqmask & stat;

        /* Clear out IRQ */
        if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

        if (0 == combo) {
                /* The vsync interrupt is unusual and clears itself. If we
                 * took too long, we may have missed it. Do some checks
                 */
                if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                        /* vsync is enabled, see if we're in a new field */
                        if ((itv->lastVsyncFrame & 1) != (read_reg(0x28c0) & 1)) {
                                /* New field, looks like we missed it */
                                IVTV_DEBUG_YUV("VSync interrupt missed %d\n", read_reg(0x28c0) >> 16);
                                vsync_force = 1;
                        }
                }

                if (!vsync_force) {
                        /* No Vsync expected, wasn't for us */
                        spin_unlock(&itv->dma_reg_lock);
                        return IRQ_NONE;
                }
        }

        /* Exclude interrupts noted below from the output, otherwise the log is flooded with
           these messages */
        if (combo & ~0xff6d0400)
                IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

        if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
                IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
        }

        if (combo & IVTV_IRQ_DMA_READ) {
                ivtv_irq_dma_read(itv);
        }

        if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
                ivtv_irq_enc_dma_complete(itv);
        }

        if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
                ivtv_irq_enc_pio_complete(itv);
        }

        if (combo & IVTV_IRQ_DMA_ERR) {
                ivtv_irq_dma_err(itv);
        }

        if (combo & IVTV_IRQ_ENC_START_CAP) {
                ivtv_irq_enc_start_cap(itv);
        }

        if (combo & IVTV_IRQ_ENC_VBI_CAP) {
                ivtv_irq_enc_vbi_cap(itv);
        }

        if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
                ivtv_irq_dec_vbi_reinsert(itv);
        }

        if (combo & IVTV_IRQ_ENC_EOS) {
                IVTV_DEBUG_IRQ("ENC EOS\n");
                set_bit(IVTV_F_I_EOS, &itv->i_flags);
                wake_up(&itv->cap_w);
        }

        if (combo & IVTV_IRQ_DEC_DATA_REQ) {
                ivtv_irq_dec_data_req(itv);
        }

        /* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
        if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
                ivtv_irq_vsync(itv);
        }

        if (combo & IVTV_IRQ_ENC_VIM_RST) {
                IVTV_DEBUG_IRQ("VIM RST\n");
                /*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
        }

        if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
                IVTV_DEBUG_INFO("Stereo mode changed\n");
        }

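        /* A deferred DMA and/or PIO request may be pending (set while another
           transfer was in flight). Pick the next stream round-robin, using
           irq_rr_idx as the rotating starting point so that one busy stream
           cannot starve the others. */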
        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
                                continue;
                        if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_dec_start(s);
                        else
                                ivtv_dma_enc_start(s);
                        break;
                }
                if (i == IVTV_MAX_STREAMS && test_and_clear_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags)) {
                        ivtv_udma_start(itv);
                }
        }

        if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
                itv->irq_rr_idx++;
                for (i = 0; i < IVTV_MAX_STREAMS; i++) {
                        int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
                        struct ivtv_stream *s = &itv->streams[idx];

                        if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
                                continue;
                        if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
                                ivtv_dma_enc_start(s);
                        break;
                }
        }

        if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
                queue_work(itv->irq_work_queues, &itv->irq_work_queue);

        spin_unlock(&itv->dma_reg_lock);

        /* If we've just handled a 'forced' vsync, it's safest to say it
         * wasn't ours. Another device may have triggered it at just
         * the right time.
         */
        return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

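/* Timer callback, armed for roughly 100 ms whenever a DMA transfer is
   started (see dma_timer in the start functions above). If the completion
   interrupt never arrives, log the timeout and reset the DMA state so that
   the driver can recover. */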
void ivtv_unfinished_dma(unsigned long arg)
{
        struct ivtv *itv = (struct ivtv *)arg;

        if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
                return;
        IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

        write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
        clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
        clear_bit(IVTV_F_I_DMA, &itv->i_flags);
        itv->cur_dma_stream = -1;
        wake_up(&itv->dma_waitq);
}