author	Hans Verkuil <hverkuil@xs4all.nl>	2007-05-19 13:07:16 -0400
committer	Mauro Carvalho Chehab <mchehab@infradead.org>	2007-06-08 07:21:13 -0400
commit	dc02d50a6d71cba2b2edb78377af5a5965879a49 (patch)
tree	199528db7b95dacd395004119614567b57725787 /drivers/media/video/ivtv/ivtv-irq.c
parent	ffeb9ec72e18e16d0b0835d959cdf01650758638 (diff)
V4L/DVB (5675): Move big PIO accesses from the interrupt handler to a workhandler
Sliced VBI transfers use PIO instead of DMA. This was done inside the
interrupt handler, but since PIO accesses are very slow this meant that a
lot of time was spent inside the interrupt handler. All PIO copies are now
moved to a workqueue. This should fix various issues with missing time
ticks and remote key hits.

Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
Signed-off-by: Mauro Carvalho Chehab <mchehab@infradead.org>
Diffstat (limited to 'drivers/media/video/ivtv/ivtv-irq.c')
-rw-r--r--	drivers/media/video/ivtv/ivtv-irq.c	204
1 file changed, 137 insertions(+), 67 deletions(-)
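For context, the patch replaces in-interrupt memcpy_fromio() copies with a deferred work item: the IRQ handler only flags what needs copying and queues a work item, and the slow PIO copy runs later in process context. Below is a minimal sketch of that general pattern using the 2.6-era workqueue API; the names (demo_dev, demo_irq, demo_work_fn, DEMO_WORK_PIO) are illustrative and do not come from the ivtv driver.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct demo_dev {
	unsigned long flags;		/* pending deferred-work bits */
	struct workqueue_struct *wq;	/* private workqueue */
	struct work_struct work;	/* work item queued from the IRQ */
};

#define DEMO_WORK_PIO 0

/* Runs in process context: safe place for slow PIO copies. */
static void demo_work_fn(struct work_struct *w)
{
	struct demo_dev *dev = container_of(w, struct demo_dev, work);

	if (test_and_clear_bit(DEMO_WORK_PIO, &dev->flags)) {
		/* the slow memcpy_fromio() copies would happen here,
		   outside the interrupt handler */
	}
}

/* Hard IRQ handler: record the work and defer it, then return quickly. */
static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_dev *dev = dev_id;

	set_bit(DEMO_WORK_PIO, &dev->flags);
	queue_work(dev->wq, &dev->work);
	return IRQ_HANDLED;
}

The driver itself batches several kinds of deferred work behind per-task bits (IVTV_F_I_WORK_HANDLER_PIO, _VBI, _YUV) plus a single IVTV_F_I_HAVE_WORK bit, and queues the work item once at the end of the interrupt handler; the sketch collapses that to one bit for brevity.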
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index c3a047b381b3..ba98bf054f2e 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -31,8 +31,6 @@
 
 #define DMA_MAGIC_COOKIE 0x000001fe
 
-#define SLICED_VBI_PIO 1
-
 static void ivtv_dma_dec_start(struct ivtv_stream *s);
 
 static const int ivtv_stream_map[] = {
@@ -42,12 +40,40 @@ static const int ivtv_stream_map[] = {
 	IVTV_ENC_STREAM_TYPE_VBI,
 };
 
-static inline int ivtv_use_pio(struct ivtv_stream *s)
+
+static void ivtv_pio_work_handler(struct ivtv *itv)
 {
-	struct ivtv *itv = s->itv;
+	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
+	struct ivtv_buffer *buf;
+	struct list_head *p;
+	int i = 0;
+
+	IVTV_DEBUG_DMA("ivtv_pio_work_handler\n");
+	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
+			s->v4l2dev == NULL || !ivtv_use_pio(s)) {
+		itv->cur_pio_stream = -1;
+		/* trigger PIO complete user interrupt */
+		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
+		return;
+	}
+	IVTV_DEBUG_DMA("Process PIO %s\n", s->name);
+	buf = list_entry(s->q_dma.list.next, struct ivtv_buffer, list);
+	list_for_each(p, &s->q_dma.list) {
+		struct ivtv_buffer *buf = list_entry(p, struct ivtv_buffer, list);
+		u32 size = s->PIOarray[i].size & 0x3ffff;
 
-	return s->dma == PCI_DMA_NONE ||
-	    (SLICED_VBI_PIO && s->type == IVTV_ENC_STREAM_TYPE_VBI && itv->vbi.sliced_in->service_set);
+		/* Copy the data from the card to the buffer */
+		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
+			memcpy_fromio(buf->buf, itv->dec_mem + s->PIOarray[i].src - IVTV_DECODER_OFFSET, size);
+		}
+		else {
+			memcpy_fromio(buf->buf, itv->enc_mem + s->PIOarray[i].src, size);
+		}
+		if (s->PIOarray[i].size & 0x80000000)
+			break;
+		i++;
+	}
+	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
 }
 
 void ivtv_irq_work_handler(struct work_struct *work)
@@ -56,8 +82,11 @@ void ivtv_irq_work_handler(struct work_struct *work)
 
 	DEFINE_WAIT(wait);
 
+	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
+		ivtv_pio_work_handler(itv);
+
 	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
-		vbi_work_handler(itv);
+		ivtv_vbi_work_handler(itv);
 
 	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
 		ivtv_yuv_work_handler(itv);
@@ -173,8 +202,7 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 	}
 	s->buffers_stolen = rc;
 
-	/* got the buffers, now fill in SGarray (DMA) or copy the data from the card
-	   to the buffers (PIO). */
+	/* got the buffers, now fill in SGarray (DMA) */
 	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
 	memset(buf->buf, 0, 128);
 	list_for_each(p, &s->q_predma.list) {
@@ -182,21 +210,11 @@ static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MA
 
 		if (skip_bufs-- > 0)
 			continue;
-		if (!ivtv_use_pio(s)) {
-			s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
-			s->SGarray[idx].src = cpu_to_le32(offset);
-			s->SGarray[idx].size = cpu_to_le32(s->buf_size);
-		}
+		s->SGarray[idx].dst = cpu_to_le32(buf->dma_handle);
+		s->SGarray[idx].src = cpu_to_le32(offset);
+		s->SGarray[idx].size = cpu_to_le32(s->buf_size);
 		buf->bytesused = (size < s->buf_size) ? size : s->buf_size;
 
-		/* If PIO, then copy the data from the card to the buffer */
-		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
-			memcpy_fromio(buf->buf, itv->dec_mem + offset - IVTV_DECODER_OFFSET, buf->bytesused);
-		}
-		else if (ivtv_use_pio(s)) {
-			memcpy_fromio(buf->buf, itv->enc_mem + offset, buf->bytesused);
-		}
-
 		s->q_predma.bytesused += buf->bytesused;
 		size -= buf->bytesused;
 		offset += s->buf_size;
@@ -224,11 +242,6 @@ static void dma_post(struct ivtv_stream *s)
 	u32 *u32buf;
 	int x = 0;
 
-	if (ivtv_use_pio(s)) {
-		if (s->q_predma.bytesused)
-			ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
-		s->SG_length = 0;
-	}
 	IVTV_DEBUG_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
 		s->name, s->dma_offset);
 	list_for_each(p, &s->q_dma.list) {
@@ -278,10 +291,14 @@ static void dma_post(struct ivtv_stream *s)
 	if (buf)
 		buf->bytesused += s->dma_last_offset;
 	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
-		/* Parse and Groom VBI Data */
-		s->q_dma.bytesused -= buf->bytesused;
-		ivtv_process_vbi_data(itv, buf, 0, s->type);
-		s->q_dma.bytesused += buf->bytesused;
+		list_for_each(p, &s->q_dma.list) {
+			buf = list_entry(p, struct ivtv_buffer, list);
+
+			/* Parse and Groom VBI Data */
+			s->q_dma.bytesused -= buf->bytesused;
+			ivtv_process_vbi_data(itv, buf, 0, s->type);
+			s->q_dma.bytesused += buf->bytesused;
+		}
 		if (s->id == -1) {
 			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
 			return;
@@ -351,10 +368,14 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
 	int i;
 
+	IVTV_DEBUG_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);
+
 	if (s->q_predma.bytesused)
 		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
-	IVTV_DEBUG_DMA("start DMA for %s\n", s->name);
-	s->SGarray[s->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
+
+	if (ivtv_use_dma(s))
+		s->SGarray[s->SG_length - 1].size =
+			cpu_to_le32(le32_to_cpu(s->SGarray[s->SG_length - 1].size) + 256);
 
 	/* If this is an MPEG stream, and VBI data is also pending, then append the
 	   VBI DMA to the MPEG DMA and transfer both sets of data at once.
@@ -368,7 +389,8 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->SG_length &&
 	    s->SG_length + s_vbi->SG_length <= s->buffers) {
 		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
-		s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
+		if (ivtv_use_dma(s_vbi))
+			s_vbi->SGarray[s_vbi->SG_length - 1].size = cpu_to_le32(le32_to_cpu(s_vbi->SGarray[s->SG_length - 1].size) + 256);
 		for (i = 0; i < s_vbi->SG_length; i++) {
 			s->SGarray[s->SG_length++] = s_vbi->SGarray[i];
 		}
@@ -381,14 +403,26 @@ static void ivtv_dma_enc_start(struct ivtv_stream *s)
 	/* Mark last buffer size for Interrupt flag */
 	s->SGarray[s->SG_length - 1].size |= cpu_to_le32(0x80000000);
 
-	/* Sync Hardware SG List of buffers */
-	ivtv_stream_sync_for_device(s);
-	write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
-	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
-	set_bit(IVTV_F_I_DMA, &itv->i_flags);
-	itv->cur_dma_stream = s->type;
-	itv->dma_timer.expires = jiffies + HZ / 10;
-	add_timer(&itv->dma_timer);
+	if (ivtv_use_pio(s)) {
+		for (i = 0; i < s->SG_length; i++) {
+			s->PIOarray[i].src = le32_to_cpu(s->SGarray[i].src);
+			s->PIOarray[i].size = le32_to_cpu(s->SGarray[i].size);
+		}
+		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
+		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
+		set_bit(IVTV_F_I_PIO, &itv->i_flags);
+		itv->cur_pio_stream = s->type;
+	}
+	else {
+		/* Sync Hardware SG List of buffers */
+		ivtv_stream_sync_for_device(s);
+		write_reg(s->SG_handle, IVTV_REG_ENCDMAADDR);
+		write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
+		set_bit(IVTV_F_I_DMA, &itv->i_flags);
+		itv->cur_dma_stream = s->type;
+		itv->dma_timer.expires = jiffies + HZ / 10;
+		add_timer(&itv->dma_timer);
+	}
 }
 
 static void ivtv_dma_dec_start(struct ivtv_stream *s)
@@ -489,6 +523,40 @@ static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
 	wake_up(&itv->dma_waitq);
 }
 
+static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
+{
+	struct ivtv_stream *s;
+
+	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
+		itv->cur_pio_stream = -1;
+		return;
+	}
+	s = &itv->streams[itv->cur_pio_stream];
+	IVTV_DEBUG_IRQ("ENC PIO COMPLETE %s\n", s->name);
+	s->SG_length = 0;
+	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
+	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
+	itv->cur_pio_stream = -1;
+	dma_post(s);
+	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
+		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
+	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
+		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
+	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
+		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
+	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
+	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
+		u32 tmp;
+
+		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
+		tmp = s->dma_offset;
+		s->dma_offset = itv->vbi.dma_offset;
+		dma_post(s);
+		s->dma_offset = tmp;
+	}
+	wake_up(&itv->dma_waitq);
+}
+
 static void ivtv_irq_dma_err(struct ivtv *itv)
 {
 	u32 data[CX2341X_MBOX_MAX_DATA];
@@ -532,13 +600,7 @@ static void ivtv_irq_enc_start_cap(struct ivtv *itv)
 	clear_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
 	s = &itv->streams[ivtv_stream_map[data[0]]];
 	if (!stream_enc_dma_append(s, data)) {
-		if (ivtv_use_pio(s)) {
-			dma_post(s);
-			ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, data[0]);
-		}
-		else {
-			set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
-		}
+		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
 	}
 }
 
@@ -551,15 +613,6 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
 	IVTV_DEBUG_IRQ("ENC START VBI CAP\n");
 	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
 
-	if (ivtv_use_pio(s)) {
-		if (stream_enc_dma_append(s, data))
-			return;
-		if (s->q_predma.bytesused)
-			ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
-		s->SG_length = 0;
-		dma_post(s);
-		return;
-	}
 	/* If more than two VBI buffers are pending, then
 	   clear the old ones and start with this new one.
 	   This can happen during transition stages when MPEG capturing is
@@ -582,11 +635,11 @@ static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
 	if (!stream_enc_dma_append(s, data) &&
 	    !test_bit(IVTV_F_S_STREAMING, &s_mpg->s_flags)) {
 		set_bit(IVTV_F_I_ENC_VBI, &itv->i_flags);
-		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
+		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
 	}
 }
 
-static void ivtv_irq_dev_vbi_reinsert(struct ivtv *itv)
+static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
 {
 	u32 data[CX2341X_MBOX_MAX_DATA];
 	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];
@@ -594,7 +647,7 @@ static void ivtv_irq_dev_vbi_reinsert(struct ivtv *itv)
 	IVTV_DEBUG_IRQ("DEC VBI REINSERT\n");
 	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
 			!stream_enc_dma_append(s, data)) {
-		dma_post(s);
+		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
 	}
 }
 
@@ -657,7 +710,6 @@ static void ivtv_irq_vsync(struct ivtv *itv)
 	}
 	if (frame != (itv->lastVsyncFrame & 1)) {
 		struct ivtv_stream *s = ivtv_get_output_stream(itv);
-		int work = 0;
 
 		itv->lastVsyncFrame += 1;
 		if (frame == 0) {
@@ -678,7 +730,7 @@ static void ivtv_irq_vsync(struct ivtv *itv)
 		/* Send VBI to saa7127 */
 		if (frame) {
 			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
-			work = 1;
+			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
 		}
 
 		/* Check if we need to update the yuv registers */
@@ -691,11 +743,9 @@ static void ivtv_irq_vsync(struct ivtv *itv)
 				itv->yuv_info.new_frame_info[last_dma_frame].update = 0;
 				itv->yuv_info.yuv_forced_update = 0;
 				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
-				work = 1;
+				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
 			}
 		}
-		if (work)
-			queue_work(itv->irq_work_queues, &itv->irq_work_queue);
 	}
 }
 
@@ -755,6 +805,10 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
 		ivtv_irq_enc_dma_complete(itv);
 	}
 
+	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
+		ivtv_irq_enc_pio_complete(itv);
+	}
+
 	if (combo & IVTV_IRQ_DMA_ERR) {
 		ivtv_irq_dma_err(itv);
 	}
@@ -768,7 +822,7 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
 	}
 
 	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
-		ivtv_irq_dev_vbi_reinsert(itv);
+		ivtv_irq_dec_vbi_reinsert(itv);
 	}
 
 	if (combo & IVTV_IRQ_ENC_EOS) {
@@ -813,6 +867,22 @@ irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
 		}
 	}
 
+	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
+		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
+			int idx = (i + itv->irq_rr_idx++) % IVTV_MAX_STREAMS;
+			struct ivtv_stream *s = &itv->streams[idx];
+
+			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
+				continue;
+			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
+				ivtv_dma_enc_start(s);
+			break;
+		}
+	}
+
+	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags))
+		queue_work(itv->irq_work_queues, &itv->irq_work_queue);
+
 	spin_unlock(&itv->dma_reg_lock);
 
 	/* If we've just handled a 'forced' vsync, it's safest to say it